Nov 26 06:48:23 crc systemd[1]: Starting Kubernetes Kubelet...
Nov 26 06:48:23 crc restorecon[4457]: Relabeled /var/lib/kubelet/config.json from system_u:object_r:unlabeled_t:s0 to system_u:object_r:container_var_lib_t:s0
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/device-plugins not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/device-plugins/kubelet.sock not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/volumes/kubernetes.io~configmap/nginx-conf/..2025_02_23_05_40_35.4114275528/nginx.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/22e96971 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/21c98286 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/0f1869e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/46889d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/5b6a5969 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/6c7921f5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4804f443 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/2a46b283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/a6b5573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4f88ee5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/5a4eee4b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/cd87c521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/38602af4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/1483b002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/0346718b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/d3ed4ada not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/3bb473a5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/8cd075a9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/00ab4760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/54a21c09 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/70478888 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/43802770 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/955a0edc not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/bca2d009 not reset as customized by admin to system_u:object_r:container_file_t:s0:c140,c1009
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/b295f9bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/bc46ea27 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5731fc1b not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5e1b2a3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/943f0936 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/3f764ee4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/8695e3f9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/aed7aa86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/c64d7448 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/0ba16bd2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/207a939f not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/54aa8cdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/1f5fa595 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/bf9c8153 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/47fba4ea not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/7ae55ce9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7906a268 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/ce43fa69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7fc7ea3a not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/d8c38b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/9ef015fb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/b9db6a41 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/b1733d79 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/afccd338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/9df0a185 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/18938cf8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/7ab4eb23 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/56930be6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_35.630010865 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/0d8e3722 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/d22b2e76 not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/e036759f not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/2734c483 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/57878fe7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/3f3c2e58 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/375bec3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/7bc41e08 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/48c7a72d not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/4b66701f not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/a5a1c202 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_40.1388695756 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/26f3df5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/6d8fb21d not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/50e94777 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208473b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/ec9e08ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3b787c39 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208eaed5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/93aa3a2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3c697968 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/ba950ec9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/cb5cdb37 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/f2df9827 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/fedaa673 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/9ca2df95 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/b2d7460e not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2207853c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/241c1c29 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2d910eaf not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/c6c0f2e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/399edc97 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8049f7cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/0cec5484 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/312446d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c406,c828
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8e56a35d not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/2d30ddb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/eca8053d not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/c3a25c9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c168,c522
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/b9609c22 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/e8b0eca9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/b36a9c3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/38af7b07 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/ae821620 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/baa23338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/2c534809 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/59b29eae not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/c91a8e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/4d87494a not reset as customized by admin to system_u:object_r:container_file_t:s0:c442,c857
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/1e33ca63 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/8dea7be2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d0b04a99 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d84f01e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/4109059b not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/a7258a3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/05bdf2b6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/f3261b51 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/315d045e not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/5fdcf278 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/d053f757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/c2850dc7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fcfb0b2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c7ac9b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fa0c0d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c609b6ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/2be6c296 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/89a32653 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/4eb9afeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/13af6efa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/b03f9724 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/e3d105cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/3aed4d83 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/0765fa6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/2cefc627 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/3dcc6345 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/365af391 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b1130c0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/236a5913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b9432e26 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/5ddb0e3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/986dc4fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/8a23ff9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/9728ae68 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/665f31d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/136c9b42 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
system_u:object_r:container_file_t:s0:c0,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/98a1575b not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/cac69136 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/5deb77a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/2ae53400 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/e46f2326 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/dc688d3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/3497c3cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/177eb008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Nov 26 06:48:23 crc restorecon[4457]: 
/var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/af5a2afa not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/d780cb1f not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/49b0f374 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/26fbb125 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 26 06:48:23 crc restorecon[4457]: 
/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/cf14125a not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/b7f86972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/e51d739c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/88ba6a69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/669a9acf not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/5cd51231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/75349ec7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/15c26839 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/45023dcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/2bb66a50 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/64d03bdd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/ab8e7ca0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/bb9be25f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: 
/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/9a0b61d3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/d471b9d2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/8cb76b8e not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/11a00840 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/ec355a92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/992f735e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 26 06:48:23 crc 
restorecon[4457]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d59cdbbc not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/72133ff0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/c56c834c not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d13724c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/0a498258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa471982 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fc900d92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa7d68da not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/4bacf9b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/424021b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/fc2e31a3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/f51eefac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/c8997f2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/7481f599 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 26 06:48:23 crc restorecon[4457]: 
/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/fdafea19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/d0e1c571 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/ee398915 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/682bb6b8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a3e67855 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a989f289 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/915431bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/7796fdab not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/dcdb5f19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 26 06:48:23 crc restorecon[4457]: 
/var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/a3aaa88c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/5508e3e6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/160585de not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/e99f8da3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/8bc85570 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/a5861c91 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/84db1135 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/9e1a6043 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/c1aba1c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/d55ccd6d not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/971cc9f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/8f2e3dcf not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/ceb35e9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/1c192745 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/5209e501 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/f83de4df not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/e7b978ac not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 26 06:48:23 crc 
restorecon[4457]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/c64304a1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/5384386b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/cce3e3ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/8fb75465 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/740f573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/32fd1134 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/0a861bd3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/80363026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/bfa952a8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c129,c158 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..2025_02_23_05_33_31.333075221 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/793bf43d not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/7db1bb6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/4f6a0368 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/c12c7d86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/36c4a773 not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/4c1e98ae not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/a4c8115c not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/setup/7db1802e not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver/a008a7ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-syncer/2c836bac not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-regeneration-controller/0ce62299 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c97,c980 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-insecure-readyz/945d2457 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-check-endpoints/7d5c1dd8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/index.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/bundle-v1.15.0.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/channel.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/package.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc 
restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/bc8d0691 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/6b76097a not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/34d1af30 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/312ba61c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc 
restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/645d5dd1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/16e825f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/4cf51fc9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/2a23d348 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/075dbd49 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 26 06:48:23 
crc restorecon[4457]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/dd585ddd not reset as customized by admin to system_u:object_r:container_file_t:s0:c377,c642 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/17ebd0ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c343 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/005579f4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_23_11.1287037894 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 26 
06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/bf5f3b9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/af276eb7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/ea28e322 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/692e6683 not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/871746a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/4eb2e958 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 06:48:23 crc restorecon[4457]: 
/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/etc-hosts not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/ca9b62da not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/0edd6fce not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca not reset as customized by 
admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/containers/controller-manager/89b4555f not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 26 06:48:23 crc restorecon[4457]: 
/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/655fcd71 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/0d43c002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/e68efd17 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/9acf9b65 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/5ae3ff11 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/1e59206a not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/27af16d1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c304,c1017 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/7918e729 not reset as customized by admin to system_u:object_r:container_file_t:s0:c853,c893 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/5d976d0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c585,c981 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 26 06:48:23 crc restorecon[4457]: 
/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/d7f55cbb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/f0812073 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/1a56cbeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/7fdd437e not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/cdfb5652 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 06:48:23 crc restorecon[4457]: 
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/fix-audit-permissions/fb93119e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver/f1e8fc0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver-check-endpoints/218511f3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server/serving-certs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/ca8af7b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/72cc8a75 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/6e8a3760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4c3455c0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/2278acb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4b453e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/3ec09bda not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2/cacerts.bin not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java/cacerts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl/ca-bundle.trust.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/email-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/objsign-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2ae6433e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fde84897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75680d2e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/openshift-service-serving-signer_1740288168.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/facfc4fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f5a969c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CFCA_EV_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9ef4a08a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ingress-operator_1740288202.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2f332aed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/248c8271.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d10a21f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ACCVRAIZ1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a94d09e5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c9a4d3b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40193066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd8c0d63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b936d1c6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CA_Disig_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4fd49c6c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM_SERVIDORES_SEGUROS.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b81b93f0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f9a69fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b30d5fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ANF_Secure_Server_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b433981b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93851c9e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9282e51c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7dd1bc4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Actalis_Authentication_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/930ac5d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f47b495.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e113c810.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5931b5bc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Commercial.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2b349938.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e48193cf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/302904dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a716d4ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Networking.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93bc0acc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/86212b19.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b727005e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbc54cab.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f51bb24c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c28a8a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9c8dfbd4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ccc52f49.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cb1c3204.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ce5e74ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd08c599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6d41d539.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb5fa911.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e35234b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8cb5ee0f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a7c655d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f8fc53da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/de6d66f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d41b5e2a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/41a3f684.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1df5a75f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_2011.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e36a6752.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b872f2b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9576d26b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/228f89db.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_ECC_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb717492.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d21b73c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b1b94ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/595e996b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_RSA_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b46e03d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/128f4b91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_3_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81f2d2b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Autoridad_de_Certificacion_Firmaprofesional_CIF_A62634068.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3bde41ac.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d16a5865.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_EC-384_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0179095f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ffa7f1eb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9482e63a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4dae3dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e359ba6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7e067d03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/95aff9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7746a63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Baltimore_CyberTrust_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/653b494a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3ad48a91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_2_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/54657681.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/82223c44.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8de2f56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d9dafe4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d96b65e2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee64a828.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40547a79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5a3f0ff8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a780d93.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/34d996fb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/eed8c118.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/89c02a45.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b1159c4c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d6325660.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4c339cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8312c4c1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_E1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8508e720.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5fdd185d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48bec511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/69105f4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b9bc432.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/32888f65.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b03dec0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/219d9499.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5acf816d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbf06781.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc99f41e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AAA_Certificate_Services.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/985c1f52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8794b4e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_BR_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7c037b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ef954a4e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_EV_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2add47b6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/90c5a3c8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0f3e76e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/53a1b57a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_EV_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5ad8a5d6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/68dd7389.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d04f354.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d6437c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/062cdee6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bd43e1dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7f3d5d1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c491639e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3513523f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/399e7759.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/feffd413.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d18e9066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/607986c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c90bc37d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 06:48:23 crc restorecon[4457]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1b0f7e5c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e08bfd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dd8e9d41.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed39abd0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a3418fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bc3f2570.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_High_Assurance_EV_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/244b5494.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81b9768f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4be590e0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_ECC_P384_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9846683b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/252252d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e8e7201.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_RSA4096_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d52c538d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c44cc0c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Trusted_Root_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75d1b2ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a2c66da8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ecccd8db.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust.net_Certification_Authority__2048_.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/aee5f10d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e7271e8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0e59380.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4c3982f2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b99d060.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf64f35b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0a775a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/002c0b4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cc450945.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_EC1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/106f3e4d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b3fb433b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4042bcee.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/02265526.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/455f1b52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0d69c7e1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9f727ac7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5e98733a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0cd152c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc4d6a89.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6187b673.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/FIRMAPROFESIONAL_CA_ROOT-A_WEB.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ba8887ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/068570d1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f081611a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48a195d8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GDCA_TrustAUTH_R5_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f6fa695.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab59055e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b92fd57f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GLOBALTRUST_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fa5da96b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ec40989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7719f463.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1001acf7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f013ecaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/626dceaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c559d742.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1d3472b9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9479c8c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a81e292b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4bfab552.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e071171e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/57bcb2da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_ECC_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab5346f4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5046c355.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_RSA_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/865fbdf9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da0cfd1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/85cde254.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_ECC_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbb3f32b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureSign_RootCA11.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5860aaa6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/31188b5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HiPKI_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c7f1359b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f15c80c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hongkong_Post_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/09789157.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/18856ac4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e09d511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Commercial_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cf701eeb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d06393bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Public_Sector_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/10531352.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Izenpe.com.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureTrust_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0ed035a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsec_e-Szigno_Root_CA_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8160b96c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8651083.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2c63f966.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_ECC_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d89cda1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/01419da9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_RSA_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7a5b843.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_RSA_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf53fb88.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9591a472.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3afde786.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Gold_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NAVER_Global_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3fb36b73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d39b0a2c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a89d74c2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd58d51e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7db1890.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NetLock_Arany__Class_Gold__F__tan__s__tv__ny.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/988a38cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/60afe812.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f39fc864.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5443e9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GB_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e73d606e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dfc0fe80.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b66938e9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e1eab7c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GC_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/773e07ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c899c73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d59297b8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ddcda989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_1_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/749e9e03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/52b525c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7e8dc79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a819ef2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/08063a00.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b483515.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/064e0aa9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1f58a078.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6f7454b3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7fa05551.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76faf6c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9339512a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f387163d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee37c333.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e18bfb83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e442e424.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fe8a2cd8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/23f4c490.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5cd81ad7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0c70a8d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7892ad52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SZAFIR_ROOT_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4f316efb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_RSA_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/06dc52d5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/583d0756.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0bf05006.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/88950faa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9046744a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c860d51.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_RSA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6fa5da56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/33ee480d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Secure_Global_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/63a2c897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_ECC_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bdacca6f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ff34af3f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbff3a01.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_ECC_RootCA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_C1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/406c9bb1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_C3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Services_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Silver_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/99e1b953.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/14bc7599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TUBITAK_Kamu_SM_SSL_Kok_Sertifikasi_-_Surum_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a3adc42.0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f459871d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_ECC_Root_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_RSA_Root_2023.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TeliaSonera_Root_CA_v1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telia_Root_CA_v2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f103249.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f058632f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-certificates.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9bf03295.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/98aaf404.0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1cef98f5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/073bfcc5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2923b3f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f249de83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/edcbddb5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P256_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b5697b0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ae85e5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b74d2bd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 
06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P384_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d887a5bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9aef356c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TunTrust_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd64f3fc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e13665f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Extended_Validation_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f5dc4f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da7377f6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Global_G2_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c01eb047.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/304d27c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed858448.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f30dd6ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/04f60c28.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_ECC_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fc5a8f99.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/35105088.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee532fd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/XRamp_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/706f604c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76579174.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d86cdd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/882de061.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f618aec.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a9d40e02.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e-Szigno_Root_CA_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e868b802.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/83e9984f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ePKI_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca6e4ad9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d6523ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4b718d9b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/869fbf79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/containers/registry/f8d22bdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 06:48:23 crc 
restorecon[4457]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/6e8bbfac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/54dd7996 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/a4f1bb05 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/207129da not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/c1df39e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/15b8f1cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 06:48:23 crc restorecon[4457]: 
/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/77bd6913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/2382c1b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/704ce128 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/70d16fe0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/bfb95535 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/57a8e8e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/1b9d3e5e not reset as customized by admin to system_u:object_r:container_file_t:s0:c107,c917 Nov 26 06:48:23 crc restorecon[4457]: 
/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/fddb173c not reset as customized by admin to system_u:object_r:container_file_t:s0:c202,c983 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/95d3c6c4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/bfb5fff5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/2aef40aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/c0391cad not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/1119e69d not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/660608b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/8220bd53 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/85f99d5c not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/4b0225f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/9c2a3394 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/e820b243 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/1ca52ea0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/e6988e45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 26 06:48:23 crc restorecon[4457]: 
/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/6655f00b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/98bc3986 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/08e3458a not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/2a191cb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/6c4eeefb not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/f61a549c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/24891863 not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/fbdfd89c not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/9b63b3bc not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c37,c572 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/8acde6d6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/node-driver-registrar/59ecbba3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/csi-provisioner/685d4be3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 26 06:48:23 crc restorecon[4457]: 
/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/containers/route-controller-manager/feaea55e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 
06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg/catalog.json not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/63709497 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/d966b7fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/f5773757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/81c9edb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/57bf57ee not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/86f5e6aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/0aabe31d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/d2af85c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/09d157d9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]:
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller not reset 
as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller/catalog.json not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator/catalog.json not reset as
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:23 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c0fe7256 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c30319e4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/e6b1dd45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/2bb643f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/920de426 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/70fa1e87 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/a1c12a2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/9442e6c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/5b45ec72 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/3c9f3a59 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/1091c11b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/9a6821c6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/ec0c35e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/517f37e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/6214fe78 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/ba189c8b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/351e4f31 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/c0f219ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/8069f607 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/559c3d82 not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/605ad488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/148df488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/3bf6dcb4 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c133,c223 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/022a2feb not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/938c3924 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/729fe23e not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/1fd5cbd4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/a96697e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/e155ddca not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/10dd0e0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 26 06:48:24 crc restorecon[4457]: 
/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/etc-hosts not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c682,c947 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/6f2c8392 not reset as customized by admin to system_u:object_r:container_file_t:s0:c267,c588 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/bd241ad9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/plugins not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/plugins/csi-hostpath not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/plugins/csi-hostpath/csi.sock not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/plugins/kubernetes.io not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/plugins/kubernetes.io/csi not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983 not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/vol_data.json not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 26 06:48:24 crc restorecon[4457]: /var/lib/kubelet/plugins_registry not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 26 06:48:24 crc restorecon[4457]: Relabeled /var/usrlocal/bin/kubenswrapper from system_u:object_r:bin_t:s0 to system_u:object_r:kubelet_exec_t:s0 Nov 26 06:48:24 crc kubenswrapper[4492]: Flag --container-runtime-endpoint has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Nov 26 06:48:24 crc kubenswrapper[4492]: Flag --minimum-container-ttl-duration has been deprecated, Use --eviction-hard or --eviction-soft instead. Will be removed in a future version. Nov 26 06:48:24 crc kubenswrapper[4492]: Flag --volume-plugin-dir has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Nov 26 06:48:24 crc kubenswrapper[4492]: Flag --register-with-taints has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. 
Nov 26 06:48:24 crc kubenswrapper[4492]: Flag --pod-infra-container-image has been deprecated, will be removed in a future release. Image garbage collector will get sandbox image information from CRI.
Nov 26 06:48:24 crc kubenswrapper[4492]: Flag --system-reserved has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.293188 4492 server.go:211] "--pod-infra-container-image will not be pruned by the image garbage collector in kubelet and should also be set in the remote runtime"
Nov 26 06:48:24 crc kubenswrapper[4492]: W1126 06:48:24.296790 4492 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Nov 26 06:48:24 crc kubenswrapper[4492]: W1126 06:48:24.296808 4492 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Nov 26 06:48:24 crc kubenswrapper[4492]: W1126 06:48:24.296812 4492 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Nov 26 06:48:24 crc kubenswrapper[4492]: W1126 06:48:24.296817 4492 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Nov 26 06:48:24 crc kubenswrapper[4492]: W1126 06:48:24.296820 4492 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Nov 26 06:48:24 crc kubenswrapper[4492]: W1126 06:48:24.296824 4492 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Nov 26 06:48:24 crc kubenswrapper[4492]: W1126 06:48:24.296827 4492 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Nov 26 06:48:24 crc kubenswrapper[4492]: W1126 06:48:24.296831 4492 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Nov 26 06:48:24 crc kubenswrapper[4492]: W1126 06:48:24.296834 4492 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Nov 26 06:48:24 crc kubenswrapper[4492]: W1126 06:48:24.296838 4492 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Nov 26 06:48:24 crc kubenswrapper[4492]: W1126 06:48:24.296841 4492 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Nov 26 06:48:24 crc kubenswrapper[4492]: W1126 06:48:24.296846 4492 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Nov 26 06:48:24 crc kubenswrapper[4492]: W1126 06:48:24.296849 4492 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Nov 26 06:48:24 crc kubenswrapper[4492]: W1126 06:48:24.296853 4492 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Nov 26 06:48:24 crc kubenswrapper[4492]: W1126 06:48:24.296857 4492 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Nov 26 06:48:24 crc kubenswrapper[4492]: W1126 06:48:24.296861 4492 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Nov 26 06:48:24 crc kubenswrapper[4492]: W1126 06:48:24.296865 4492 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Nov 26 06:48:24 crc kubenswrapper[4492]: W1126 06:48:24.296870 4492 feature_gate.go:330] unrecognized feature gate: OVNObservability
Nov 26 06:48:24 crc kubenswrapper[4492]: W1126 06:48:24.296874 4492 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Nov 26 06:48:24 crc kubenswrapper[4492]: W1126 06:48:24.296878 4492 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Nov 26 06:48:24 crc kubenswrapper[4492]: W1126 06:48:24.296881 4492 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Nov 26 06:48:24 crc kubenswrapper[4492]: W1126 06:48:24.296885 4492 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Nov 26 06:48:24 crc kubenswrapper[4492]: W1126 06:48:24.296888 4492 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Nov 26 06:48:24 crc kubenswrapper[4492]: W1126 06:48:24.296891 4492 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Nov 26 06:48:24 crc kubenswrapper[4492]: W1126 06:48:24.296895 4492 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Nov 26 06:48:24 crc kubenswrapper[4492]: W1126 06:48:24.296898 4492 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Nov 26 06:48:24 crc kubenswrapper[4492]: W1126 06:48:24.296901 4492 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Nov 26 06:48:24 crc kubenswrapper[4492]: W1126 06:48:24.296904 4492 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Nov 26 06:48:24 crc kubenswrapper[4492]: W1126 06:48:24.296908 4492 feature_gate.go:330] unrecognized feature gate: Example
Nov 26 06:48:24 crc kubenswrapper[4492]: W1126 06:48:24.296911 4492 feature_gate.go:330] unrecognized feature gate: SignatureStores
Nov 26 06:48:24 crc kubenswrapper[4492]: W1126 06:48:24.296915 4492 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Nov 26 06:48:24 crc kubenswrapper[4492]: W1126 06:48:24.296918 4492 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Nov 26 06:48:24 crc kubenswrapper[4492]: W1126 06:48:24.296921 4492 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Nov 26 06:48:24 crc kubenswrapper[4492]: W1126 06:48:24.296925 4492 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Nov 26 06:48:24 crc kubenswrapper[4492]: W1126 06:48:24.296928 4492 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Nov 26 06:48:24 crc kubenswrapper[4492]: W1126 06:48:24.296931 4492 feature_gate.go:330] unrecognized feature gate: PinnedImages
Nov 26 06:48:24 crc kubenswrapper[4492]: W1126 06:48:24.296935 4492 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Nov 26 06:48:24 crc kubenswrapper[4492]: W1126 06:48:24.296938 4492 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Nov 26 06:48:24 crc kubenswrapper[4492]: W1126 06:48:24.296942 4492 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Nov 26 06:48:24 crc kubenswrapper[4492]: W1126 06:48:24.296945 4492 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Nov 26 06:48:24 crc kubenswrapper[4492]: W1126 06:48:24.296949 4492 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Nov 26 06:48:24 crc kubenswrapper[4492]: W1126 06:48:24.296952 4492 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Nov 26 06:48:24 crc kubenswrapper[4492]: W1126 06:48:24.296955 4492 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Nov 26 06:48:24 crc kubenswrapper[4492]: W1126 06:48:24.296959 4492 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Nov 26 06:48:24 crc kubenswrapper[4492]: W1126 06:48:24.296962 4492 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Nov 26 06:48:24 crc kubenswrapper[4492]: W1126 06:48:24.296965 4492 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Nov 26 06:48:24 crc kubenswrapper[4492]: W1126 06:48:24.296970 4492 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Nov 26 06:48:24 crc kubenswrapper[4492]: W1126 06:48:24.296976 4492 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Nov 26 06:48:24 crc kubenswrapper[4492]: W1126 06:48:24.296979 4492 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Nov 26 06:48:24 crc kubenswrapper[4492]: W1126 06:48:24.296983 4492 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Nov 26 06:48:24 crc kubenswrapper[4492]: W1126 06:48:24.296986 4492 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Nov 26 06:48:24 crc kubenswrapper[4492]: W1126 06:48:24.296990 4492 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Nov 26 06:48:24 crc kubenswrapper[4492]: W1126 06:48:24.296993 4492 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Nov 26 06:48:24 crc kubenswrapper[4492]: W1126 06:48:24.296997 4492 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Nov 26 06:48:24 crc kubenswrapper[4492]: W1126 06:48:24.297001 4492 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Nov 26 06:48:24 crc kubenswrapper[4492]: W1126 06:48:24.297006 4492 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Nov 26 06:48:24 crc kubenswrapper[4492]: W1126 06:48:24.297010 4492 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Nov 26 06:48:24 crc kubenswrapper[4492]: W1126 06:48:24.297014 4492 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Nov 26 06:48:24 crc kubenswrapper[4492]: W1126 06:48:24.297018 4492 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Nov 26 06:48:24 crc kubenswrapper[4492]: W1126 06:48:24.297021 4492 feature_gate.go:330] unrecognized feature gate: NewOLM
Nov 26 06:48:24 crc kubenswrapper[4492]: W1126 06:48:24.297025 4492 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Nov 26 06:48:24 crc kubenswrapper[4492]: W1126 06:48:24.297029 4492 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Nov 26 06:48:24 crc kubenswrapper[4492]: W1126 06:48:24.297032 4492 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Nov 26 06:48:24 crc kubenswrapper[4492]: W1126 06:48:24.297036 4492 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Nov 26 06:48:24 crc kubenswrapper[4492]: W1126 06:48:24.297042 4492 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Nov 26 06:48:24 crc kubenswrapper[4492]: W1126 06:48:24.297046 4492 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Nov 26 06:48:24 crc kubenswrapper[4492]: W1126 06:48:24.297051 4492 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Nov 26 06:48:24 crc kubenswrapper[4492]: W1126 06:48:24.297056 4492 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Nov 26 06:48:24 crc kubenswrapper[4492]: W1126 06:48:24.297060 4492 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Nov 26 06:48:24 crc kubenswrapper[4492]: W1126 06:48:24.297064 4492 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Nov 26 06:48:24 crc kubenswrapper[4492]: W1126 06:48:24.297067 4492 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297457 4492 flags.go:64] FLAG: --address="0.0.0.0"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297470 4492 flags.go:64] FLAG: --allowed-unsafe-sysctls="[]"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297477 4492 flags.go:64] FLAG: --anonymous-auth="true"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297482 4492 flags.go:64] FLAG: --application-metrics-count-limit="100"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297488 4492 flags.go:64] FLAG: --authentication-token-webhook="false"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297492 4492 flags.go:64] FLAG: --authentication-token-webhook-cache-ttl="2m0s"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297496 4492 flags.go:64] FLAG: --authorization-mode="AlwaysAllow"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297501 4492 flags.go:64] FLAG: --authorization-webhook-cache-authorized-ttl="5m0s"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297506 4492 flags.go:64] FLAG: --authorization-webhook-cache-unauthorized-ttl="30s"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297510 4492 flags.go:64] FLAG: --boot-id-file="/proc/sys/kernel/random/boot_id"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297514 4492 flags.go:64] FLAG: --bootstrap-kubeconfig="/etc/kubernetes/kubeconfig"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297518 4492 flags.go:64] FLAG: --cert-dir="/var/lib/kubelet/pki"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297522 4492 flags.go:64] FLAG: --cgroup-driver="cgroupfs"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297526 4492 flags.go:64] FLAG: --cgroup-root=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297530 4492 flags.go:64] FLAG: --cgroups-per-qos="true"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297534 4492 flags.go:64] FLAG: --client-ca-file=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297538 4492 flags.go:64] FLAG: --cloud-config=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297542 4492 flags.go:64] FLAG: --cloud-provider=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297546 4492 flags.go:64] FLAG: --cluster-dns="[]"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297565 4492 flags.go:64] FLAG: --cluster-domain=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297570 4492 flags.go:64] FLAG: --config="/etc/kubernetes/kubelet.conf"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297575 4492 flags.go:64] FLAG: --config-dir=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297579 4492 flags.go:64] FLAG: --container-hints="/etc/cadvisor/container_hints.json"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297583 4492 flags.go:64] FLAG: --container-log-max-files="5"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297595 4492 flags.go:64] FLAG: --container-log-max-size="10Mi"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297599 4492 flags.go:64] FLAG: --container-runtime-endpoint="/var/run/crio/crio.sock"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297603 4492 flags.go:64] FLAG: --containerd="/run/containerd/containerd.sock"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297607 4492 flags.go:64] FLAG: --containerd-namespace="k8s.io"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297611 4492 flags.go:64] FLAG: --contention-profiling="false"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297615 4492 flags.go:64] FLAG: --cpu-cfs-quota="true"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297619 4492 flags.go:64] FLAG: --cpu-cfs-quota-period="100ms"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297623 4492 flags.go:64] FLAG: --cpu-manager-policy="none"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297627 4492 flags.go:64] FLAG: --cpu-manager-policy-options=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297632 4492 flags.go:64] FLAG: --cpu-manager-reconcile-period="10s"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297636 4492 flags.go:64] FLAG: --enable-controller-attach-detach="true"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297640 4492 flags.go:64] FLAG: --enable-debugging-handlers="true"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297643 4492 flags.go:64] FLAG: --enable-load-reader="false"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297647 4492 flags.go:64] FLAG: --enable-server="true"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297651 4492 flags.go:64] FLAG: --enforce-node-allocatable="[pods]"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297657 4492 flags.go:64] FLAG: --event-burst="100"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297662 4492 flags.go:64] FLAG: --event-qps="50"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297666 4492 flags.go:64] FLAG: --event-storage-age-limit="default=0"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297670 4492 flags.go:64] FLAG: --event-storage-event-limit="default=0"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297673 4492 flags.go:64] FLAG: --eviction-hard=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297679 4492 flags.go:64] FLAG: --eviction-max-pod-grace-period="0"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297682 4492 flags.go:64] FLAG: --eviction-minimum-reclaim=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297686 4492 flags.go:64] FLAG: --eviction-pressure-transition-period="5m0s"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297690 4492 flags.go:64] FLAG: --eviction-soft=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297693 4492 flags.go:64] FLAG: --eviction-soft-grace-period=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297697 4492 flags.go:64] FLAG: --exit-on-lock-contention="false"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297701 4492 flags.go:64] FLAG: --experimental-allocatable-ignore-eviction="false"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297705 4492 flags.go:64] FLAG: --experimental-mounter-path=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297709 4492 flags.go:64] FLAG: --fail-cgroupv1="false"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297712 4492 flags.go:64] FLAG: --fail-swap-on="true"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297716 4492 flags.go:64] FLAG: --feature-gates=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297720 4492 flags.go:64] FLAG: --file-check-frequency="20s"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297725 4492 flags.go:64] FLAG: --global-housekeeping-interval="1m0s"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297728 4492 flags.go:64] FLAG: --hairpin-mode="promiscuous-bridge"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297732 4492 flags.go:64] FLAG: --healthz-bind-address="127.0.0.1"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297736 4492 flags.go:64] FLAG: --healthz-port="10248"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297740 4492 flags.go:64] FLAG: --help="false"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297743 4492 flags.go:64] FLAG: --hostname-override=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297747 4492 flags.go:64] FLAG: --housekeeping-interval="10s"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297751 4492 flags.go:64] FLAG: --http-check-frequency="20s"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297754 4492 flags.go:64] FLAG: --image-credential-provider-bin-dir=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297758 4492 flags.go:64] FLAG: --image-credential-provider-config=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297761 4492 flags.go:64] FLAG: --image-gc-high-threshold="85"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297765 4492 flags.go:64] FLAG: --image-gc-low-threshold="80"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297769 4492 flags.go:64] FLAG: --image-service-endpoint=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297772 4492 flags.go:64] FLAG: --kernel-memcg-notification="false"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297776 4492 flags.go:64] FLAG: --kube-api-burst="100"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297780 4492 flags.go:64] FLAG: --kube-api-content-type="application/vnd.kubernetes.protobuf"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297784 4492 flags.go:64] FLAG: --kube-api-qps="50"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297788 4492 flags.go:64] FLAG: --kube-reserved=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297792 4492 flags.go:64] FLAG: --kube-reserved-cgroup=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297795 4492 flags.go:64] FLAG: --kubeconfig="/var/lib/kubelet/kubeconfig"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297800 4492 flags.go:64] FLAG: --kubelet-cgroups=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297803 4492 flags.go:64] FLAG: --local-storage-capacity-isolation="true"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297807 4492 flags.go:64] FLAG: --lock-file=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297810 4492 flags.go:64] FLAG: --log-cadvisor-usage="false"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297814 4492 flags.go:64] FLAG: --log-flush-frequency="5s"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297817 4492 flags.go:64] FLAG: --log-json-info-buffer-size="0"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297823 4492 flags.go:64] FLAG: --log-json-split-stream="false"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297826 4492 flags.go:64] FLAG: --log-text-info-buffer-size="0"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297830 4492 flags.go:64] FLAG: --log-text-split-stream="false"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297833 4492 flags.go:64] FLAG: --logging-format="text"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297837 4492 flags.go:64] FLAG: --machine-id-file="/etc/machine-id,/var/lib/dbus/machine-id"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297841 4492 flags.go:64] FLAG: --make-iptables-util-chains="true"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297844 4492 flags.go:64] FLAG: --manifest-url=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297848 4492 flags.go:64] FLAG: --manifest-url-header=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297853 4492 flags.go:64] FLAG: --max-housekeeping-interval="15s"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297857 4492 flags.go:64] FLAG: --max-open-files="1000000"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297861 4492 flags.go:64] FLAG: --max-pods="110"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297865 4492 flags.go:64] FLAG: --maximum-dead-containers="-1"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297869 4492 flags.go:64] FLAG: --maximum-dead-containers-per-container="1"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297873 4492 flags.go:64] FLAG: --memory-manager-policy="None"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297876 4492 flags.go:64] FLAG: --minimum-container-ttl-duration="6m0s"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297880 4492 flags.go:64] FLAG: --minimum-image-ttl-duration="2m0s"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297884 4492 flags.go:64] FLAG: --node-ip="192.168.126.11"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297887 4492 flags.go:64] FLAG: --node-labels="node-role.kubernetes.io/control-plane=,node-role.kubernetes.io/master=,node.openshift.io/os_id=rhcos"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297899 4492 flags.go:64] FLAG: --node-status-max-images="50"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297903 4492 flags.go:64] FLAG: --node-status-update-frequency="10s"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297908 4492 flags.go:64] FLAG: --oom-score-adj="-999"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297911 4492 flags.go:64] FLAG: --pod-cidr=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297915 4492 flags.go:64] FLAG: --pod-infra-container-image="quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:33549946e22a9ffa738fd94b1345f90921bc8f92fa6137784cb33c77ad806f9d"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297922 4492 flags.go:64] FLAG: --pod-manifest-path=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297926 4492 flags.go:64] FLAG: --pod-max-pids="-1"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297930 4492 flags.go:64] FLAG: --pods-per-core="0"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297934 4492 flags.go:64] FLAG: --port="10250"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297938 4492 flags.go:64] FLAG: --protect-kernel-defaults="false"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297942 4492 flags.go:64] FLAG: --provider-id=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297946 4492 flags.go:64] FLAG: --qos-reserved=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297950 4492 flags.go:64] FLAG: --read-only-port="10255"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297954 4492 flags.go:64] FLAG: --register-node="true"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297958 4492 flags.go:64] FLAG: --register-schedulable="true"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297962 4492 flags.go:64] FLAG: --register-with-taints="node-role.kubernetes.io/master=:NoSchedule"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297969 4492 flags.go:64] FLAG: --registry-burst="10"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297973 4492 flags.go:64] FLAG: --registry-qps="5"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297977 4492 flags.go:64] FLAG: --reserved-cpus=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297981 4492 flags.go:64] FLAG: --reserved-memory=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297986 4492 flags.go:64] FLAG: --resolv-conf="/etc/resolv.conf"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297990 4492 flags.go:64] FLAG: --root-dir="/var/lib/kubelet"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297994 4492 flags.go:64] FLAG: --rotate-certificates="false"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.297998 4492 flags.go:64] FLAG: --rotate-server-certificates="false"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.298001 4492 flags.go:64] FLAG: --runonce="false"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.298005 4492 flags.go:64] FLAG: --runtime-cgroups="/system.slice/crio.service"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.298009 4492 flags.go:64] FLAG: --runtime-request-timeout="2m0s"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.298014 4492 flags.go:64] FLAG: --seccomp-default="false"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.298018 4492 flags.go:64] FLAG: --serialize-image-pulls="true"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.298021 4492 flags.go:64] FLAG: --storage-driver-buffer-duration="1m0s"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.298025 4492 flags.go:64] FLAG: --storage-driver-db="cadvisor"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.298029 4492 flags.go:64] FLAG: --storage-driver-host="localhost:8086"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.298033 4492 flags.go:64] FLAG: --storage-driver-password="root"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.298038 4492 flags.go:64] FLAG: --storage-driver-secure="false"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.298042 4492 flags.go:64] FLAG: --storage-driver-table="stats"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.298045 4492 flags.go:64] FLAG: --storage-driver-user="root"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.298049 4492 flags.go:64] FLAG: --streaming-connection-idle-timeout="4h0m0s"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.298053 4492 flags.go:64] FLAG: --sync-frequency="1m0s"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.298056 4492 flags.go:64] FLAG: --system-cgroups=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.298060 4492 flags.go:64] FLAG: --system-reserved="cpu=200m,ephemeral-storage=350Mi,memory=350Mi"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.298066 4492 flags.go:64] FLAG: --system-reserved-cgroup=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.298070 4492 flags.go:64] FLAG: --tls-cert-file=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.298074 4492 flags.go:64] FLAG: --tls-cipher-suites="[]"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.298079 4492 flags.go:64] FLAG: --tls-min-version=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.298083 4492 flags.go:64] FLAG: --tls-private-key-file=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.298087 4492 flags.go:64] FLAG: --topology-manager-policy="none"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.298090 4492 flags.go:64] FLAG: --topology-manager-policy-options=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.298094 4492 flags.go:64] FLAG: --topology-manager-scope="container"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.298098 4492 flags.go:64] FLAG: --v="2"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.298104 4492 flags.go:64] FLAG: --version="false"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.298108 4492 flags.go:64] FLAG: --vmodule=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.298113 4492 flags.go:64] FLAG: --volume-plugin-dir="/etc/kubernetes/kubelet-plugins/volume/exec"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.298117 4492 flags.go:64] FLAG: --volume-stats-agg-period="1m0s"
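Several of the flags dumped above are the ones the deprecation warnings earlier in this log tell you to move into the KubeletConfiguration file named by --config (/etc/kubernetes/kubelet.conf here). A minimal sketch of that mapping using the upstream k8s.io/kubelet/config/v1beta1 types; the field names are assumptions based on the upstream API, and the values are copied from the FLAG: lines above, so treat this as illustrative rather than the exact config this node uses:

    package main

    import (
    	"fmt"

    	corev1 "k8s.io/api/core/v1"
    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    	kubeletv1beta1 "k8s.io/kubelet/config/v1beta1"
    	"sigs.k8s.io/yaml"
    )

    func main() {
    	// Mirror a few of the deprecated CLI flags as KubeletConfiguration fields.
    	cfg := kubeletv1beta1.KubeletConfiguration{
    		TypeMeta: metav1.TypeMeta{
    			APIVersion: "kubelet.config.k8s.io/v1beta1",
    			Kind:       "KubeletConfiguration",
    		},
    		// was --container-runtime-endpoint
    		ContainerRuntimeEndpoint: "/var/run/crio/crio.sock",
    		// was --volume-plugin-dir
    		VolumePluginDir: "/etc/kubernetes/kubelet-plugins/volume/exec",
    		// was --system-reserved
    		SystemReserved: map[string]string{
    			"cpu":               "200m",
    			"ephemeral-storage": "350Mi",
    			"memory":            "350Mi",
    		},
    		// was --register-with-taints
    		RegisterWithTaints: []corev1.Taint{{
    			Key:    "node-role.kubernetes.io/master",
    			Effect: corev1.TaintEffectNoSchedule,
    		}},
    	}
    	out, err := yaml.Marshal(&cfg)
    	if err != nil {
    		panic(err)
    	}
    	// YAML suitable for the file named by --config.
    	fmt.Print(string(out))
    }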
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.298520 4492 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.304881 4492 server.go:491] "Kubelet version" kubeletVersion="v1.31.5"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.304913 4492 server.go:493] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
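The long runs of "unrecognized feature gate" warnings above (the kubelet re-parses its gate list several times during startup, so identical blocks repeat; only the first pass is kept here) come from OpenShift handing cluster-level gates such as GatewayAPI or NewOLM to a kubelet that only registers upstream Kubernetes gates; only the recognized ones land in the "feature gates:" map. A rough, self-contained sketch of that filtering using k8s.io/component-base/featuregate; the warn-and-skip loop is our own illustration under that assumption, not the kubelet's actual code path:

    package main

    import (
    	"fmt"

    	"k8s.io/component-base/featuregate"
    )

    func main() {
    	// Register the gates this binary actually knows about (a tiny subset).
    	known := featuregate.NewFeatureGate()
    	_ = known.Add(map[featuregate.Feature]featuregate.FeatureSpec{
    		"CloudDualStackNodeIPs": {Default: true, PreRelease: featuregate.GA},
    		"KMSv1":                 {Default: false, PreRelease: featuregate.Deprecated},
    	})

    	// Gates handed down from the cluster config; some are OpenShift-only.
    	requested := map[string]bool{
    		"CloudDualStackNodeIPs": true,
    		"KMSv1":                 true,
    		"GatewayAPI":            true, // unknown to this binary
    	}

    	// Illustrative warn-and-skip loop: apply known gates, warn on the rest.
    	recognized := map[string]bool{}
    	for name, enabled := range requested {
    		if _, ok := known.GetAll()[featuregate.Feature(name)]; !ok {
    			fmt.Printf("W unrecognized feature gate: %s\n", name)
    			continue
    		}
    		recognized[name] = enabled
    	}
    	if err := known.SetFromMap(recognized); err != nil {
    		panic(err)
    	}
    	fmt.Println("KMSv1 enabled:", known.Enabled("KMSv1"))
    }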
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.306450 4492 server.go:940] "Client rotation is on, will bootstrap in background"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.308910 4492 bootstrap.go:85] "Current kubeconfig file contents are still valid, no bootstrap necessary"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.308980 4492 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-client-current.pem".
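The rotation arithmetic in the entries that follow (expiry 2026-02-24, rotation deadline 2025-12-20, then "Waiting 580h...") matches the kubelet's usual scheme of picking a jittered deadline most of the way through the certificate's validity window. A small approximation; the 70-90% window is an assumption about the upstream jitter, nextRotationDeadline is our own hypothetical helper rather than the kubelet's function, and the notBefore value is invented for illustration (only the expiry comes from the log):

    package main

    import (
    	"fmt"
    	"math/rand"
    	"time"
    )

    // nextRotationDeadline approximates the kubelet's behavior: rotate at a
    // random point 70-90% of the way through the cert's validity window, so a
    // fleet of nodes does not renew all at once.
    func nextRotationDeadline(notBefore, notAfter time.Time) time.Time {
    	total := notAfter.Sub(notBefore)
    	jittered := time.Duration(float64(total) * (0.7 + 0.2*rand.Float64()))
    	return notBefore.Add(jittered)
    }

    func main() {
    	// Expiry matches the log below; the issue time is an assumption.
    	notBefore := time.Date(2025, 2, 24, 5, 52, 8, 0, time.UTC)
    	notAfter := time.Date(2026, 2, 24, 5, 52, 8, 0, time.UTC)
    	deadline := nextRotationDeadline(notBefore, notAfter)
    	fmt.Printf("rotation deadline is %s\n", deadline)
    	fmt.Printf("Waiting %s for next certificate rotation\n",
    		time.Until(deadline).Round(time.Second))
    }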
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.309774 4492 server.go:997] "Starting client certificate rotation"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.309795 4492 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate rotation is enabled
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.310056 4492 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2026-02-24 05:52:08 +0000 UTC, rotation deadline is 2025-12-20 11:02:35.960237736 +0000 UTC
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.310185 4492 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Waiting 580h14m11.650054779s for next certificate rotation
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.321670 4492 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.324471 4492 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.334722 4492 log.go:25] "Validated CRI v1 runtime API"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.351165 4492 log.go:25] "Validated CRI v1 image API"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.353334 4492 server.go:1437] "Using cgroup driver setting received from the CRI runtime" cgroupDriver="systemd"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.357524 4492 fs.go:133] Filesystem UUIDs: map[0b076daa-c26a-46d2-b3a6-72a8dbc6e257:/dev/vda4 2025-11-26-06-44-51-00:/dev/sr0 7B77-95E7:/dev/vda2 de0497b0-db1b-465a-b278-03db02455c71:/dev/vda3]
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.357550 4492 fs.go:134] Filesystem partitions: map[/dev/shm:{mountpoint:/dev/shm major:0 minor:22 fsType:tmpfs blockSize:0} /dev/vda3:{mountpoint:/boot major:252 minor:3 fsType:ext4 blockSize:0} /dev/vda4:{mountpoint:/var major:252 minor:4 fsType:xfs blockSize:0} /run:{mountpoint:/run major:0 minor:24 fsType:tmpfs blockSize:0} /run/user/1000:{mountpoint:/run/user/1000 major:0 minor:49 fsType:tmpfs blockSize:0} /tmp:{mountpoint:/tmp major:0 minor:30 fsType:tmpfs blockSize:0} /var/lib/containers/storage/overlay-containers/75d81934760b26101869fbd8e4b5954c62b019c1cc3e5a0c9f82ed8de46b3b22/userdata/shm:{mountpoint:/var/lib/containers/storage/overlay-containers/75d81934760b26101869fbd8e4b5954c62b019c1cc3e5a0c9f82ed8de46b3b22/userdata/shm major:0 minor:42 fsType:tmpfs blockSize:0} /var/lib/etcd:{mountpoint:/var/lib/etcd major:0 minor:50 fsType:tmpfs blockSize:0} overlay_0-43:{mountpoint:/var/lib/containers/storage/overlay/94b752e0a51c0134b00ddef6dc7a933a9d7c1d9bdc88a18dae4192a0d557d623/merged major:0 minor:43 fsType:overlay blockSize:0}]
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.367857 4492 manager.go:217] Machine: {Timestamp:2025-11-26 06:48:24.366726992 +0000 UTC m=+0.250615300 CPUVendorID:AuthenticAMD NumCores:8 NumPhysicalCores:1 NumSockets:8 CpuFrequency:2445406 MemoryCapacity:25199468544 SwapCapacity:0 MemoryByType:map[] NVMInfo:{MemoryModeCapacity:0 AppDirectModeCapacity:0 AvgPowerBudget:0} HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] MachineID:21801e6708c44f15b81395eb736a7cec SystemUUID:836cf739-0185-4d24-bd92-dec4516ccf4f BootID:5a30a4c6-2314-4103-8c18-44e795d62516 Filesystems:[{Device:/var/lib/etcd DeviceMajor:0 DeviceMinor:50 Capacity:1073741824 Type:vfs Inodes:3076107 HasInodes:true} {Device:/dev/shm DeviceMajor:0 DeviceMinor:22 Capacity:12599734272 Type:vfs Inodes:3076107 HasInodes:true} {Device:/tmp DeviceMajor:0 DeviceMinor:30 Capacity:12599734272 Type:vfs Inodes:1048576 HasInodes:true} {Device:/dev/vda3 DeviceMajor:252 DeviceMinor:3 Capacity:366869504 Type:vfs Inodes:98304 HasInodes:true} {Device:/var/lib/containers/storage/overlay-containers/75d81934760b26101869fbd8e4b5954c62b019c1cc3e5a0c9f82ed8de46b3b22/userdata/shm DeviceMajor:0 DeviceMinor:42 Capacity:65536000 Type:vfs Inodes:3076107 HasInodes:true} {Device:/run/user/1000 DeviceMajor:0 DeviceMinor:49 Capacity:2519945216 Type:vfs Inodes:615221 HasInodes:true} {Device:/run DeviceMajor:0 DeviceMinor:24 Capacity:5039894528 Type:vfs Inodes:819200 HasInodes:true} {Device:/dev/vda4 DeviceMajor:252 DeviceMinor:4 Capacity:85292941312 Type:vfs Inodes:41679680 HasInodes:true} {Device:overlay_0-43 DeviceMajor:0 DeviceMinor:43 Capacity:85292941312 Type:vfs Inodes:41679680 HasInodes:true}] DiskMap:map[252:0:{Name:vda Major:252 Minor:0 Size:429496729600 Scheduler:none}] NetworkDevices:[{Name:br-ex MacAddress:fa:16:3e:82:7c:c7 Speed:0 Mtu:1500} {Name:br-int MacAddress:d6:39:55:2e:22:71 Speed:0 Mtu:1400} {Name:enp3s0 MacAddress:fa:16:3e:82:7c:c7 Speed:-1 Mtu:1500} {Name:enp7s0 MacAddress:fa:16:3e:71:c3:d3 Speed:-1 Mtu:1440} {Name:enp7s0.20 MacAddress:52:54:00:a5:6d:42 Speed:-1 Mtu:1436} {Name:enp7s0.21 MacAddress:52:54:00:d8:e5:4a Speed:-1 Mtu:1436} {Name:enp7s0.22 MacAddress:52:54:00:a3:9a:af Speed:-1 Mtu:1436} {Name:eth10 MacAddress:02:25:27:f9:3f:d9 Speed:0 Mtu:1500} {Name:ovn-k8s-mp0 MacAddress:0a:58:0a:d9:00:02 Speed:0 Mtu:1400} {Name:ovs-system MacAddress:7e:e3:ee:a5:49:5e Speed:0 Mtu:1500}] Topology:[{Id:0 Memory:25199468544 HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] Cores:[{Id:0 Threads:[0] Caches:[{Id:0 Size:65536 Type:Data Level:1} {Id:0 Size:65536 Type:Instruction Level:1} {Id:0 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:0 Size:16777216 Type:Unified Level:3}] SocketID:0 BookID: DrawerID:} {Id:0 Threads:[1] Caches:[{Id:1 Size:65536 Type:Data Level:1} {Id:1 Size:65536 Type:Instruction Level:1} {Id:1 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:1 Size:16777216 Type:Unified Level:3}] SocketID:1 BookID: DrawerID:} {Id:0 Threads:[2] Caches:[{Id:2 Size:65536 Type:Data Level:1} {Id:2 Size:65536 Type:Instruction Level:1} {Id:2 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:2 Size:16777216 Type:Unified Level:3}] SocketID:2 BookID: DrawerID:} {Id:0 Threads:[3] Caches:[{Id:3 Size:65536 Type:Data Level:1} {Id:3 Size:65536 Type:Instruction Level:1} {Id:3 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:3 Size:16777216 Type:Unified Level:3}] SocketID:3 BookID: DrawerID:} {Id:0 Threads:[4] Caches:[{Id:4 Size:65536 Type:Data Level:1} {Id:4 Size:65536 Type:Instruction Level:1} {Id:4 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:4 Size:16777216 Type:Unified Level:3}] SocketID:4 BookID: DrawerID:} {Id:0 Threads:[5] Caches:[{Id:5 Size:65536 Type:Data Level:1} {Id:5 Size:65536 Type:Instruction Level:1} {Id:5 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:5 Size:16777216 Type:Unified Level:3}] SocketID:5 BookID: DrawerID:} {Id:0 Threads:[6] Caches:[{Id:6 Size:65536 Type:Data Level:1} {Id:6 Size:65536 Type:Instruction Level:1} {Id:6 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:6 Size:16777216 Type:Unified Level:3}] SocketID:6 BookID: DrawerID:} {Id:0 Threads:[7] Caches:[{Id:7 Size:65536 Type:Data Level:1} {Id:7 Size:65536 Type:Instruction Level:1} {Id:7 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:7 Size:16777216 Type:Unified Level:3}] SocketID:7 BookID: DrawerID:}] Caches:[] Distances:[10]}] CloudProvider:Unknown InstanceType:Unknown InstanceID:None}
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.368000 4492 manager_no_libpfm.go:29] cAdvisor is build without cgo and/or libpfm support. Perf event counters are not available.
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.368080 4492 manager.go:233] Version: {KernelVersion:5.14.0-427.50.2.el9_4.x86_64 ContainerOsVersion:Red Hat Enterprise Linux CoreOS 418.94.202502100215-0 DockerVersion: DockerAPIVersion: CadvisorVersion: CadvisorRevision:}
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.368662 4492 swap_util.go:113] "Swap is on" /proc/swaps contents="Filename\t\t\t\tType\t\tSize\t\tUsed\t\tPriority"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.368810 4492 container_manager_linux.go:267] "Container manager verified user specified cgroup-root exists" cgroupRoot=[]
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.368837 4492 container_manager_linux.go:272] "Creating Container Manager object based on Node Config" nodeConfig={"NodeName":"crc","RuntimeCgroupsName":"/system.slice/crio.service","SystemCgroupsName":"/system.slice","KubeletCgroupsName":"","KubeletOOMScoreAdj":-999,"ContainerRuntime":"","CgroupsPerQOS":true,"CgroupRoot":"/","CgroupDriver":"systemd","KubeletRootDir":"/var/lib/kubelet","ProtectKernelDefaults":true,"KubeReservedCgroupName":"","SystemReservedCgroupName":"","ReservedSystemCPUs":{},"EnforceNodeAllocatable":{"pods":{}},"KubeReserved":null,"SystemReserved":{"cpu":"200m","ephemeral-storage":"350Mi","memory":"350Mi"},"HardEvictionThresholds":[{"Signal":"imagefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"memory.available","Operator":"LessThan","Value":{"Quantity":"100Mi","Percentage":0},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.1},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.15},"GracePeriod":0,"MinReclaim":null}],"QOSReserved":{},"CPUManagerPolicy":"none","CPUManagerPolicyOptions":null,"TopologyManagerScope":"container","CPUManagerReconcilePeriod":10000000000,"ExperimentalMemoryManagerPolicy":"None","ExperimentalMemoryManagerReservedMemory":null,"PodPidsLimit":4096,"EnforceCPULimits":true,"CPUCFSQuotaPeriod":100000000,"TopologyManagerPolicy":"none","TopologyManagerPolicyOptions":null,"CgroupVersion":2}
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.369011 4492 topology_manager.go:138] "Creating topology manager with none policy"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.369020 4492 container_manager_linux.go:303] "Creating device plugin manager"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.369298 4492 manager.go:142] "Creating Device Plugin manager" path="/var/lib/kubelet/device-plugins/kubelet.sock"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.369323 4492 server.go:66] "Creating device plugin registration server" version="v1beta1" socket="/var/lib/kubelet/device-plugins/kubelet.sock"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.369401 4492 state_mem.go:36] "Initialized new in-memory state store"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.369485 4492 server.go:1245] "Using root directory" path="/var/lib/kubelet"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.371013 4492 kubelet.go:418] "Attempting to sync node with API server"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.371031 4492 kubelet.go:313] "Adding static pod path" path="/etc/kubernetes/manifests"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.371056 4492 file.go:69] "Watching path" path="/etc/kubernetes/manifests"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.371066 4492 kubelet.go:324] "Adding apiserver pod source"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.371076 4492 apiserver.go:42] "Waiting for node sync before watching apiserver pods"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.373267 4492 kuberuntime_manager.go:262] "Container runtime initialized" containerRuntime="cri-o" version="1.31.5-4.rhaos4.18.gitdad78d5.el9" apiVersion="v1"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.375786 4492 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-server-current.pem".
Nov 26 06:48:24 crc kubenswrapper[4492]: W1126 06:48:24.375786 4492 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 192.168.25.180:6443: connect: connection refused
Nov 26 06:48:24 crc kubenswrapper[4492]: E1126 06:48:24.375928 4492 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 192.168.25.180:6443: connect: connection refused" logger="UnhandledError"
Nov 26 06:48:24 crc kubenswrapper[4492]: W1126 06:48:24.375789 4492 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 192.168.25.180:6443: connect: connection refused
Nov 26 06:48:24 crc kubenswrapper[4492]: E1126 06:48:24.375965 4492 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 192.168.25.180:6443: connect: connection refused" logger="UnhandledError"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.378404 4492 kubelet.go:854] "Not starting ClusterTrustBundle informer because we are in static kubelet mode"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.379408 4492 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/portworx-volume"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.379436 4492 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/empty-dir"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.379445 4492 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/git-repo"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.379454 4492 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/host-path"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.379468 4492 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/nfs"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.379476 4492 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/secret"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.379483 4492 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/iscsi"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.379494 4492 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/downward-api"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.379504 4492 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/fc"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.379514 4492 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/configmap"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.379525 4492 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/projected"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.379534 4492 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/local-volume"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.380398 4492 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/csi"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.380892 4492 server.go:1280] "Started kubelet"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.381407 4492 server.go:163] "Starting to listen" address="0.0.0.0" port=10250
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.381584 4492 ratelimit.go:55] "Setting rate limiting for endpoint" service="podresources" qps=100 burstTokens=10
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.381664 4492 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 192.168.25.180:6443: connect: connection refused
Nov 26 06:48:24 crc systemd[1]: Started Kubernetes Kubelet.
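Editor's note: the certificate_manager.go:356 entries above are plain time arithmetic. A rotation deadline is picked somewhere late in the certificate's validity window (randomized upstream; the 80 to 90 percent band below is an assumption for illustration), and the manager then sleeps until that deadline. A sketch reproducing the logged wait:

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// rotationDeadline picks a jittered point late in the validity window.
// The 0.8-0.9 band is an assumption, not the kubelet's exact fraction.
func rotationDeadline(notBefore, notAfter time.Time, r *rand.Rand) time.Time {
	validity := notAfter.Sub(notBefore)
	frac := 0.8 + 0.1*r.Float64()
	return notBefore.Add(time.Duration(float64(validity) * frac))
}

func main() {
	// Deadline and "now" copied from the log entries above.
	deadline := time.Date(2025, time.December, 20, 11, 2, 35, 960237736, time.UTC)
	now := time.Date(2025, time.November, 26, 6, 48, 24, 310185000, time.UTC)
	fmt.Println(deadline.Sub(now)) // ~580h14m11.65s, matching the logged wait

	// A hypothetical one-year cert ending at the logged expiration time.
	notAfter := time.Date(2026, time.February, 24, 5, 52, 8, 0, time.UTC)
	r := rand.New(rand.NewSource(42))
	fmt.Println(rotationDeadline(notAfter.AddDate(-1, 0, 0), notAfter, r))
}
```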
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.382240 4492 server.go:236] "Starting to serve the podresources API" endpoint="unix:/var/lib/kubelet/pod-resources/kubelet.sock"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.384313 4492 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate rotation is enabled
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.384426 4492 fs_resource_analyzer.go:67] "Starting FS ResourceAnalyzer"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.385072 4492 volume_manager.go:287] "The desired_state_of_world populator starts"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.385090 4492 volume_manager.go:289] "Starting Kubelet Volume Manager"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.384481 4492 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-05 03:08:23.267911282 +0000 UTC
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.385204 4492 certificate_manager.go:356] kubernetes.io/kubelet-serving: Waiting 212h19m58.882709609s for next certificate rotation
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.385270 4492 desired_state_of_world_populator.go:146] "Desired state populator starts to run"
Nov 26 06:48:24 crc kubenswrapper[4492]: E1126 06:48:24.385452 4492 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.385651 4492 server.go:460] "Adding debug handlers to kubelet server"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.388610 4492 factory.go:55] Registering systemd factory
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.388635 4492 factory.go:221] Registration of the systemd container factory successfully
Nov 26 06:48:24 crc kubenswrapper[4492]: W1126 06:48:24.388740 4492 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 192.168.25.180:6443: connect: connection refused
Nov 26 06:48:24 crc kubenswrapper[4492]: E1126 06:48:24.388797 4492 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 192.168.25.180:6443: connect: connection refused" logger="UnhandledError"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.388866 4492 factory.go:153] Registering CRI-O factory
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.388876 4492 factory.go:221] Registration of the crio container factory successfully
Nov 26 06:48:24 crc kubenswrapper[4492]: E1126 06:48:24.388883 4492 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 192.168.25.180:6443: connect: connection refused" interval="200ms"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.388936 4492 factory.go:219] Registration of the containerd container factory failed: unable to create containerd client: containerd: cannot unix dial containerd api service: dial unix /run/containerd/containerd.sock: connect: no such file or directory
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.389034 4492 factory.go:103] Registering Raw factory
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.389067 4492 manager.go:1196] Started watching for new ooms in manager
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.389690 4492 manager.go:319] Starting recovery of all containers
Nov 26 06:48:24 crc kubenswrapper[4492]: E1126 06:48:24.388880 4492 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 192.168.25.180:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.187b7bb6ddc8aa77 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-26 06:48:24.380861047 +0000 UTC m=+0.264749345,LastTimestamp:2025-11-26 06:48:24.380861047 +0000 UTC m=+0.264749345,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.399063 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.399487 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.399562 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.399663 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.399723 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.401338 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.401456 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.401587 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.401690 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.403240 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.403290 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.403325 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.403340 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.403358 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.403370 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.403380 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.403391 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.403400 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.403412 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.403422 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.403433 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.403473 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.403485 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.403499 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.403512 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.403523 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.403540 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.403552 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.403565 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.403575 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.403589 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.403599 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.403610 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.403619 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.403631 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.403641 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.403652 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.403664 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.403677 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.403687 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.403703 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.403714 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.403730 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.403740 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.403753 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.403763 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.403774 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.403784 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.403796 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.403807 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.403816 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.403826 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.403843 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.403854 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.403865 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.403877 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.403897 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.403910 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" volumeName="kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.403922 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.403937 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.403951 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.403962 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.403972 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.403983 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.403994 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.404005 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.404016 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.404028 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.404038 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.404050 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.404059 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.404069 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.404107 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.404121 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.404136 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.404147 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.404159 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.404188 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.404199 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.404211 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.404222 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.404233 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.404243 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.404253 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.404263 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.404273 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.404290 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.404301 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3b6479f0-333b-4a96-9adf-2099afdc2447" volumeName="kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.404315 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.404325 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49ef4625-1d3a-4a9f-b595-c2433d32326d" volumeName="kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.404338 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.404349 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.404358 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.404369 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.404380 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.404391 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.404400 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.404412 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.404422 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.404432 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.407360 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.407377 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.407387 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.407398 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.407415 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.407431 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.407444 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.407455 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.407466 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.407478 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.407488 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.407499 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.407508 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.407517 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.407529 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.407540 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.407549 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.407559 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.407571 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.407588 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.407597 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.407606 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.407618 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.407627 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.407637 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.407647 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.407663 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.407672 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.407680 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="44663579-783b-4372-86d6-acf235a62d72" volumeName="kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.407690 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.407698 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.407708 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.407719 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.407729 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.407739 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.407749 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.407759 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.407769 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.407809 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert" seLinuxMountContext=""
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.407821 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f"
volumeName="kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" seLinuxMountContext="" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.407831 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" seLinuxMountContext="" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.407844 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" seLinuxMountContext="" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.407855 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" seLinuxMountContext="" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.407865 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" seLinuxMountContext="" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.407879 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" seLinuxMountContext="" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.407892 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" seLinuxMountContext="" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.407904 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" seLinuxMountContext="" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.409916 4492 reconstruct.go:144] "Volume is marked device as uncertain and added into the actual state" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" deviceMountPath="/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.409998 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" seLinuxMountContext="" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.410027 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" seLinuxMountContext="" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.410044 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual 
state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" seLinuxMountContext="" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.410077 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" volumeName="kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" seLinuxMountContext="" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.410115 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" seLinuxMountContext="" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.410142 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" seLinuxMountContext="" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.410227 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" seLinuxMountContext="" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.410256 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" seLinuxMountContext="" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.410296 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" seLinuxMountContext="" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.410322 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" seLinuxMountContext="" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.410353 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" seLinuxMountContext="" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.410382 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" seLinuxMountContext="" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.410401 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" seLinuxMountContext="" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.410421 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the 
actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" seLinuxMountContext="" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.410439 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" seLinuxMountContext="" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.410461 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" seLinuxMountContext="" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.410477 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" seLinuxMountContext="" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.410496 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" seLinuxMountContext="" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.410514 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" seLinuxMountContext="" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.410540 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" seLinuxMountContext="" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.410560 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" seLinuxMountContext="" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.410583 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" seLinuxMountContext="" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.410604 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" seLinuxMountContext="" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.410641 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" seLinuxMountContext="" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.410662 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" 
pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" seLinuxMountContext="" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.410682 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" seLinuxMountContext="" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.410706 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" seLinuxMountContext="" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.410727 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d751cbb-f2e2-430d-9754-c882a5e924a5" volumeName="kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl" seLinuxMountContext="" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.410750 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" seLinuxMountContext="" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.410770 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" seLinuxMountContext="" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.410784 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" seLinuxMountContext="" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.410814 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" seLinuxMountContext="" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.410844 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" seLinuxMountContext="" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.410872 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" seLinuxMountContext="" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.410893 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" seLinuxMountContext="" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.410917 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" seLinuxMountContext="" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.410943 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" seLinuxMountContext="" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.410961 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" seLinuxMountContext="" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.410991 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" seLinuxMountContext="" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.411017 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" seLinuxMountContext="" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.411041 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" seLinuxMountContext="" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.411059 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" seLinuxMountContext="" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.411078 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" seLinuxMountContext="" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.411098 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" seLinuxMountContext="" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.411117 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" seLinuxMountContext="" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.411136 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" seLinuxMountContext="" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.411152 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" seLinuxMountContext="" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.411192 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" seLinuxMountContext="" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.411203 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" seLinuxMountContext="" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.411213 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" seLinuxMountContext="" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.411230 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" seLinuxMountContext="" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.411242 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" seLinuxMountContext="" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.411256 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" seLinuxMountContext="" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.411269 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" seLinuxMountContext="" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.411296 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" seLinuxMountContext="" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.411308 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" seLinuxMountContext="" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.411320 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" seLinuxMountContext="" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.411333 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" seLinuxMountContext="" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.411343 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" seLinuxMountContext="" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.411360 4492 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" seLinuxMountContext="" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.411372 4492 reconstruct.go:97] "Volume reconstruction finished" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.411380 4492 reconciler.go:26] "Reconciler: start to sync state" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.412638 4492 manager.go:324] Recovery completed Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.421498 4492 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.422829 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.422944 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.423036 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.424722 4492 cpu_manager.go:225] "Starting CPU manager" policy="none" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.425094 4492 cpu_manager.go:226] "Reconciling" reconcilePeriod="10s" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.425376 4492 state_mem.go:36] "Initialized new in-memory state store" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.433313 4492 policy_none.go:49] "None policy: Start" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.434469 4492 kubelet_network_linux.go:50] "Initialized iptables rules." protocol="IPv4" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.434231 4492 memory_manager.go:170] "Starting memorymanager" policy="None" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.434822 4492 state_mem.go:35] "Initializing new in-memory state store" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.437229 4492 kubelet_network_linux.go:50] "Initialized iptables rules." 
protocol="IPv6" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.437282 4492 status_manager.go:217] "Starting to sync pod status with apiserver" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.437315 4492 kubelet.go:2335] "Starting kubelet main sync loop" Nov 26 06:48:24 crc kubenswrapper[4492]: E1126 06:48:24.437356 4492 kubelet.go:2359] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]" Nov 26 06:48:24 crc kubenswrapper[4492]: W1126 06:48:24.437999 4492 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 192.168.25.180:6443: connect: connection refused Nov 26 06:48:24 crc kubenswrapper[4492]: E1126 06:48:24.438039 4492 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 192.168.25.180:6443: connect: connection refused" logger="UnhandledError" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.481297 4492 manager.go:334] "Starting Device Plugin manager" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.481371 4492 manager.go:513] "Failed to read data from checkpoint" checkpoint="kubelet_internal_checkpoint" err="checkpoint is not found" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.481392 4492 server.go:79] "Starting device plugin registration server" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.481885 4492 eviction_manager.go:189] "Eviction manager: starting control loop" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.481911 4492 container_log_manager.go:189] "Initializing container log rotate workers" workers=1 monitorPeriod="10s" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.482067 4492 plugin_watcher.go:51] "Plugin Watcher Start" path="/var/lib/kubelet/plugins_registry" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.482185 4492 plugin_manager.go:116] "The desired_state_of_world populator (plugin watcher) starts" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.482200 4492 plugin_manager.go:118] "Starting Kubelet Plugin Manager" Nov 26 06:48:24 crc kubenswrapper[4492]: E1126 06:48:24.490699 4492 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.538314 4492 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc","openshift-etcd/etcd-crc","openshift-kube-apiserver/kube-apiserver-crc","openshift-kube-controller-manager/kube-controller-manager-crc","openshift-kube-scheduler/openshift-kube-scheduler-crc"] Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.538468 4492 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.540086 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.540149 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.540162 
4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.540479 4492 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.540875 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.540932 4492 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.542336 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.542363 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.542374 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.542422 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.542443 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.542461 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.542547 4492 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.542664 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.542711 4492 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.543311 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.543344 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.543369 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.543549 4492 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.543840 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.543876 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.543917 4492 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.543908 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.543960 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.545011 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.545076 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.545091 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.545394 4492 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.545641 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.545737 4492 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.547639 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.547678 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.547688 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.547987 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.548031 4492 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.548658 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.548679 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.548693 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.549425 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.549568 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.549573 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.550240 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.549623 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.550313 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.582894 4492 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.583749 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.583799 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.583811 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.583833 4492 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 26 06:48:24 crc kubenswrapper[4492]: E1126 06:48:24.584231 4492 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 192.168.25.180:6443: connect: connection refused" node="crc" Nov 26 06:48:24 crc kubenswrapper[4492]: E1126 06:48:24.589847 4492 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 192.168.25.180:6443: connect: connection refused" interval="400ms" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.612214 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: 
\"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.612248 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.612273 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.612297 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.612323 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.612345 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.612460 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.612511 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.612535 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.612566 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " 
pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.612588 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.612649 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.612700 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.612729 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.612752 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.713768 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.713811 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.713833 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.713855 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 26 06:48:24 crc 
kubenswrapper[4492]: I1126 06:48:24.713874 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.713892 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.713909 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.713925 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.713940 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.713957 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.713973 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.713993 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.713996 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.714008 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.714061 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.714130 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.714128 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.714152 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.714166 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.714208 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.714227 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.714248 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.714272 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.714306 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.714326 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.714357 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.714384 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.714415 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.714192 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.714443 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.784705 4492 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.786122 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.786158 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.786188 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.786212 4492 kubelet_node_status.go:76] "Attempting to register node" node="crc"
Nov 26 06:48:24 crc kubenswrapper[4492]: E1126 06:48:24.786515 4492 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 192.168.25.180:6443: connect: connection refused" node="crc"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.866665 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.875596 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc"
Nov 26 06:48:24 crc kubenswrapper[4492]: W1126 06:48:24.887093 4492 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd1b160f5dda77d281dd8e69ec8d817f9.slice/crio-0d7bf99cdc713cd223c766d4ff77c97223e78e7c4ad660e2ad72cefdd87760ec WatchSource:0}: Error finding container 0d7bf99cdc713cd223c766d4ff77c97223e78e7c4ad660e2ad72cefdd87760ec: Status 404 returned error can't find the container with id 0d7bf99cdc713cd223c766d4ff77c97223e78e7c4ad660e2ad72cefdd87760ec
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.891566 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.904212 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 26 06:48:24 crc kubenswrapper[4492]: I1126 06:48:24.907616 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Nov 26 06:48:24 crc kubenswrapper[4492]: W1126 06:48:24.909616 4492 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf4b27818a5e8e43d0dc095d08835c792.slice/crio-7ac5a17819e26c5fc4c39b3f571e3ed687bcf0db2a72bceec6f7686045360751 WatchSource:0}: Error finding container 7ac5a17819e26c5fc4c39b3f571e3ed687bcf0db2a72bceec6f7686045360751: Status 404 returned error can't find the container with id 7ac5a17819e26c5fc4c39b3f571e3ed687bcf0db2a72bceec6f7686045360751
Nov 26 06:48:24 crc kubenswrapper[4492]: W1126 06:48:24.951442 4492 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3dcd261975c3d6b9a6ad6367fd4facd3.slice/crio-7e2602b43f4dbf448ed20c0f458188f97f8edb81600a334bd4115a0461adaabd WatchSource:0}: Error finding container 7e2602b43f4dbf448ed20c0f458188f97f8edb81600a334bd4115a0461adaabd: Status 404 returned error can't find the container with id 7e2602b43f4dbf448ed20c0f458188f97f8edb81600a334bd4115a0461adaabd
Nov 26 06:48:24 crc kubenswrapper[4492]: W1126 06:48:24.953106 4492 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf614b9022728cf315e60c057852e563e.slice/crio-2aae2fd1e6bcdef8f77743443d56c4d3f1d03b5ca06b6e1a265a240b3568233e WatchSource:0}: Error finding container 2aae2fd1e6bcdef8f77743443d56c4d3f1d03b5ca06b6e1a265a240b3568233e: Status 404 returned error can't find the container with id 2aae2fd1e6bcdef8f77743443d56c4d3f1d03b5ca06b6e1a265a240b3568233e
Nov 26 06:48:24 crc kubenswrapper[4492]: E1126 06:48:24.991517 4492 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 192.168.25.180:6443: connect: connection refused" interval="800ms"
Nov 26 06:48:25 crc kubenswrapper[4492]: I1126 06:48:25.187065 4492 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 26 06:48:25 crc kubenswrapper[4492]: I1126 06:48:25.188096 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:48:25 crc kubenswrapper[4492]: I1126 06:48:25.188131 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:48:25 crc kubenswrapper[4492]: I1126 06:48:25.188143 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:48:25 crc kubenswrapper[4492]: I1126 06:48:25.188197 4492 kubelet_node_status.go:76] "Attempting to register node" node="crc"
Nov 26 06:48:25 crc kubenswrapper[4492]: E1126 06:48:25.188582 4492 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 192.168.25.180:6443: connect: connection refused" node="crc"
Nov 26 06:48:25 crc kubenswrapper[4492]: W1126 06:48:25.253496 4492 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 192.168.25.180:6443: connect: connection refused
Nov 26 06:48:25 crc kubenswrapper[4492]: E1126 06:48:25.253583 4492 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 192.168.25.180:6443: connect: connection refused" logger="UnhandledError"
Nov 26 06:48:25 crc kubenswrapper[4492]: I1126 06:48:25.383072 4492 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 192.168.25.180:6443: connect: connection refused
Nov 26 06:48:25 crc kubenswrapper[4492]: I1126 06:48:25.442298 4492 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="4421dd0243f7239dfa2ede847b9cb5230663e69a214dca3dfb1a4763ea76b233" exitCode=0
Nov 26 06:48:25 crc kubenswrapper[4492]: I1126 06:48:25.442409 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"4421dd0243f7239dfa2ede847b9cb5230663e69a214dca3dfb1a4763ea76b233"}
Nov 26 06:48:25 crc kubenswrapper[4492]: I1126 06:48:25.442465 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"7ac5a17819e26c5fc4c39b3f571e3ed687bcf0db2a72bceec6f7686045360751"}
Nov 26 06:48:25 crc kubenswrapper[4492]: I1126 06:48:25.442625 4492 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 26 06:48:25 crc kubenswrapper[4492]: I1126 06:48:25.444433 4492 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="4bd6fe7d38f84cfedbd45716bbb9bc9265e332d672766c4c85c5910f4c59ee2d" exitCode=0
Nov 26 06:48:25 crc kubenswrapper[4492]: I1126 06:48:25.444561 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"4bd6fe7d38f84cfedbd45716bbb9bc9265e332d672766c4c85c5910f4c59ee2d"}
Nov 26 06:48:25 crc kubenswrapper[4492]: I1126 06:48:25.444593 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"986e4543d00c4c57b2d30d506ef8b4c71777b23d15610fe5638c38fccedacf1e"}
Nov 26 06:48:25 crc kubenswrapper[4492]: I1126 06:48:25.444782 4492 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 26 06:48:25 crc kubenswrapper[4492]: I1126 06:48:25.446990 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:48:25 crc kubenswrapper[4492]: I1126 06:48:25.447021 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:48:25 crc kubenswrapper[4492]: I1126 06:48:25.447034 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:48:25 crc kubenswrapper[4492]: I1126 06:48:25.448308 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:48:25 crc kubenswrapper[4492]: I1126 06:48:25.448377 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:48:25 crc kubenswrapper[4492]: I1126 06:48:25.448392 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:48:25 crc kubenswrapper[4492]: I1126 06:48:25.449428 4492 generic.go:334] "Generic (PLEG): container finished" podID="d1b160f5dda77d281dd8e69ec8d817f9" containerID="7d88bf1783acd582780b9f9208f310d28ebb08a2ca510a916a0ddc6fad0c4d88" exitCode=0
Nov 26 06:48:25 crc kubenswrapper[4492]: I1126 06:48:25.449489 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerDied","Data":"7d88bf1783acd582780b9f9208f310d28ebb08a2ca510a916a0ddc6fad0c4d88"}
Nov 26 06:48:25 crc kubenswrapper[4492]: I1126 06:48:25.449641 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"0d7bf99cdc713cd223c766d4ff77c97223e78e7c4ad660e2ad72cefdd87760ec"}
Nov 26 06:48:25 crc kubenswrapper[4492]: I1126 06:48:25.449746 4492 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 26 06:48:25 crc kubenswrapper[4492]: I1126 06:48:25.450643 4492 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 26 06:48:25 crc kubenswrapper[4492]: I1126 06:48:25.451411 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:48:25 crc kubenswrapper[4492]: I1126 06:48:25.451431 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:48:25 crc kubenswrapper[4492]: I1126 06:48:25.451441 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:48:25 crc kubenswrapper[4492]: I1126 06:48:25.451712 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:48:25 crc kubenswrapper[4492]: I1126 06:48:25.451742 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:48:25 crc kubenswrapper[4492]: I1126 06:48:25.451753 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:48:25 crc kubenswrapper[4492]: I1126 06:48:25.452159 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"a8187b933b520c7a9c1c7f798f841f3892c249f52eddd13c0c7585a8bc916f20"}
Nov 26 06:48:25 crc kubenswrapper[4492]: I1126 06:48:25.452204 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"2aae2fd1e6bcdef8f77743443d56c4d3f1d03b5ca06b6e1a265a240b3568233e"}
Nov 26 06:48:25 crc kubenswrapper[4492]: I1126 06:48:25.453782 4492 generic.go:334] "Generic (PLEG): container finished" podID="3dcd261975c3d6b9a6ad6367fd4facd3" containerID="2c51611cfe8bb2b69aeaa6d69f7e94deda73d79397c8e5a4ac4f0fe330a4b586" exitCode=0
Nov 26 06:48:25 crc kubenswrapper[4492]: I1126 06:48:25.453813 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerDied","Data":"2c51611cfe8bb2b69aeaa6d69f7e94deda73d79397c8e5a4ac4f0fe330a4b586"}
Nov 26 06:48:25 crc kubenswrapper[4492]: I1126 06:48:25.453831 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"7e2602b43f4dbf448ed20c0f458188f97f8edb81600a334bd4115a0461adaabd"}
Nov 26 06:48:25 crc kubenswrapper[4492]: I1126 06:48:25.453901 4492 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 26 06:48:25 crc kubenswrapper[4492]: I1126 06:48:25.454455 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:48:25 crc kubenswrapper[4492]: I1126 06:48:25.454476 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:48:25 crc kubenswrapper[4492]: I1126 06:48:25.454485 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:48:25 crc kubenswrapper[4492]: W1126 06:48:25.558236 4492 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 192.168.25.180:6443: connect: connection refused
Nov 26 06:48:25 crc kubenswrapper[4492]: E1126 06:48:25.558317 4492 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 192.168.25.180:6443: connect: connection refused" logger="UnhandledError"
Nov 26 06:48:25 crc kubenswrapper[4492]: W1126 06:48:25.721759 4492 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 192.168.25.180:6443: connect: connection refused
Nov 26 06:48:25 crc kubenswrapper[4492]: E1126 06:48:25.721883 4492 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 192.168.25.180:6443: connect: connection refused" logger="UnhandledError"
Nov 26 06:48:25 crc kubenswrapper[4492]: E1126 06:48:25.792382 4492 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 192.168.25.180:6443: connect: connection refused" interval="1.6s"
Nov 26 06:48:25 crc kubenswrapper[4492]: W1126 06:48:25.837229 4492 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 192.168.25.180:6443: connect: connection refused
Nov 26 06:48:25 crc kubenswrapper[4492]: E1126 06:48:25.837367 4492 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 192.168.25.180:6443: connect: connection refused" logger="UnhandledError"
Nov 26 06:48:25 crc kubenswrapper[4492]: I1126 06:48:25.989088 4492 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 26 06:48:25 crc kubenswrapper[4492]: I1126 06:48:25.990144 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:48:25 crc kubenswrapper[4492]: I1126 06:48:25.990217 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:48:25 crc kubenswrapper[4492]: I1126 06:48:25.990228 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:48:25 crc kubenswrapper[4492]: I1126 06:48:25.990482 4492 kubelet_node_status.go:76] "Attempting to register node" node="crc"
Nov 26 06:48:25 crc kubenswrapper[4492]: E1126 06:48:25.990912 4492 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 192.168.25.180:6443: connect: connection refused" node="crc"
Nov 26 06:48:26 crc kubenswrapper[4492]: I1126 06:48:26.458784 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"6a6dd3695118a8c09585a7cfceb42ac5ae5898562c5f6442da6936f849a4e9f8"}
Nov 26 06:48:26 crc kubenswrapper[4492]: I1126 06:48:26.458841 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"7ca15652692a4dd6b01f7ec02b8ff37ac41ed740c4c47d451e0742aa80d0c028"}
Nov 26 06:48:26 crc kubenswrapper[4492]: I1126 06:48:26.458853 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"1684e95c263df92c92efdd2240417c071f977b58d825abd2155277d7cce1fd57"}
Nov 26 06:48:26 crc kubenswrapper[4492]: I1126 06:48:26.458863 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"15f4e01e3336948991062e895549de16371af485e1cc1f32591b5dc4fc62472b"}
Nov 26 06:48:26 crc kubenswrapper[4492]: I1126 06:48:26.458873 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"70a3d5141782b7c4dcb2ef96a026e1fb4f4ef101c967f64a20ece097bbdf007f"}
Nov 26 06:48:26 crc kubenswrapper[4492]: I1126 06:48:26.458988 4492 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 26 06:48:26 crc kubenswrapper[4492]: I1126 06:48:26.459836 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:48:26 crc kubenswrapper[4492]: I1126 06:48:26.459863 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:48:26 crc kubenswrapper[4492]: I1126 06:48:26.459872 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:48:26 crc kubenswrapper[4492]: I1126 06:48:26.461097 4492 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="f51b9153796b1b926d5f2c0bdc5d55d1be2e33a0df568da392b932178973ef64" exitCode=0
Nov 26 06:48:26 crc kubenswrapper[4492]: I1126 06:48:26.461153 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"f51b9153796b1b926d5f2c0bdc5d55d1be2e33a0df568da392b932178973ef64"}
Nov 26 06:48:26 crc kubenswrapper[4492]: I1126 06:48:26.461262 4492 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 26 06:48:26 crc kubenswrapper[4492]: I1126 06:48:26.462065 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:48:26 crc kubenswrapper[4492]: I1126 06:48:26.462089 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:48:26 crc kubenswrapper[4492]: I1126 06:48:26.462099 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:48:26 crc kubenswrapper[4492]: I1126 06:48:26.462483 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"390b1499bd9aae353a574f1d5ca4243dda7d4576837cf40a0118c53cff23ebdb"}
Nov 26 06:48:26 crc kubenswrapper[4492]: I1126 06:48:26.462576 4492 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 26 06:48:26 crc kubenswrapper[4492]: I1126 06:48:26.463126 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:48:26 crc kubenswrapper[4492]: I1126 06:48:26.463141 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:48:26 crc kubenswrapper[4492]: I1126 06:48:26.463149 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:48:26 crc kubenswrapper[4492]: I1126 06:48:26.465886 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"1a8bf4249c1551f054875ff3ef146502de6c99fd3afd10d78b41274196a35a6d"}
Nov 26 06:48:26 crc kubenswrapper[4492]: I1126 06:48:26.465910 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"b87661ddeafdf124a87d6bc50755b340e32d88bbc35a005ae13aa66aa3b39ff4"}
Nov 26 06:48:26 crc kubenswrapper[4492]: I1126 06:48:26.465921 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"e170e91f442c9f45c7adfc9a5f8435cb51135522d5ac61f29829834c1f797e0c"}
Nov 26 06:48:26 crc kubenswrapper[4492]: I1126 06:48:26.465971 4492 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 26 06:48:26 crc kubenswrapper[4492]: I1126 06:48:26.466544 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:48:26 crc kubenswrapper[4492]: I1126 06:48:26.466568 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:48:26 crc kubenswrapper[4492]: I1126 06:48:26.466578 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:48:26 crc kubenswrapper[4492]: I1126 06:48:26.468658 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"8088d16b18d99a32c41b63cbd6181314e805595697cbc5f122864dff6fe7b324"}
Nov 26 06:48:26 crc kubenswrapper[4492]: I1126 06:48:26.468683 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"b85db4fc799f48153a0e9db0b5b8316762f221aa873c03d675beb5e9939377bc"}
Nov 26 06:48:26 crc kubenswrapper[4492]: I1126 06:48:26.468693 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"3cdba573f24fecefce899a977a585bd480de506f64dcd8af6fd7f32f945b844d"}
Nov 26 06:48:26 crc kubenswrapper[4492]: I1126 06:48:26.468753 4492 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 26 06:48:26 crc kubenswrapper[4492]: I1126 06:48:26.469325 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:48:26 crc kubenswrapper[4492]: I1126 06:48:26.469346 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:48:26 crc kubenswrapper[4492]: I1126 06:48:26.469355 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:48:27 crc kubenswrapper[4492]: I1126 06:48:27.181796 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 26 06:48:27 crc kubenswrapper[4492]: I1126 06:48:27.353053 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 26 06:48:27 crc kubenswrapper[4492]: I1126 06:48:27.473909 4492 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="8cfc778e8208b9ac4eecdcec38b72db997cdd2a68b19cc8e30bdde590da553fd" exitCode=0
Nov 26 06:48:27 crc kubenswrapper[4492]: I1126 06:48:27.474000 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"8cfc778e8208b9ac4eecdcec38b72db997cdd2a68b19cc8e30bdde590da553fd"}
Nov 26 06:48:27 crc kubenswrapper[4492]: I1126 06:48:27.474058 4492 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 26 06:48:27 crc kubenswrapper[4492]: I1126 06:48:27.474211 4492 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 26 06:48:27 crc kubenswrapper[4492]: I1126 06:48:27.474615 4492 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 26 06:48:27 crc kubenswrapper[4492]: I1126 06:48:27.474618 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 26 06:48:27 crc kubenswrapper[4492]: I1126 06:48:27.474682 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 26 06:48:27 crc kubenswrapper[4492]: I1126 06:48:27.474949 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:48:27 crc kubenswrapper[4492]: I1126 06:48:27.474967 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:48:27 crc kubenswrapper[4492]: I1126 06:48:27.474975 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:48:27 crc kubenswrapper[4492]: I1126 06:48:27.475039 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:48:27 crc kubenswrapper[4492]: I1126 06:48:27.475059 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:48:27 crc kubenswrapper[4492]: I1126 06:48:27.475070 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:48:27 crc kubenswrapper[4492]: I1126 06:48:27.475264 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:48:27 crc kubenswrapper[4492]: I1126 06:48:27.475301 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:48:27 crc kubenswrapper[4492]: I1126 06:48:27.475312 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:48:27 crc kubenswrapper[4492]: I1126 06:48:27.509685 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 26 06:48:27 crc kubenswrapper[4492]: I1126 06:48:27.591211 4492 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 26 06:48:27 crc kubenswrapper[4492]: I1126 06:48:27.592044 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:48:27 crc kubenswrapper[4492]: I1126 06:48:27.592074 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:48:27 crc kubenswrapper[4492]: I1126 06:48:27.592085 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:48:27 crc kubenswrapper[4492]: I1126 06:48:27.592104 4492 kubelet_node_status.go:76] "Attempting to register node" node="crc"
Nov 26 06:48:28 crc kubenswrapper[4492]: I1126 06:48:28.479266 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"5bd197c99b6d8e405f2023f3985d14579f2da73008cb6219bde9ad309cf0c7b8"}
Nov 26 06:48:28 crc kubenswrapper[4492]: I1126 06:48:28.479325 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"711af18dbbce5875faa107da6007d9fa0fd99ba58c8e0e033ad956ada538a9c7"}
Nov 26 06:48:28 crc kubenswrapper[4492]: I1126 06:48:28.479335 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"6d0d3a70003d231c58cf949db69367229e45c44813a969360fa4d22f17b676fd"}
Nov 26 06:48:28 crc kubenswrapper[4492]: I1126 06:48:28.479341 4492 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 26 06:48:28 crc kubenswrapper[4492]: I1126 06:48:28.479341 4492 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 26 06:48:28 crc kubenswrapper[4492]: I1126 06:48:28.479442 4492 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 26 06:48:28 crc kubenswrapper[4492]: I1126 06:48:28.479344 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"ed249be3ee240d3abc137ebe93a7f0595fd30e86f3d566029d2aafada08f0b3e"}
Nov 26 06:48:28 crc kubenswrapper[4492]: I1126 06:48:28.479809 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"7689d03ba1a45b28d33ecb66a7451c4e3767d076eba9751dc438b44bf692b0e5"}
Nov 26 06:48:28 crc kubenswrapper[4492]: I1126 06:48:28.480097 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:48:28 crc kubenswrapper[4492]: I1126 06:48:28.480127 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:48:28 crc kubenswrapper[4492]: I1126 06:48:28.480137 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:48:28 crc kubenswrapper[4492]: I1126 06:48:28.480394 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:48:28 crc kubenswrapper[4492]: I1126 06:48:28.480692 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:48:28 crc kubenswrapper[4492]: I1126 06:48:28.480701 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:48:28 crc kubenswrapper[4492]: I1126 06:48:28.480417 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:48:28 crc kubenswrapper[4492]: I1126 06:48:28.480921 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:48:28 crc kubenswrapper[4492]: I1126 06:48:28.480947 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:48:28 crc kubenswrapper[4492]: I1126 06:48:28.819535 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 26 06:48:29 crc kubenswrapper[4492]: I1126 06:48:29.480611 4492 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 26 06:48:29 crc kubenswrapper[4492]: I1126 06:48:29.480963 4492 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 26 06:48:29 crc kubenswrapper[4492]: I1126 06:48:29.481446 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:48:29 crc kubenswrapper[4492]: I1126 06:48:29.481491 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:48:29 crc kubenswrapper[4492]: I1126 06:48:29.481500 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:48:29 crc kubenswrapper[4492]: I1126 06:48:29.481791 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:48:29 crc kubenswrapper[4492]: I1126 06:48:29.481810 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:48:29 crc kubenswrapper[4492]: I1126 06:48:29.481818 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:48:30 crc kubenswrapper[4492]: I1126 06:48:30.483638 4492 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 26 06:48:30 crc kubenswrapper[4492]: I1126 06:48:30.484164 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:48:30 crc kubenswrapper[4492]: I1126 06:48:30.484212 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:48:30 crc kubenswrapper[4492]: I1126 06:48:30.484220 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:48:30 crc kubenswrapper[4492]: I1126 06:48:30.509782 4492 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Nov 26 06:48:30 crc kubenswrapper[4492]: I1126 06:48:30.509855 4492 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Nov 26 06:48:31 crc kubenswrapper[4492]: I1126 06:48:31.990978 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Nov 26 06:48:31 crc kubenswrapper[4492]: I1126 06:48:31.991149 4492 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 26 06:48:31 crc kubenswrapper[4492]: I1126 06:48:31.992076 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:48:31 crc kubenswrapper[4492]: I1126 06:48:31.992137 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:48:31 crc kubenswrapper[4492]: I1126 06:48:31.992147 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:48:33 crc kubenswrapper[4492]: I1126 06:48:33.043891 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-etcd/etcd-crc"
Nov 26 06:48:33 crc kubenswrapper[4492]: I1126 06:48:33.044043 4492 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 26 06:48:33 crc kubenswrapper[4492]: I1126 06:48:33.045205 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:48:33 crc kubenswrapper[4492]: I1126 06:48:33.045310 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:48:33 crc kubenswrapper[4492]: I1126 06:48:33.045371 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:48:33 crc kubenswrapper[4492]: I1126 06:48:33.431801 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 26 06:48:33 crc kubenswrapper[4492]: I1126 06:48:33.431965 4492 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 26 06:48:33 crc kubenswrapper[4492]: I1126 06:48:33.434920 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:48:33 crc kubenswrapper[4492]: I1126 06:48:33.434950 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:48:33 crc kubenswrapper[4492]: I1126 06:48:33.434962 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:48:33 crc kubenswrapper[4492]: I1126 06:48:33.436798 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 26 06:48:33 crc kubenswrapper[4492]: I1126 06:48:33.489225 4492 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 26 06:48:33 crc kubenswrapper[4492]: I1126 06:48:33.489932 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:48:33 crc kubenswrapper[4492]: I1126 06:48:33.489956 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:48:33 crc kubenswrapper[4492]: I1126 06:48:33.489964 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:48:34 crc kubenswrapper[4492]: E1126 06:48:34.491222 4492 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found"
Nov 26 06:48:36 crc kubenswrapper[4492]: I1126 06:48:36.382767 4492 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": net/http: TLS handshake timeout
Nov 26 06:48:36 crc kubenswrapper[4492]: I1126 06:48:36.570865 4492 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403}
Nov 26 06:48:36 crc kubenswrapper[4492]: I1126 06:48:36.570923 4492 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403"
Nov 26 06:48:36 crc kubenswrapper[4492]: I1126 06:48:36.578124 4492 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403}
Nov 26 06:48:36 crc kubenswrapper[4492]: I1126 06:48:36.578188 4492 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403"
Nov 26 06:48:37 crc kubenswrapper[4492]: I1126 06:48:37.186746 4492 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok
Nov 26 06:48:37 crc kubenswrapper[4492]: [+]log ok
Nov 26 06:48:37 crc kubenswrapper[4492]: [+]etcd ok
Nov 26 06:48:37 crc kubenswrapper[4492]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok
Nov 26 06:48:37 crc kubenswrapper[4492]: [+]poststarthook/openshift.io-api-request-count-filter ok
Nov 26 06:48:37 crc kubenswrapper[4492]: [+]poststarthook/openshift.io-startkubeinformers ok
Nov 26 06:48:37 crc kubenswrapper[4492]: [+]poststarthook/openshift.io-openshift-apiserver-reachable ok
Nov 26 06:48:37 crc kubenswrapper[4492]: [+]poststarthook/openshift.io-oauth-apiserver-reachable ok
Nov 26 06:48:37 crc kubenswrapper[4492]: [+]poststarthook/start-apiserver-admission-initializer ok
Nov 26 06:48:37 crc kubenswrapper[4492]: [+]poststarthook/generic-apiserver-start-informers ok
Nov 26 06:48:37 crc kubenswrapper[4492]: [+]poststarthook/priority-and-fairness-config-consumer ok
Nov 26 06:48:37 crc kubenswrapper[4492]: [+]poststarthook/priority-and-fairness-filter ok
Nov 26 06:48:37 crc kubenswrapper[4492]: [+]poststarthook/storage-object-count-tracker-hook ok
Nov 26 06:48:37 crc kubenswrapper[4492]: [+]poststarthook/start-apiextensions-informers ok
Nov 26 06:48:37 crc kubenswrapper[4492]: [+]poststarthook/start-apiextensions-controllers ok
Nov 26 06:48:37 crc kubenswrapper[4492]: [+]poststarthook/crd-informer-synced ok
Nov 26 06:48:37 crc kubenswrapper[4492]: [+]poststarthook/start-system-namespaces-controller ok
Nov 26 06:48:37 crc kubenswrapper[4492]: [+]poststarthook/start-cluster-authentication-info-controller ok
Nov 26 06:48:37 crc kubenswrapper[4492]: [+]poststarthook/start-kube-apiserver-identity-lease-controller ok
Nov 26 06:48:37 crc kubenswrapper[4492]: [+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok
Nov 26 06:48:37 crc kubenswrapper[4492]: [+]poststarthook/start-legacy-token-tracking-controller ok
Nov 26 06:48:37 crc kubenswrapper[4492]: [+]poststarthook/start-service-ip-repair-controllers ok
Nov 26 06:48:37 crc kubenswrapper[4492]: [-]poststarthook/rbac/bootstrap-roles failed: reason withheld
Nov 26 06:48:37 crc kubenswrapper[4492]: [-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
Nov 26 06:48:37 crc kubenswrapper[4492]: [+]poststarthook/priority-and-fairness-config-producer ok
Nov 26 06:48:37 crc kubenswrapper[4492]: [+]poststarthook/bootstrap-controller ok
Nov 26 06:48:37 crc kubenswrapper[4492]: [+]poststarthook/aggregator-reload-proxy-client-cert ok
Nov 26 06:48:37 crc kubenswrapper[4492]: [+]poststarthook/start-kube-aggregator-informers ok
Nov 26 06:48:37 crc kubenswrapper[4492]: [+]poststarthook/apiservice-status-local-available-controller ok
Nov 26 06:48:37 crc kubenswrapper[4492]: [+]poststarthook/apiservice-status-remote-available-controller ok
Nov 26 06:48:37 crc kubenswrapper[4492]: [+]poststarthook/apiservice-registration-controller ok
Nov 26 06:48:37 crc kubenswrapper[4492]: [+]poststarthook/apiservice-wait-for-first-sync ok
Nov 26 06:48:37 crc kubenswrapper[4492]: [+]poststarthook/apiservice-discovery-controller ok
Nov 26 06:48:37 crc kubenswrapper[4492]: [+]poststarthook/kube-apiserver-autoregistration ok
Nov 26 06:48:37 crc kubenswrapper[4492]: [+]autoregister-completion ok
Nov 26 06:48:37 crc kubenswrapper[4492]: [+]poststarthook/apiservice-openapi-controller ok
Nov 26 06:48:37 crc kubenswrapper[4492]: [+]poststarthook/apiservice-openapiv3-controller ok
Nov 26 06:48:37 crc kubenswrapper[4492]: livez check failed
Nov 26 06:48:37 crc kubenswrapper[4492]: I1126 06:48:37.186984 4492 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 26 06:48:37 crc kubenswrapper[4492]: I1126 06:48:37.356219 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 26 06:48:37 crc kubenswrapper[4492]: I1126 06:48:37.356312 4492 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 26 06:48:37 crc kubenswrapper[4492]: I1126 06:48:37.357326 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:48:37 crc kubenswrapper[4492]: I1126 06:48:37.357351 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:48:37 crc kubenswrapper[4492]: I1126 06:48:37.357361 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:48:38 crc kubenswrapper[4492]: I1126 06:48:38.156232 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-etcd/etcd-crc"
Nov 26 06:48:38 crc kubenswrapper[4492]: I1126 06:48:38.156406 4492 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 26 06:48:38 crc kubenswrapper[4492]: I1126 06:48:38.157322 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:48:38 crc kubenswrapper[4492]: I1126 06:48:38.157353 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:48:38 crc kubenswrapper[4492]: I1126 06:48:38.157362 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:48:38 crc kubenswrapper[4492]: I1126 06:48:38.181046 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-etcd/etcd-crc"
Nov 26 06:48:38 crc kubenswrapper[4492]: I1126 06:48:38.499425 4492 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 26 06:48:38 crc kubenswrapper[4492]: I1126 06:48:38.500104 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:48:38 crc kubenswrapper[4492]: I1126 06:48:38.500135 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:48:38 crc kubenswrapper[4492]: I1126 06:48:38.500142 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:48:38 crc kubenswrapper[4492]: I1126 06:48:38.507554 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-etcd/etcd-crc"
Nov 26 06:48:39 crc kubenswrapper[4492]: I1126 06:48:39.500961 4492 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 26 06:48:39 crc kubenswrapper[4492]: I1126 06:48:39.501739 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:48:39 crc kubenswrapper[4492]: I1126 06:48:39.501772 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:48:39 crc kubenswrapper[4492]: I1126 06:48:39.501784 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:48:40 crc kubenswrapper[4492]: I1126 06:48:40.510793 4492 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Nov 26 06:48:40 crc kubenswrapper[4492]: I1126 06:48:40.510844 4492 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Nov 26 06:48:41 crc kubenswrapper[4492]: E1126 06:48:41.580095 4492 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": context deadline exceeded" interval="3.2s"
Nov 26 06:48:41 crc kubenswrapper[4492]: E1126 06:48:41.580719 4492 kubelet_node_status.go:99] "Unable to register node with API server" err="nodes \"crc\" is forbidden: autoscaling.openshift.io/ManagedNode infra config cache not synchronized" node="crc"
Nov 26 06:48:41 crc kubenswrapper[4492]: I1126 06:48:41.581152 4492 trace.go:236] Trace[1075232121]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (26-Nov-2025 06:48:28.420) (total time: 13160ms):
Nov 26 06:48:41 crc kubenswrapper[4492]: Trace[1075232121]: ---"Objects listed" error: 13160ms (06:48:41.581)
Nov 26 06:48:41 crc kubenswrapper[4492]: Trace[1075232121]: [13.160550299s] [13.160550299s] END
Nov 26 06:48:41 crc kubenswrapper[4492]: I1126 06:48:41.581299 4492 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160
Nov 26 06:48:41 crc kubenswrapper[4492]: I1126 06:48:41.581358 4492 trace.go:236] Trace[950021105]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (26-Nov-2025 06:48:27.624) (total time: 13956ms):
Nov 26 06:48:41 crc kubenswrapper[4492]: Trace[950021105]: ---"Objects listed" error: 13956ms (06:48:41.581)
Nov 26 06:48:41 crc kubenswrapper[4492]: Trace[950021105]: [13.956430515s] [13.956430515s] END
Nov 26 06:48:41 crc kubenswrapper[4492]: I1126 06:48:41.581378 4492 trace.go:236] Trace[453614496]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (26-Nov-2025 06:48:27.711) (total time: 13869ms):
Nov 26 06:48:41 crc kubenswrapper[4492]: Trace[453614496]: ---"Objects listed" error: 13869ms (06:48:41.581)
Nov 26 06:48:41 crc kubenswrapper[4492]: Trace[453614496]: [13.869393194s] [13.869393194s] END
Nov 26 06:48:41 crc kubenswrapper[4492]: I1126 06:48:41.581409 4492 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160
Nov 26 06:48:41 crc kubenswrapper[4492]: I1126 06:48:41.581198 4492 reconstruct.go:205] "DevicePaths of reconstructed volumes updated"
Nov 26 06:48:41 crc kubenswrapper[4492]: I1126 06:48:41.581382 4492 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160
Nov 26 06:48:41 crc kubenswrapper[4492]: I1126 06:48:41.581258 4492 trace.go:236] Trace[2004191824]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (26-Nov-2025 06:48:27.840) (total time: 13740ms):
Nov 26 06:48:41 crc kubenswrapper[4492]: Trace[2004191824]: ---"Objects listed" error: 13740ms (06:48:41.581)
Nov 26 06:48:41 crc kubenswrapper[4492]: Trace[2004191824]: [13.740790342s] [13.740790342s] END
Nov 26 06:48:41 crc kubenswrapper[4492]: I1126 06:48:41.581515 4492 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160
Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.186678 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.188004 4492 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" start-of-body=
Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.188065 4492 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused"
Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.195256 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.381268 4492 apiserver.go:52] "Watching apiserver"
Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.383027 4492 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66
Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.383263 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-network-console/networking-console-plugin-85b44fc459-gdk6g","openshift-network-diagnostics/network-check-source-55646444c4-trplf","openshift-network-diagnostics/network-check-target-xd92c","openshift-network-node-identity/network-node-identity-vrzqb","openshift-network-operator/iptables-alerter-4ln5h","openshift-network-operator/network-operator-58b4c7f79c-55gtf","openshift-kube-apiserver/kube-apiserver-crc"]
Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.383502 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 26 06:48:42 crc kubenswrapper[4492]: E1126 06:48:42.383549 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.383512 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf"
Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.383825 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 26 06:48:42 crc kubenswrapper[4492]: E1126 06:48:42.384027 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.384164 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb"
Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.384343 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.384222 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h"
Nov 26 06:48:42 crc kubenswrapper[4492]: E1126 06:48:42.384412 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.385984 4492 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world"
Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.386313 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") "
Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.386344 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") "
Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.386363 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") "
Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.386381 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") "
Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.386395 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") "
Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.386410 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") "
Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.386425 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") "
Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.386441 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") "
Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.386455 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") "
Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.386469 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") pod \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\" (UID: \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\") "
Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.386483 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") "
Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.386496 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") pod \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\" (UID: \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\") "
Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.386510 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") "
Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.386522 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") "
Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.386535 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") "
Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.386548 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") "
Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.386563 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") "
Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.386562 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: 
"a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.386578 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.386603 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.386618 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.386633 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.386647 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.386660 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.386673 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.386690 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.386704 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.386719 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.386732 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.386746 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.386759 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.386772 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.386775 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" (OuterVolumeSpecName: "kube-api-access-jhbk2") pod "bd23aa5c-e532-4e53-bccf-e79f130c5ae8" (UID: "bd23aa5c-e532-4e53-bccf-e79f130c5ae8"). InnerVolumeSpecName "kube-api-access-jhbk2". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.386785 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.386820 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.386847 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.386861 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.386874 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.386889 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.386902 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.386901 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" (OuterVolumeSpecName: "utilities") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.386917 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.386930 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.386944 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.386958 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.386973 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.386989 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.387020 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.387034 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.387047 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.387069 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") pod 
\"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.387085 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.387098 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.387113 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.387127 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.387164 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.387194 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.387197 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" (OuterVolumeSpecName: "kube-api-access-7c4vf") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "kube-api-access-7c4vf". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.387209 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.387225 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.387241 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.387255 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.387269 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.387283 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.387297 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.387311 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.387326 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.387340 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: 
\"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.387354 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.387369 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.387383 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.387386 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" (OuterVolumeSpecName: "kube-api-access-2d4wz") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "kube-api-access-2d4wz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.387398 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.387412 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.387424 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "trusted-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.387429 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.387478 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.387500 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.387576 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.387595 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.387585 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" (OuterVolumeSpecName: "kube-api-access-bf2bz") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "kube-api-access-bf2bz". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.387635 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.387654 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.387670 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.387686 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.387702 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.387702 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" (OuterVolumeSpecName: "kube-api-access-xcgwh") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "kube-api-access-xcgwh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.387740 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.387834 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.387844 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). 
InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.387923 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" (OuterVolumeSpecName: "certs") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.388000 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" (OuterVolumeSpecName: "kube-api-access-x2m85") pod "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" (UID: "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d"). InnerVolumeSpecName "kube-api-access-x2m85". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.388074 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.388291 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" (OuterVolumeSpecName: "available-featuregates") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "available-featuregates". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.388350 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" (OuterVolumeSpecName: "kube-api-access-zkvpv") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "kube-api-access-zkvpv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.388373 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.388396 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.388400 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.388470 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "webhook-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.388518 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.388625 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.388648 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.388678 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.388700 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.388769 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" (OuterVolumeSpecName: "kube-api-access-ngvvp") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "kube-api-access-ngvvp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.388880 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" (OuterVolumeSpecName: "kube-api-access-4d4hj") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "kube-api-access-4d4hj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.389232 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" (OuterVolumeSpecName: "signing-cabundle") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-cabundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.389305 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.389460 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.389665 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" (OuterVolumeSpecName: "kube-api-access-qs4fp") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "kube-api-access-qs4fp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.389786 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" (OuterVolumeSpecName: "image-import-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "image-import-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.389887 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" (OuterVolumeSpecName: "kube-api-access-249nr") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "kube-api-access-249nr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.389914 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.390147 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" (OuterVolumeSpecName: "kube-api-access-sb6h7") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "kube-api-access-sb6h7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.390246 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" (OuterVolumeSpecName: "package-server-manager-serving-cert") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "package-server-manager-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.389323 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" (OuterVolumeSpecName: "config") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.390337 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.390474 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" (OuterVolumeSpecName: "utilities") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.387720 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.390519 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.390539 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.390554 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.390572 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.390587 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.390600 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.390618 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.390645 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") pod \"49ef4625-1d3a-4a9f-b595-c2433d32326d\" (UID: \"49ef4625-1d3a-4a9f-b595-c2433d32326d\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.391656 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" (OuterVolumeSpecName: "kube-api-access-lz9wn") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: 
"a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "kube-api-access-lz9wn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.391701 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" (OuterVolumeSpecName: "kube-api-access-dbsvg") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "kube-api-access-dbsvg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.391701 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" (OuterVolumeSpecName: "kube-api-access-pjr6v") pod "49ef4625-1d3a-4a9f-b595-c2433d32326d" (UID: "49ef4625-1d3a-4a9f-b595-c2433d32326d"). InnerVolumeSpecName "kube-api-access-pjr6v". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.391729 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.391795 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.392004 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" (OuterVolumeSpecName: "config") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.392067 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" (OuterVolumeSpecName: "config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.392294 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.392336 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.392346 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.392378 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.392399 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.392581 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.392621 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.392652 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.392673 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.392692 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.392712 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.392733 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.392750 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.392771 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.392792 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.392812 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jkwtn\" (UniqueName: 
\"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.392834 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.392854 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.392873 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.392889 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.392908 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.392929 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.392947 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.392968 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.392990 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.393006 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for 
volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.393027 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.393059 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.393079 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.393098 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.393115 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.393134 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.393153 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.393200 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.393223 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 
06:48:42.393241 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.393260 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.393281 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.393300 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.393327 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.393349 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.393369 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.393385 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.393406 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.393429 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: 
\"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.393449 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.393467 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.393488 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.393508 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.393523 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.393540 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.393558 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.393573 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.393591 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.393608 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 26 06:48:42 
crc kubenswrapper[4492]: I1126 06:48:42.393625 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.393643 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.393664 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.393686 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.393703 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.393722 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.393834 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.393861 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.393886 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.393904 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") pod 
\"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.393974 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.393994 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.394013 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.394033 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.394154 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.394208 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.394228 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.394248 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.394264 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.394283 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") pod 
\"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.394302 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.394324 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.394343 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.394360 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.394452 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.394482 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.394503 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.394521 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.394537 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.394553 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") pod 
\"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.394570 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.394585 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.394604 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.394623 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.394641 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.394655 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.394674 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.394755 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.394779 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.394798 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vt5rc\" 
(UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") pod \"44663579-783b-4372-86d6-acf235a62d72\" (UID: \"44663579-783b-4372-86d6-acf235a62d72\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.394814 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.394832 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.394857 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.394876 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.394895 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.394914 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.394931 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.394955 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.394973 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.395001 4492 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.395022 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.395046 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.395073 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.395088 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.395105 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.395152 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.395197 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.395220 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.395240 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod 
\"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.395255 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.395277 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.395297 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.395321 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.395340 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.395365 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.395390 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.395422 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 
06:48:42.395442 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.395463 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.395526 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.395550 4492 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.395559 4492 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.395573 4492 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.395583 4492 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.395596 4492 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.395610 4492 reconciler_common.go:293] "Volume detached for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.395623 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.395632 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.395648 4492 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: 
I1126 06:48:42.395657 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.395666 4492 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.395678 4492 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.395689 4492 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.395698 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.395707 4492 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.395720 4492 reconciler_common.go:293] "Volume detached for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.395729 4492 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.395739 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.395747 4492 reconciler_common.go:293] "Volume detached for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.395759 4492 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.395768 4492 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.395777 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc 
kubenswrapper[4492]: I1126 06:48:42.395786 4492 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.395803 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.395813 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.395828 4492 reconciler_common.go:293] "Volume detached for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.395841 4492 reconciler_common.go:293] "Volume detached for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.395850 4492 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.395859 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.395868 4492 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.395884 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.395897 4492 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.395906 4492 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.395918 4492 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.395930 4492 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 
26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.395939 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.395948 4492 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.395961 4492 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.395975 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.395989 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.396002 4492 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.396014 4492 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.396023 4492 reconciler_common.go:293] "Volume detached for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.396031 4492 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.396044 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.392792 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" (OuterVolumeSpecName: "kube-api-access-cfbct") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "kube-api-access-cfbct". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.392985 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "metrics-tls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.393192 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.393350 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.393449 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" (OuterVolumeSpecName: "config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.393461 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.399148 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" (OuterVolumeSpecName: "kube-api-access-2w9zh") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "kube-api-access-2w9zh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.399237 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" (OuterVolumeSpecName: "kube-api-access-w9rds") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "kube-api-access-w9rds". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.399258 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" (OuterVolumeSpecName: "config") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.399384 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" (OuterVolumeSpecName: "kube-api-access-jkwtn") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "kube-api-access-jkwtn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.399515 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" (OuterVolumeSpecName: "etcd-service-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.399372 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" (OuterVolumeSpecName: "kube-api-access-vt5rc") pod "44663579-783b-4372-86d6-acf235a62d72" (UID: "44663579-783b-4372-86d6-acf235a62d72"). InnerVolumeSpecName "kube-api-access-vt5rc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.399757 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.399568 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.399855 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" (OuterVolumeSpecName: "kube-api-access-6g6sz") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "kube-api-access-6g6sz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.393667 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.393672 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" (OuterVolumeSpecName: "stats-auth") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "stats-auth". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.393850 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-cliconfig". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.394002 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.400782 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.400909 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" (OuterVolumeSpecName: "kube-api-access-mnrrd") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "kube-api-access-mnrrd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.400909 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" (OuterVolumeSpecName: "kube-api-access-s4n52") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "kube-api-access-s4n52". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.394381 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.400930 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.400664 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.394352 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" (OuterVolumeSpecName: "kube-api-access-6ccd8") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "kube-api-access-6ccd8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.394599 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.394385 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" (OuterVolumeSpecName: "client-ca") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.394833 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" (OuterVolumeSpecName: "kube-api-access-zgdk5") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "kube-api-access-zgdk5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.395140 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.395255 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.395558 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" (OuterVolumeSpecName: "kube-api-access-pcxfs") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "kube-api-access-pcxfs". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.395584 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.395809 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.395832 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" (OuterVolumeSpecName: "kube-api-access-x7zkh") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). 
InnerVolumeSpecName "kube-api-access-x7zkh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.396010 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" (OuterVolumeSpecName: "kube-api-access-w4xd4") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "kube-api-access-w4xd4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: E1126 06:48:42.396160 4492 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.401133 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" (OuterVolumeSpecName: "kube-api-access-htfz6") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "kube-api-access-htfz6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: E1126 06:48:42.401149 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-26 06:48:42.901134158 +0000 UTC m=+18.785022457 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.396225 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.396637 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" (OuterVolumeSpecName: "audit") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "audit". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.396703 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.396944 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.397278 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.397349 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" (OuterVolumeSpecName: "kube-api-access-nzwt7") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "kube-api-access-nzwt7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.397365 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" (OuterVolumeSpecName: "kube-api-access-xcphl") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "kube-api-access-xcphl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.397773 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" (OuterVolumeSpecName: "config") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.397776 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" (OuterVolumeSpecName: "config-volume") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.397981 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" (OuterVolumeSpecName: "kube-api-access-rnphk") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "kube-api-access-rnphk". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.398205 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.398267 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.398329 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" (OuterVolumeSpecName: "config") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.398564 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.394727 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" (OuterVolumeSpecName: "utilities") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.401157 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" (OuterVolumeSpecName: "kube-api-access-x4zgh") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "kube-api-access-x4zgh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.401393 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" (OuterVolumeSpecName: "node-bootstrap-token") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "node-bootstrap-token". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.401410 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" (OuterVolumeSpecName: "config") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). 
InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.401412 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" (OuterVolumeSpecName: "samples-operator-tls") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "samples-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.401551 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.401646 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" (OuterVolumeSpecName: "kube-api-access-kfwg7") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "kube-api-access-kfwg7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.401894 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.402003 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" (OuterVolumeSpecName: "mcc-auth-proxy-config") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "mcc-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.402302 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.402304 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "srv-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.402356 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.402358 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.393633 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.402431 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" (OuterVolumeSpecName: "kube-api-access-d4lsv") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "kube-api-access-d4lsv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.402533 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.402629 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" (OuterVolumeSpecName: "image-registry-operator-tls") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "image-registry-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.402741 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.402767 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" (OuterVolumeSpecName: "cni-sysctl-allowlist") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-sysctl-allowlist". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.402927 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.402972 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" (OuterVolumeSpecName: "kube-api-access-9xfj7") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "kube-api-access-9xfj7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.403058 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" (OuterVolumeSpecName: "config") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.403074 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.403257 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.403420 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.403405 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" (OuterVolumeSpecName: "kube-api-access-fcqwp") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "kube-api-access-fcqwp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.403915 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). 
InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.404465 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.404508 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.404729 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" (OuterVolumeSpecName: "machine-api-operator-tls") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "machine-api-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.404860 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.404874 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: E1126 06:48:42.404918 4492 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 26 06:48:42 crc kubenswrapper[4492]: E1126 06:48:42.404963 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-26 06:48:42.904949823 +0000 UTC m=+18.788838121 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.405101 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" (OuterVolumeSpecName: "config") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.405145 4492 swap_util.go:74] "error creating dir to test if tmpfs noswap is enabled. Assuming not supported" mount path="" error="stat /var/lib/kubelet/plugins/kubernetes.io/empty-dir: no such file or directory" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.405569 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" (OuterVolumeSpecName: "config") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.405603 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.405865 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.406084 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" (OuterVolumeSpecName: "kube-api-access-pj782") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "kube-api-access-pj782". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.406093 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.406298 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" (OuterVolumeSpecName: "images") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "images". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.406445 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" (OuterVolumeSpecName: "kube-api-access-gf66m") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "kube-api-access-gf66m". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.406465 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.406510 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.406526 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" (OuterVolumeSpecName: "kube-api-access-mg5zb") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "kube-api-access-mg5zb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.406530 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.406572 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.406482 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" (OuterVolumeSpecName: "config") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.407109 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.407220 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.407477 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.407577 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.407770 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" (OuterVolumeSpecName: "mcd-auth-proxy-config") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "mcd-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.408417 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" (OuterVolumeSpecName: "config") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.408491 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" (OuterVolumeSpecName: "serviceca") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "serviceca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.408719 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" (OuterVolumeSpecName: "utilities") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.399485 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" (OuterVolumeSpecName: "kube-api-access-fqsjt") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "kube-api-access-fqsjt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.408835 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" (OuterVolumeSpecName: "kube-api-access-v47cf") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "kube-api-access-v47cf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.408923 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" (OuterVolumeSpecName: "signing-key") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.408914 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.409027 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" (OuterVolumeSpecName: "cert") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.409029 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" (OuterVolumeSpecName: "webhook-certs") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "webhook-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.409296 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" (OuterVolumeSpecName: "kube-api-access-tk88c") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "kube-api-access-tk88c". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.409238 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" (OuterVolumeSpecName: "kube-api-access-qg5z5") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "kube-api-access-qg5z5". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.409203 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" (OuterVolumeSpecName: "default-certificate") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "default-certificate". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.409477 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" (OuterVolumeSpecName: "config") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.409498 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.409547 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.409707 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" (OuterVolumeSpecName: "apiservice-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "apiservice-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.409858 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.410462 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.410884 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "bound-sa-token". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.410888 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" (OuterVolumeSpecName: "client-ca") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.410938 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" (OuterVolumeSpecName: "kube-api-access-w7l8j") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "kube-api-access-w7l8j". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.411042 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: E1126 06:48:42.411382 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 06:48:42.911369705 +0000 UTC m=+18.795258003 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.411451 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.411460 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" (OuterVolumeSpecName: "etcd-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.411537 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.412963 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" (OuterVolumeSpecName: "multus-daemon-config") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "multus-daemon-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.413014 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.413296 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" (OuterVolumeSpecName: "machine-approver-tls") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "machine-approver-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.413999 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.414082 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" (OuterVolumeSpecName: "service-ca") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.414271 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.413778 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.415997 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.416414 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" (OuterVolumeSpecName: "config") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.417460 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.423229 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" (OuterVolumeSpecName: "images") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: E1126 06:48:42.423373 4492 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.423638 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.423447 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" (OuterVolumeSpecName: "kube-api-access-d6qdx") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "kube-api-access-d6qdx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.423525 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" (OuterVolumeSpecName: "kube-api-access-lzf88") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "kube-api-access-lzf88". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: E1126 06:48:42.423762 4492 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.423810 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: E1126 06:48:42.423822 4492 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 06:48:42 crc kubenswrapper[4492]: E1126 06:48:42.423954 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-26 06:48:42.923941254 +0000 UTC m=+18.807829552 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.423897 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: E1126 06:48:42.425099 4492 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 26 06:48:42 crc kubenswrapper[4492]: E1126 06:48:42.425120 4492 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 26 06:48:42 crc kubenswrapper[4492]: E1126 06:48:42.425131 4492 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 06:48:42 crc kubenswrapper[4492]: E1126 06:48:42.425185 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. 
No retries permitted until 2025-11-26 06:48:42.925155887 +0000 UTC m=+18.809044186 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.425442 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.426727 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-router-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.426950 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" (OuterVolumeSpecName: "control-plane-machine-set-operator-tls") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "control-plane-machine-set-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.427142 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" (OuterVolumeSpecName: "kube-api-access-8tdtz") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "kube-api-access-8tdtz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.427800 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.428564 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.429140 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" (OuterVolumeSpecName: "service-ca") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.429396 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.431017 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.432011 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.433311 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" (OuterVolumeSpecName: "kube-api-access-279lb") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "kube-api-access-279lb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.433491 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" (OuterVolumeSpecName: "kube-api-access-wxkg8") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "kube-api-access-wxkg8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.434098 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" (OuterVolumeSpecName: "ovn-control-plane-metrics-cert") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovn-control-plane-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.434159 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" (OuterVolumeSpecName: "console-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.434603 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.434660 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" (OuterVolumeSpecName: "tmpfs") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "tmpfs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.438265 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"995e57c0-8e79-4857-8451-c7f7b51a05d3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70a3d5141782b7c4dcb2ef96a026e1fb4f4ef101c967f64a20ece097bbdf007f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1684e95c263df92c92efdd2240417c071f977b58d825abd2155277d7cce1fd57\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://15f4e01e3336948991062e895549de16371af485e1cc1f32591b5dc4fc62472b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\
":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a6dd3695118a8c09585a7cfceb42ac5ae5898562c5f6442da6936f849a4e9f8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ca15652692a4dd6b01f7ec02b8ff37ac41ed740c4c47d451e0742aa80d0c028\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4421dd0243f7239dfa2ede847b9cb5230663e69a214dca3dfb1a4763ea76b233\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4421dd0243f7239dfa2ede847b9cb5230663e69a214dca3dfb1a4763ea76b233\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:24Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.439780 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" (OuterVolumeSpecName: "catalog-content") pod 
"57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.442202 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01ab3dd5-8196-46d0-ad33-122e2ca51def" path="/var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.442861 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" path="/var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.444718 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09efc573-dbb6-4249-bd59-9b87aba8dd28" path="/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.444909 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.445909 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b574797-001e-440a-8f4e-c0be86edad0f" path="/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.447114 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.447506 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b78653f-4ff9-4508-8672-245ed9b561e3" path="/var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.448482 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1386a44e-36a2-460c-96d0-0359d2b6f0f5" path="/var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.449096 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1bf7eb37-55a3-4c65-b768-a94c82151e69" path="/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.450324 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1d611f23-29be-4491-8495-bee1670e935f" path="/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.451431 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20b0d48f-5fd6-431c-a545-e3c800c7b866" path="/var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/volumes" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.451419 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.452545 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" path="/var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.453234 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="22c825df-677d-4ca6-82db-3454ed06e783" path="/var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.454648 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.454742 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="25e176fe-21b4-4974-b1ed-c8b94f112a7f" path="/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.454990 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "ca-trust-extracted". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.455390 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" path="/var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.456003 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31d8b7a1-420e-4252-a5b7-eebe8a111292" path="/var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.456994 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ab1a177-2de0-46d9-b765-d0d0649bb42e" path="/var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/volumes" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.457715 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" path="/var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.458120 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.458854 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43509403-f426-496e-be36-56cef71462f5" path="/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.459346 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="44663579-783b-4372-86d6-acf235a62d72" path="/var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/volumes" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.459911 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="496e6271-fb68-4057-954e-a0d97a4afa3f" path="/var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.460844 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" path="/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.461359 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49ef4625-1d3a-4a9f-b595-c2433d32326d" path="/var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/volumes" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.462289 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4bb40260-dbaa-4fb0-84df-5e680505d512" path="/var/lib/kubelet/pods/4bb40260-dbaa-4fb0-84df-5e680505d512/volumes" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.462698 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5225d0e4-402f-4861-b410-819f433b1803" path="/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.463704 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5441d097-087c-4d9a-baa8-b210afa90fc9" path="/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.464131 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57a731c4-ef35-47a8-b875-bfb08a7f8011" path="/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.464332 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.464712 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b88f790-22fa-440e-b583-365168c0b23d" path="/var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/volumes" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.465699 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5fe579f8-e8a6-4643-bce5-a661393c4dde" path="/var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/volumes" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.466158 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6402fda4-df10-493c-b4e5-d0569419652d" path="/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.467060 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6509e943-70c6-444c-bc41-48a544e36fbd" path="/var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.467507 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6731426b-95fe-49ff-bb5f-40441049fde2" path="/var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/volumes" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.468471 4492 kubelet_volumes.go:152] 
"Cleaned up orphaned volume subpath from pod" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volume-subpaths/run-systemd/ovnkube-controller/6" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.468569 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volumes" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.470004 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7539238d-5fe0-46ed-884e-1c3b566537ec" path="/var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.470812 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7583ce53-e0fe-4a16-9e4d-50516596a136" path="/var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.471217 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7bb08738-c794-4ee8-9972-3a62ca171029" path="/var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.471551 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.472565 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87cf06ed-a83f-41a7-828d-70653580a8cb" path="/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.473195 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" path="/var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.473967 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="925f1c65-6136-48ba-85aa-3a3b50560753" path="/var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.475313 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" path="/var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/volumes" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.476227 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d4552c7-cd75-42dd-8880-30dd377c49a4" path="/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.476646 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" path="/var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/volumes" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.477392 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.477658 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a31745f5-9847-4afe-82a5-3161cc66ca93" path="/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.478264 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" path="/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.479126 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6312bbd-5731-4ea0-a20f-81d5a57df44a" path="/var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/volumes" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.479564 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" path="/var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.480374 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" path="/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.480824 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" path="/var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/volumes" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.481910 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf126b07-da06-4140-9a57-dfd54fc6b486" path="/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.482430 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c03ee662-fb2f-4fc4-a2c1-af487c19d254" path="/var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 
06:48:42.483280 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" path="/var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/volumes" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.483694 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e7e6199b-1264-4501-8953-767f51328d08" path="/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.484532 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="efdd0498-1daa-4136-9a4a-3b948c2293fc" path="/var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/volumes" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.485025 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" path="/var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/volumes" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.485470 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fda69060-fa79-4696-b1a6-7980f124bf7c" path="/var/lib/kubelet/pods/fda69060-fa79-4696-b1a6-7980f124bf7c/volumes" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.496984 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.497040 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.497090 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.497113 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.497126 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.497135 4492 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.497144 4492 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 
06:48:42.497152 4492 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.497161 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.497187 4492 reconciler_common.go:293] "Volume detached for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.497196 4492 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.497205 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.497213 4492 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.497222 4492 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.497233 4492 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.497241 4492 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.497250 4492 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.497252 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.497258 4492 reconciler_common.go:293] "Volume detached for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.497288 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc 
kubenswrapper[4492]: I1126 06:48:42.497297 4492 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.497307 4492 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.497316 4492 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.497325 4492 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.497333 4492 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.497343 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.497353 4492 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.497362 4492 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.497369 4492 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.497379 4492 reconciler_common.go:293] "Volume detached for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.497387 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.497396 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.497404 4492 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc 
kubenswrapper[4492]: I1126 06:48:42.497412 4492 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.497420 4492 reconciler_common.go:293] "Volume detached for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.497429 4492 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.497438 4492 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.497445 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.497453 4492 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.497461 4492 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.497476 4492 reconciler_common.go:293] "Volume detached for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.497484 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.497492 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.497502 4492 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.497510 4492 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.497519 4492 reconciler_common.go:293] "Volume detached for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") on 
node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.497526 4492 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.497535 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.497546 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.497555 4492 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.497564 4492 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.497573 4492 reconciler_common.go:293] "Volume detached for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.497580 4492 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.497588 4492 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.497596 4492 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.497604 4492 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.497614 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.497623 4492 reconciler_common.go:293] "Volume detached for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.497631 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x4zgh\" (UniqueName: 
\"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.497639 4492 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.497647 4492 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.497655 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.497664 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.497671 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.497680 4492 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.497687 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.497695 4492 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.497703 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.497710 4492 reconciler_common.go:293] "Volume detached for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.497717 4492 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.497726 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.497734 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s4n52\" (UniqueName: 
\"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.497741 4492 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.497749 4492 reconciler_common.go:293] "Volume detached for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.497757 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.497765 4492 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.497773 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.497781 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.497789 4492 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.497797 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.497804 4492 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.497811 4492 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.497819 4492 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.497826 4492 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.497834 4492 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: 
\"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.497842 4492 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.497849 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.497856 4492 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.497864 4492 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.497872 4492 reconciler_common.go:293] "Volume detached for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.497879 4492 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.497887 4492 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.497898 4492 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.497905 4492 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.497913 4492 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.497921 4492 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.497928 4492 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.497935 4492 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: 
\"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.497945 4492 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.497953 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.497961 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.497968 4492 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.497975 4492 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.497982 4492 reconciler_common.go:293] "Volume detached for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.497990 4492 reconciler_common.go:293] "Volume detached for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.497996 4492 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.498004 4492 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.498012 4492 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.498019 4492 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.498027 4492 reconciler_common.go:293] "Volume detached for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.498034 4492 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: 
\"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.498042 4492 reconciler_common.go:293] "Volume detached for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.498058 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.498066 4492 reconciler_common.go:293] "Volume detached for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.498074 4492 reconciler_common.go:293] "Volume detached for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.498081 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.498088 4492 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.498096 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.498103 4492 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.498109 4492 reconciler_common.go:293] "Volume detached for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.498116 4492 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.498124 4492 reconciler_common.go:293] "Volume detached for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.498131 4492 reconciler_common.go:293] "Volume detached for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.498139 4492 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.498148 4492 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.498155 4492 reconciler_common.go:293] "Volume detached for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.498163 4492 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.498185 4492 reconciler_common.go:293] "Volume detached for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.498193 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.498202 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.498209 4492 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.498216 4492 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.498224 4492 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.498232 4492 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.498240 4492 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.498248 4492 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.498257 4492 reconciler_common.go:293] "Volume detached for volume 
\"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.498264 4492 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.498272 4492 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.498279 4492 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.498286 4492 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.498294 4492 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.498303 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.498311 4492 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.498318 4492 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.498326 4492 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.498333 4492 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.498341 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.498350 4492 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.498357 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zgdk5\" (UniqueName: 
\"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.498364 4492 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.498372 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") on node \"crc\" DevicePath \"\"" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.510912 4492 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.512628 4492 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="6a6dd3695118a8c09585a7cfceb42ac5ae5898562c5f6442da6936f849a4e9f8" exitCode=255 Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.512676 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"6a6dd3695118a8c09585a7cfceb42ac5ae5898562c5f6442da6936f849a4e9f8"} Nov 26 06:48:42 crc kubenswrapper[4492]: E1126 06:48:42.517040 4492 kubelet.go:1929] "Failed creating a mirror pod for" err="pods \"kube-apiserver-crc\" already exists" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.517308 4492 scope.go:117] "RemoveContainer" containerID="6a6dd3695118a8c09585a7cfceb42ac5ae5898562c5f6442da6936f849a4e9f8" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.520689 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.530106 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.539804 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"995e57c0-8e79-4857-8451-c7f7b51a05d3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70a3d5141782b7c4dcb2ef96a026e1fb4f4ef101c967f64a20ece097bbdf007f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1684e95c263df92c92efdd2240417c071f977b58d825abd2155277d7cce1fd57\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://15f4e01e3336948991062e895549de16371af485e1cc1f32591b5dc4fc62472b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a6dd3695118a8c09585a7cfceb42ac5ae5898562c5f6442da6936f849a4e9f8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a6dd3695118a8c09585a7cfceb42ac5ae5898562c5f6442da6936f849a4e9f8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26
T06:48:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1126 06:48:41.573117 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1126 06:48:41.573321 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 06:48:41.575536 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3533228848/tls.crt::/tmp/serving-cert-3533228848/tls.key\\\\\\\"\\\\nI1126 06:48:41.958496 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 06:48:41.961105 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 06:48:41.961124 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 06:48:41.961145 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 06:48:41.961150 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 06:48:41.965068 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 06:48:41.965092 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:48:41.965097 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:48:41.965101 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 06:48:41.965103 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 06:48:41.965106 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 06:48:41.965108 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 06:48:41.965323 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 06:48:41.966098 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ca15652692a4dd6b01f7ec02b8ff37ac41ed740c4c47d451e0742aa80d0c028\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4421dd0243f7239dfa2ede847b9cb5230663e69a214dca3dfb1a4763ea76b233\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4421dd0243f7239dfa2ede847b9cb5230663e69a214dca3dfb1a4763ea76b233\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:24Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.548933 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.555951 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.562151 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.568626 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.702284 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 26 06:48:42 crc kubenswrapper[4492]: W1126 06:48:42.709908 4492 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod37a5e44f_9a88_4405_be8a_b645485e7312.slice/crio-0e3769086df3b19cad6f365598f1fd043357668601e9f6a2bc846a1e542ac0cb WatchSource:0}: Error finding container 0e3769086df3b19cad6f365598f1fd043357668601e9f6a2bc846a1e542ac0cb: Status 404 returned error can't find the container with id 0e3769086df3b19cad6f365598f1fd043357668601e9f6a2bc846a1e542ac0cb Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.711751 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 26 06:48:42 crc kubenswrapper[4492]: W1126 06:48:42.719831 4492 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podef543e1b_8068_4ea3_b32a_61027b32e95d.slice/crio-fef0392f49b6cf6aa4e39dd79fd0dac17bcfb21a91b66abea3811057ba2eb683 WatchSource:0}: Error finding container fef0392f49b6cf6aa4e39dd79fd0dac17bcfb21a91b66abea3811057ba2eb683: Status 404 returned error can't find the container with id fef0392f49b6cf6aa4e39dd79fd0dac17bcfb21a91b66abea3811057ba2eb683 Nov 26 06:48:42 crc kubenswrapper[4492]: I1126 06:48:42.738767 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 26 06:48:42 crc kubenswrapper[4492]: W1126 06:48:42.752794 4492 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd75a4c96_2883_4a0b_bab2_0fab2b6c0b49.slice/crio-2d4f5e356326bd529d09ad6c451150b226fc555c1cb2604d8b529864a55874c7 WatchSource:0}: Error finding container 2d4f5e356326bd529d09ad6c451150b226fc555c1cb2604d8b529864a55874c7: Status 404 returned error can't find the container with id 2d4f5e356326bd529d09ad6c451150b226fc555c1cb2604d8b529864a55874c7 Nov 26 06:48:43 crc kubenswrapper[4492]: I1126 06:48:43.001112 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 06:48:43 crc kubenswrapper[4492]: I1126 06:48:43.001207 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 06:48:43 crc kubenswrapper[4492]: I1126 06:48:43.001232 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 06:48:43 crc kubenswrapper[4492]: E1126 06:48:43.001289 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 06:48:44.001264515 +0000 UTC m=+19.885152803 (durationBeforeRetry 1s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:48:43 crc kubenswrapper[4492]: E1126 06:48:43.001322 4492 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 26 06:48:43 crc kubenswrapper[4492]: E1126 06:48:43.001337 4492 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 26 06:48:43 crc kubenswrapper[4492]: E1126 06:48:43.001347 4492 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 06:48:43 crc kubenswrapper[4492]: I1126 06:48:43.001343 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 06:48:43 crc kubenswrapper[4492]: I1126 06:48:43.001374 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 06:48:43 crc kubenswrapper[4492]: E1126 06:48:43.001377 4492 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 26 06:48:43 crc kubenswrapper[4492]: E1126 06:48:43.001399 4492 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 26 06:48:43 crc kubenswrapper[4492]: E1126 06:48:43.001411 4492 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 06:48:43 crc kubenswrapper[4492]: E1126 06:48:43.001426 4492 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 26 06:48:43 crc kubenswrapper[4492]: E1126 06:48:43.001387 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" 
failed. No retries permitted until 2025-11-26 06:48:44.001375243 +0000 UTC m=+19.885263542 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 06:48:43 crc kubenswrapper[4492]: E1126 06:48:43.001460 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-26 06:48:44.00145302 +0000 UTC m=+19.885341307 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 06:48:43 crc kubenswrapper[4492]: E1126 06:48:43.001460 4492 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 26 06:48:43 crc kubenswrapper[4492]: E1126 06:48:43.001469 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-26 06:48:44.001464412 +0000 UTC m=+19.885352710 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 26 06:48:43 crc kubenswrapper[4492]: E1126 06:48:43.001537 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-26 06:48:44.00152215 +0000 UTC m=+19.885410448 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 26 06:48:43 crc kubenswrapper[4492]: I1126 06:48:43.438097 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 06:48:43 crc kubenswrapper[4492]: E1126 06:48:43.438217 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 06:48:43 crc kubenswrapper[4492]: I1126 06:48:43.515443 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"2d4f5e356326bd529d09ad6c451150b226fc555c1cb2604d8b529864a55874c7"} Nov 26 06:48:43 crc kubenswrapper[4492]: I1126 06:48:43.517318 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"2644f0895688786b5b70f08011457eed33cb0a7962ac6dde6b60dd3276497011"} Nov 26 06:48:43 crc kubenswrapper[4492]: I1126 06:48:43.517350 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"27d142c5328a6f659cd2cee0b6535403ccbfb07aee1ea29c928c9d80a847f4ba"} Nov 26 06:48:43 crc kubenswrapper[4492]: I1126 06:48:43.517361 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"fef0392f49b6cf6aa4e39dd79fd0dac17bcfb21a91b66abea3811057ba2eb683"} Nov 26 06:48:43 crc kubenswrapper[4492]: I1126 06:48:43.518908 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"78117c73db01f54f893d52844cab11a6257a1f6b6b582fb751065e1acc329620"} Nov 26 06:48:43 crc kubenswrapper[4492]: I1126 06:48:43.518934 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"0e3769086df3b19cad6f365598f1fd043357668601e9f6a2bc846a1e542ac0cb"} Nov 26 06:48:43 crc kubenswrapper[4492]: I1126 06:48:43.521420 4492 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Nov 26 06:48:43 crc kubenswrapper[4492]: I1126 06:48:43.523321 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"ef8e05b7e0643e9159acdd474c0f1ed97db182b0a7ed4f21b475ce6e4c051f52"} Nov 26 06:48:43 crc kubenswrapper[4492]: I1126 06:48:43.523614 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 06:48:43 crc kubenswrapper[4492]: I1126 06:48:43.532336 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:43Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:43 crc kubenswrapper[4492]: I1126 06:48:43.541239 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:43Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:43 crc kubenswrapper[4492]: I1126 06:48:43.551458 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"995e57c0-8e79-4857-8451-c7f7b51a05d3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70a3d5141782b7c4dcb2ef96a026e1fb4f4ef101c967f64a20ece097bbdf007f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1684e95c263df92c92efdd2240417c071f977b58d825abd2155277d7cce1fd57\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://15f4e01e3336948991062e895549de16371af485e1cc1f32591b5dc4fc62472b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a6dd3695118a8c09585a7cfceb42ac5ae5898562c5f6442da6936f849a4e9f8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a6dd3695118a8c09585a7cfceb42ac5ae5898562c5f6442da6936f849a4e9f8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26
T06:48:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1126 06:48:41.573117 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1126 06:48:41.573321 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 06:48:41.575536 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3533228848/tls.crt::/tmp/serving-cert-3533228848/tls.key\\\\\\\"\\\\nI1126 06:48:41.958496 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 06:48:41.961105 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 06:48:41.961124 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 06:48:41.961145 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 06:48:41.961150 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 06:48:41.965068 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 06:48:41.965092 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:48:41.965097 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:48:41.965101 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 06:48:41.965103 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 06:48:41.965106 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 06:48:41.965108 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 06:48:41.965323 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 06:48:41.966098 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ca15652692a4dd6b01f7ec02b8ff37ac41ed740c4c47d451e0742aa80d0c028\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4421dd0243f7239dfa2ede847b9cb5230663e69a214dca3dfb1a4763ea76b233\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4421dd0243f7239dfa2ede847b9cb5230663e69a214dca3dfb1a4763ea76b233\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:24Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:43Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:43 crc kubenswrapper[4492]: I1126 06:48:43.563553 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:43Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:43 crc kubenswrapper[4492]: I1126 06:48:43.571666 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:43Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:43 crc kubenswrapper[4492]: I1126 06:48:43.579846 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2644f0895688786b5b70f08011457eed33cb0a7962ac6dde6b60dd3276497011\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://27d142c5328a6f659cd2cee0b6535403ccbfb07aee1ea29c928c9d80a847f4ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:43Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:43 crc kubenswrapper[4492]: I1126 06:48:43.591247 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:43Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:43 crc kubenswrapper[4492]: I1126 06:48:43.603355 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2644f0895688786b5b70f08011457eed33cb0a7962ac6dde6b60dd3276497011\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://27d142c5328a6f659cd2cee0b6535403ccbfb07aee1ea29c928c9d80a847f4ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/
webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:43Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:43 crc kubenswrapper[4492]: I1126 06:48:43.615358 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:43Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:43 crc kubenswrapper[4492]: I1126 06:48:43.626207 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"995e57c0-8e79-4857-8451-c7f7b51a05d3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70a3d5141782b7c4dcb2ef96a026e1fb4f4ef101c967f64a20ece097bbdf007f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1684e95c263df92c92efdd2240417c071f977b58d825abd2155277d7cce1fd57\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://15f4e01e3336948991062e895549de16371af485e1cc1f32591b5dc4fc62472b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef8e05b7e0643e9159acdd474c0f1ed97db182b0a7ed4f21b475ce6e4c051f52\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a6dd3695118a8c09585a7cfceb42ac5ae5898562c5f6442da6936f849a4e9f8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T06:48:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1126 06:48:41.573117 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1126 06:48:41.573321 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 06:48:41.575536 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3533228848/tls.crt::/tmp/serving-cert-3533228848/tls.key\\\\\\\"\\\\nI1126 06:48:41.958496 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 06:48:41.961105 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 06:48:41.961124 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 06:48:41.961145 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 06:48:41.961150 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 06:48:41.965068 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 06:48:41.965092 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:48:41.965097 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:48:41.965101 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 06:48:41.965103 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 06:48:41.965106 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 06:48:41.965108 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 06:48:41.965323 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 06:48:41.966098 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ca15652692a4dd6b01f7ec02b8ff37ac41ed740c4c47d451e0742aa80d0c028\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4421dd0243f7239dfa2ede847b9cb5230663e69a214dca3dfb1a4763ea76b233\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4421dd0243f7239dfa2ede847b9cb5230663e69a214dca3dfb1a4763ea76b233\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:24Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:43Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:43 crc kubenswrapper[4492]: I1126 06:48:43.637950 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:43Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:43 crc kubenswrapper[4492]: I1126 06:48:43.649394 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:43Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:43 crc kubenswrapper[4492]: I1126 06:48:43.671967 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://78117c73db01f54f893d52844cab11a6257a1f6b6b582fb751065e1acc329620\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:43Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:43 crc kubenswrapper[4492]: I1126 06:48:43.684617 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:43Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:44 crc kubenswrapper[4492]: I1126 06:48:44.006450 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 06:48:44 crc kubenswrapper[4492]: I1126 06:48:44.006516 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 06:48:44 crc kubenswrapper[4492]: I1126 06:48:44.006539 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 06:48:44 crc kubenswrapper[4492]: I1126 06:48:44.006564 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 06:48:44 crc kubenswrapper[4492]: I1126 06:48:44.006586 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 06:48:44 crc kubenswrapper[4492]: E1126 06:48:44.006652 4492 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 26 06:48:44 crc kubenswrapper[4492]: E1126 06:48:44.006658 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 06:48:46.006629564 +0000 UTC m=+21.890517862 (durationBeforeRetry 2s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:48:44 crc kubenswrapper[4492]: E1126 06:48:44.006678 4492 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 26 06:48:44 crc kubenswrapper[4492]: E1126 06:48:44.006704 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-26 06:48:46.006689287 +0000 UTC m=+21.890577595 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 26 06:48:44 crc kubenswrapper[4492]: E1126 06:48:44.006707 4492 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 26 06:48:44 crc kubenswrapper[4492]: E1126 06:48:44.006722 4492 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 06:48:44 crc kubenswrapper[4492]: E1126 06:48:44.006768 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-26 06:48:46.006751964 +0000 UTC m=+21.890640263 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 06:48:44 crc kubenswrapper[4492]: E1126 06:48:44.006776 4492 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 26 06:48:44 crc kubenswrapper[4492]: E1126 06:48:44.006791 4492 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 26 06:48:44 crc kubenswrapper[4492]: E1126 06:48:44.006805 4492 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 06:48:44 crc kubenswrapper[4492]: E1126 06:48:44.006819 4492 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 26 06:48:44 crc kubenswrapper[4492]: E1126 06:48:44.006840 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-26 06:48:46.006832986 +0000 UTC m=+21.890721285 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 06:48:44 crc kubenswrapper[4492]: E1126 06:48:44.006857 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-26 06:48:46.006849047 +0000 UTC m=+21.890737344 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 26 06:48:44 crc kubenswrapper[4492]: I1126 06:48:44.438194 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 06:48:44 crc kubenswrapper[4492]: E1126 06:48:44.438293 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 06:48:44 crc kubenswrapper[4492]: I1126 06:48:44.438427 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 06:48:44 crc kubenswrapper[4492]: E1126 06:48:44.438577 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 06:48:44 crc kubenswrapper[4492]: I1126 06:48:44.448825 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:44Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:44 crc kubenswrapper[4492]: I1126 06:48:44.458142 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"995e57c0-8e79-4857-8451-c7f7b51a05d3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70a3d5141782b7c4dcb2ef96a026e1fb4f4ef101c967f64a20ece097bbdf007f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1684e95c263df92c92efdd2240417c071f977b58d825abd2155277d7cce1fd57\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://15f4e01e3336948991062e895549de16371af485e1cc1f32591b5dc4fc62472b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef8e05b7e0643e9159acdd474c0f1ed97db182b0a7ed4f21b475ce6e4c051f52\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\
\":\\\"cri-o://6a6dd3695118a8c09585a7cfceb42ac5ae5898562c5f6442da6936f849a4e9f8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T06:48:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1126 06:48:41.573117 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1126 06:48:41.573321 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 06:48:41.575536 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3533228848/tls.crt::/tmp/serving-cert-3533228848/tls.key\\\\\\\"\\\\nI1126 06:48:41.958496 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 06:48:41.961105 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 06:48:41.961124 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 06:48:41.961145 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 06:48:41.961150 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 06:48:41.965068 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 06:48:41.965092 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:48:41.965097 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:48:41.965101 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 06:48:41.965103 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 06:48:41.965106 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 06:48:41.965108 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 06:48:41.965323 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 06:48:41.966098 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ca15652692a4dd6b01f7ec02b8ff37ac41ed740c4c47d451e0742aa80d0c028\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4421dd0243f7239dfa2ede847b9cb5230663e69a214dca3dfb1a4763ea76b233\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4421dd0243f7239dfa2ede847b9cb5230663e69a214dca3dfb1a4763ea76b233\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:24Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:44Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:44 crc kubenswrapper[4492]: I1126 06:48:44.467552 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:44Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:44 crc kubenswrapper[4492]: I1126 06:48:44.476291 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:44Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:44 crc kubenswrapper[4492]: I1126 06:48:44.485428 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2644f0895688786b5b70f08011457eed33cb0a7962ac6dde6b60dd3276497011\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://27d142c5328a6f659cd2cee0b6535403ccbfb07aee1ea29c928c9d80a847f4ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:44Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:44 crc kubenswrapper[4492]: I1126 06:48:44.498336 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://78117c73db01f54f893d52844cab11a6257a1f6b6b582fb751065e1acc329620\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:44Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:44 crc kubenswrapper[4492]: I1126 06:48:44.509001 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:44Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:44 crc kubenswrapper[4492]: I1126 06:48:44.527821 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"b4aa19f79274209a31db5cfe0a8ff6f71000fc4efb2d65dfab3f719d3a7f1ee9"} Nov 26 06:48:44 crc kubenswrapper[4492]: I1126 06:48:44.537543 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://78117c73db01f54f893d52844cab11a6257a1f6b6b582fb751065e1acc329620\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:44Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:44 crc kubenswrapper[4492]: I1126 06:48:44.545476 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4aa19f79274209a31db5cfe0a8ff6f71000fc4efb2d65dfab3f719d3a7f1ee9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:44Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:44 crc kubenswrapper[4492]: I1126 06:48:44.555967 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"995e57c0-8e79-4857-8451-c7f7b51a05d3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70a3d5141782b7c4dcb2ef96a026e1fb4f4ef101c967f64a20ece097bbdf007f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1684e95c263df92c92efdd2240417c071f977b58d825abd2155277d7cce1fd57\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://15f4e01e3336948991062e895549de16371af485e1cc1f32591b5dc4fc62472b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef8e05b7e0643e9159acdd474c0f1ed97db182b0a7ed4f21b475ce6e4c051f52\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a6dd3695118a8c09585a7cfceb42ac5ae5898562c5f6442da6936f849a4e9f8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T06:48:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1126 06:48:41.573117 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1126 06:48:41.573321 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 06:48:41.575536 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3533228848/tls.crt::/tmp/serving-cert-3533228848/tls.key\\\\\\\"\\\\nI1126 06:48:41.958496 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 06:48:41.961105 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 06:48:41.961124 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 06:48:41.961145 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 06:48:41.961150 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 06:48:41.965068 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 06:48:41.965092 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:48:41.965097 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:48:41.965101 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 06:48:41.965103 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 06:48:41.965106 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 06:48:41.965108 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 06:48:41.965323 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 06:48:41.966098 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ca15652692a4dd6b01f7ec02b8ff37ac41ed740c4c47d451e0742aa80d0c028\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4421dd0243f7239dfa2ede847b9cb5230663e69a214dca3dfb1a4763ea76b233\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4421dd0243f7239dfa2ede847b9cb5230663e69a214dca3dfb1a4763ea76b233\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:24Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:44Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:44 crc kubenswrapper[4492]: I1126 06:48:44.566977 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:44Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:44 crc kubenswrapper[4492]: I1126 06:48:44.574755 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:44Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:44 crc kubenswrapper[4492]: I1126 06:48:44.582681 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2644f0895688786b5b70f08011457eed33cb0a7962ac6dde6b60dd3276497011\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://27d142c5328a6f659cd2cee0b6535403ccbfb07aee1ea29c928c9d80a847f4ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:44Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:44 crc kubenswrapper[4492]: I1126 06:48:44.590804 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:44Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:44 crc kubenswrapper[4492]: I1126 06:48:44.781596 4492 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 06:48:44 crc kubenswrapper[4492]: I1126 06:48:44.782857 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:44 crc kubenswrapper[4492]: I1126 06:48:44.782894 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:44 crc kubenswrapper[4492]: I1126 06:48:44.782903 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:44 crc kubenswrapper[4492]: I1126 06:48:44.782954 4492 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 26 06:48:44 crc kubenswrapper[4492]: I1126 06:48:44.788336 4492 kubelet_node_status.go:115] "Node was previously registered" node="crc" Nov 26 06:48:44 crc kubenswrapper[4492]: I1126 06:48:44.788481 4492 kubelet_node_status.go:79] "Successfully registered node" node="crc" Nov 26 06:48:44 crc kubenswrapper[4492]: I1126 06:48:44.789162 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:44 crc kubenswrapper[4492]: I1126 06:48:44.789207 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:44 crc kubenswrapper[4492]: I1126 06:48:44.789217 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:44 crc kubenswrapper[4492]: I1126 06:48:44.789231 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:44 crc kubenswrapper[4492]: I1126 06:48:44.789240 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:44Z","lastTransitionTime":"2025-11-26T06:48:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:48:44 crc kubenswrapper[4492]: E1126 06:48:44.801679 4492 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148056Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608856Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"5a30a4c6-2314-4103-8c18-44e795d62516\\\",\\\"systemUUID\\\":\\\"836cf739-0185-4d24-bd92-dec4516ccf4f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:44Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:44 crc kubenswrapper[4492]: I1126 06:48:44.804212 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:44 crc kubenswrapper[4492]: I1126 06:48:44.804238 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 06:48:44 crc kubenswrapper[4492]: I1126 06:48:44.804247 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:44 crc kubenswrapper[4492]: I1126 06:48:44.804258 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:44 crc kubenswrapper[4492]: I1126 06:48:44.804267 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:44Z","lastTransitionTime":"2025-11-26T06:48:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:48:44 crc kubenswrapper[4492]: E1126 06:48:44.812385 4492 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148056Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608856Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"5a30a4c6-2314-4103-8c18-44e795d62516\\\",\\\"systemUUID\\\":\\\"836cf739-0185-4d24-bd92-dec4516ccf4f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:44Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:44 crc kubenswrapper[4492]: I1126 06:48:44.814676 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:44 crc kubenswrapper[4492]: I1126 06:48:44.814714 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 06:48:44 crc kubenswrapper[4492]: I1126 06:48:44.814724 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:44 crc kubenswrapper[4492]: I1126 06:48:44.814737 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:44 crc kubenswrapper[4492]: I1126 06:48:44.814745 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:44Z","lastTransitionTime":"2025-11-26T06:48:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:48:44 crc kubenswrapper[4492]: E1126 06:48:44.824117 4492 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148056Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608856Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"5a30a4c6-2314-4103-8c18-44e795d62516\\\",\\\"systemUUID\\\":\\\"836cf739-0185-4d24-bd92-dec4516ccf4f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:44Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:44 crc kubenswrapper[4492]: I1126 06:48:44.826291 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:44 crc kubenswrapper[4492]: I1126 06:48:44.826322 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 06:48:44 crc kubenswrapper[4492]: I1126 06:48:44.826332 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:44 crc kubenswrapper[4492]: I1126 06:48:44.826342 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:44 crc kubenswrapper[4492]: I1126 06:48:44.826349 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:44Z","lastTransitionTime":"2025-11-26T06:48:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:48:44 crc kubenswrapper[4492]: E1126 06:48:44.834028 4492 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148056Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608856Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"5a30a4c6-2314-4103-8c18-44e795d62516\\\",\\\"systemUUID\\\":\\\"836cf739-0185-4d24-bd92-dec4516ccf4f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:44Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:44 crc kubenswrapper[4492]: I1126 06:48:44.836408 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:44 crc kubenswrapper[4492]: I1126 06:48:44.836440 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 06:48:44 crc kubenswrapper[4492]: I1126 06:48:44.836449 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:44 crc kubenswrapper[4492]: I1126 06:48:44.836462 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:44 crc kubenswrapper[4492]: I1126 06:48:44.836472 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:44Z","lastTransitionTime":"2025-11-26T06:48:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:48:44 crc kubenswrapper[4492]: E1126 06:48:44.844504 4492 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148056Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608856Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"5a30a4c6-2314-4103-8c18-44e795d62516\\\",\\\"systemUUID\\\":\\\"836cf739-0185-4d24-bd92-dec4516ccf4f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:44Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:44 crc kubenswrapper[4492]: E1126 06:48:44.844607 4492 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 26 06:48:44 crc kubenswrapper[4492]: I1126 06:48:44.845676 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 26 06:48:44 crc kubenswrapper[4492]: I1126 06:48:44.845707 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:44 crc kubenswrapper[4492]: I1126 06:48:44.845715 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:44 crc kubenswrapper[4492]: I1126 06:48:44.845730 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:44 crc kubenswrapper[4492]: I1126 06:48:44.845740 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:44Z","lastTransitionTime":"2025-11-26T06:48:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:48:44 crc kubenswrapper[4492]: I1126 06:48:44.947859 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:44 crc kubenswrapper[4492]: I1126 06:48:44.947888 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:44 crc kubenswrapper[4492]: I1126 06:48:44.947899 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:44 crc kubenswrapper[4492]: I1126 06:48:44.947909 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:44 crc kubenswrapper[4492]: I1126 06:48:44.947918 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:44Z","lastTransitionTime":"2025-11-26T06:48:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:48:45 crc kubenswrapper[4492]: I1126 06:48:45.051830 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:45 crc kubenswrapper[4492]: I1126 06:48:45.051863 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:45 crc kubenswrapper[4492]: I1126 06:48:45.051872 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:45 crc kubenswrapper[4492]: I1126 06:48:45.051884 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:45 crc kubenswrapper[4492]: I1126 06:48:45.051892 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:45Z","lastTransitionTime":"2025-11-26T06:48:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:48:45 crc kubenswrapper[4492]: I1126 06:48:45.153719 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:45 crc kubenswrapper[4492]: I1126 06:48:45.153746 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:45 crc kubenswrapper[4492]: I1126 06:48:45.153754 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:45 crc kubenswrapper[4492]: I1126 06:48:45.153763 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:45 crc kubenswrapper[4492]: I1126 06:48:45.153771 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:45Z","lastTransitionTime":"2025-11-26T06:48:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:48:45 crc kubenswrapper[4492]: I1126 06:48:45.255697 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:45 crc kubenswrapper[4492]: I1126 06:48:45.255812 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:45 crc kubenswrapper[4492]: I1126 06:48:45.255892 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:45 crc kubenswrapper[4492]: I1126 06:48:45.255959 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:45 crc kubenswrapper[4492]: I1126 06:48:45.256018 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:45Z","lastTransitionTime":"2025-11-26T06:48:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:48:45 crc kubenswrapper[4492]: I1126 06:48:45.357549 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:45 crc kubenswrapper[4492]: I1126 06:48:45.357583 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:45 crc kubenswrapper[4492]: I1126 06:48:45.357593 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:45 crc kubenswrapper[4492]: I1126 06:48:45.357606 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:45 crc kubenswrapper[4492]: I1126 06:48:45.357615 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:45Z","lastTransitionTime":"2025-11-26T06:48:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:48:45 crc kubenswrapper[4492]: I1126 06:48:45.437952 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 06:48:45 crc kubenswrapper[4492]: E1126 06:48:45.438067 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 06:48:45 crc kubenswrapper[4492]: I1126 06:48:45.459649 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:45 crc kubenswrapper[4492]: I1126 06:48:45.459677 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:45 crc kubenswrapper[4492]: I1126 06:48:45.459687 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:45 crc kubenswrapper[4492]: I1126 06:48:45.459698 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:45 crc kubenswrapper[4492]: I1126 06:48:45.459707 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:45Z","lastTransitionTime":"2025-11-26T06:48:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:48:45 crc kubenswrapper[4492]: I1126 06:48:45.561542 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:45 crc kubenswrapper[4492]: I1126 06:48:45.561580 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:45 crc kubenswrapper[4492]: I1126 06:48:45.561590 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:45 crc kubenswrapper[4492]: I1126 06:48:45.561606 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:45 crc kubenswrapper[4492]: I1126 06:48:45.561616 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:45Z","lastTransitionTime":"2025-11-26T06:48:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:48:45 crc kubenswrapper[4492]: I1126 06:48:45.663776 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:45 crc kubenswrapper[4492]: I1126 06:48:45.663812 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:45 crc kubenswrapper[4492]: I1126 06:48:45.663822 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:45 crc kubenswrapper[4492]: I1126 06:48:45.663835 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:45 crc kubenswrapper[4492]: I1126 06:48:45.663846 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:45Z","lastTransitionTime":"2025-11-26T06:48:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:48:45 crc kubenswrapper[4492]: I1126 06:48:45.765866 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:45 crc kubenswrapper[4492]: I1126 06:48:45.765895 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:45 crc kubenswrapper[4492]: I1126 06:48:45.765906 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:45 crc kubenswrapper[4492]: I1126 06:48:45.765916 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:45 crc kubenswrapper[4492]: I1126 06:48:45.765924 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:45Z","lastTransitionTime":"2025-11-26T06:48:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:48:45 crc kubenswrapper[4492]: I1126 06:48:45.868033 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:45 crc kubenswrapper[4492]: I1126 06:48:45.868068 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:45 crc kubenswrapper[4492]: I1126 06:48:45.868076 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:45 crc kubenswrapper[4492]: I1126 06:48:45.868086 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:45 crc kubenswrapper[4492]: I1126 06:48:45.868094 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:45Z","lastTransitionTime":"2025-11-26T06:48:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:48:45 crc kubenswrapper[4492]: I1126 06:48:45.969879 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:45 crc kubenswrapper[4492]: I1126 06:48:45.969903 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:45 crc kubenswrapper[4492]: I1126 06:48:45.969911 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:45 crc kubenswrapper[4492]: I1126 06:48:45.969921 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:45 crc kubenswrapper[4492]: I1126 06:48:45.969929 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:45Z","lastTransitionTime":"2025-11-26T06:48:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:48:46 crc kubenswrapper[4492]: I1126 06:48:46.018416 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 06:48:46 crc kubenswrapper[4492]: E1126 06:48:46.018514 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 06:48:50.018499696 +0000 UTC m=+25.902387993 (durationBeforeRetry 4s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:48:46 crc kubenswrapper[4492]: I1126 06:48:46.018567 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 06:48:46 crc kubenswrapper[4492]: I1126 06:48:46.018590 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 06:48:46 crc kubenswrapper[4492]: E1126 06:48:46.018681 4492 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 26 06:48:46 crc kubenswrapper[4492]: E1126 06:48:46.018714 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-26 06:48:50.018706223 +0000 UTC m=+25.902594522 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 26 06:48:46 crc kubenswrapper[4492]: E1126 06:48:46.018780 4492 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 26 06:48:46 crc kubenswrapper[4492]: E1126 06:48:46.018848 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-26 06:48:50.018834584 +0000 UTC m=+25.902722882 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 26 06:48:46 crc kubenswrapper[4492]: I1126 06:48:46.018927 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 06:48:46 crc kubenswrapper[4492]: I1126 06:48:46.018953 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 06:48:46 crc kubenswrapper[4492]: E1126 06:48:46.019013 4492 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 26 06:48:46 crc kubenswrapper[4492]: E1126 06:48:46.019046 4492 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 26 06:48:46 crc kubenswrapper[4492]: E1126 06:48:46.019068 4492 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 06:48:46 crc kubenswrapper[4492]: E1126 06:48:46.019105 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-26 06:48:50.01909779 +0000 UTC m=+25.902986088 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 06:48:46 crc kubenswrapper[4492]: E1126 06:48:46.019155 4492 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 26 06:48:46 crc kubenswrapper[4492]: E1126 06:48:46.019214 4492 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 26 06:48:46 crc kubenswrapper[4492]: E1126 06:48:46.019226 4492 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 06:48:46 crc kubenswrapper[4492]: E1126 06:48:46.019289 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-26 06:48:50.019274532 +0000 UTC m=+25.903162830 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 06:48:46 crc kubenswrapper[4492]: I1126 06:48:46.071762 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:46 crc kubenswrapper[4492]: I1126 06:48:46.071784 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:46 crc kubenswrapper[4492]: I1126 06:48:46.071792 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:46 crc kubenswrapper[4492]: I1126 06:48:46.071804 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:46 crc kubenswrapper[4492]: I1126 06:48:46.071813 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:46Z","lastTransitionTime":"2025-11-26T06:48:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:48:46 crc kubenswrapper[4492]: I1126 06:48:46.173935 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:46 crc kubenswrapper[4492]: I1126 06:48:46.173963 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:46 crc kubenswrapper[4492]: I1126 06:48:46.173971 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:46 crc kubenswrapper[4492]: I1126 06:48:46.173983 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:46 crc kubenswrapper[4492]: I1126 06:48:46.173992 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:46Z","lastTransitionTime":"2025-11-26T06:48:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:48:46 crc kubenswrapper[4492]: I1126 06:48:46.275674 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:46 crc kubenswrapper[4492]: I1126 06:48:46.275697 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:46 crc kubenswrapper[4492]: I1126 06:48:46.275706 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:46 crc kubenswrapper[4492]: I1126 06:48:46.275732 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:46 crc kubenswrapper[4492]: I1126 06:48:46.275741 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:46Z","lastTransitionTime":"2025-11-26T06:48:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:48:46 crc kubenswrapper[4492]: I1126 06:48:46.377424 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:46 crc kubenswrapper[4492]: I1126 06:48:46.377453 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:46 crc kubenswrapper[4492]: I1126 06:48:46.377462 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:46 crc kubenswrapper[4492]: I1126 06:48:46.377474 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:46 crc kubenswrapper[4492]: I1126 06:48:46.377482 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:46Z","lastTransitionTime":"2025-11-26T06:48:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:48:46 crc kubenswrapper[4492]: I1126 06:48:46.438466 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 06:48:46 crc kubenswrapper[4492]: I1126 06:48:46.438478 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 06:48:46 crc kubenswrapper[4492]: E1126 06:48:46.438562 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 06:48:46 crc kubenswrapper[4492]: E1126 06:48:46.438692 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 06:48:46 crc kubenswrapper[4492]: I1126 06:48:46.479731 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:46 crc kubenswrapper[4492]: I1126 06:48:46.479761 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:46 crc kubenswrapper[4492]: I1126 06:48:46.479770 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:46 crc kubenswrapper[4492]: I1126 06:48:46.479780 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:46 crc kubenswrapper[4492]: I1126 06:48:46.479788 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:46Z","lastTransitionTime":"2025-11-26T06:48:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:48:46 crc kubenswrapper[4492]: I1126 06:48:46.581420 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:46 crc kubenswrapper[4492]: I1126 06:48:46.581445 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:46 crc kubenswrapper[4492]: I1126 06:48:46.581454 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:46 crc kubenswrapper[4492]: I1126 06:48:46.581463 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:46 crc kubenswrapper[4492]: I1126 06:48:46.581472 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:46Z","lastTransitionTime":"2025-11-26T06:48:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:48:46 crc kubenswrapper[4492]: I1126 06:48:46.682950 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:46 crc kubenswrapper[4492]: I1126 06:48:46.682976 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:46 crc kubenswrapper[4492]: I1126 06:48:46.682985 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:46 crc kubenswrapper[4492]: I1126 06:48:46.682996 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:46 crc kubenswrapper[4492]: I1126 06:48:46.683003 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:46Z","lastTransitionTime":"2025-11-26T06:48:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:48:46 crc kubenswrapper[4492]: I1126 06:48:46.784603 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:46 crc kubenswrapper[4492]: I1126 06:48:46.784653 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:46 crc kubenswrapper[4492]: I1126 06:48:46.784663 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:46 crc kubenswrapper[4492]: I1126 06:48:46.784674 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:46 crc kubenswrapper[4492]: I1126 06:48:46.784683 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:46Z","lastTransitionTime":"2025-11-26T06:48:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:48:46 crc kubenswrapper[4492]: I1126 06:48:46.886245 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:46 crc kubenswrapper[4492]: I1126 06:48:46.886275 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:46 crc kubenswrapper[4492]: I1126 06:48:46.886284 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:46 crc kubenswrapper[4492]: I1126 06:48:46.886295 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:46 crc kubenswrapper[4492]: I1126 06:48:46.886303 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:46Z","lastTransitionTime":"2025-11-26T06:48:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:48:46 crc kubenswrapper[4492]: I1126 06:48:46.988091 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:46 crc kubenswrapper[4492]: I1126 06:48:46.988128 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:46 crc kubenswrapper[4492]: I1126 06:48:46.988138 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:46 crc kubenswrapper[4492]: I1126 06:48:46.988155 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:46 crc kubenswrapper[4492]: I1126 06:48:46.988164 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:46Z","lastTransitionTime":"2025-11-26T06:48:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:48:47 crc kubenswrapper[4492]: I1126 06:48:47.089941 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:47 crc kubenswrapper[4492]: I1126 06:48:47.089982 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:47 crc kubenswrapper[4492]: I1126 06:48:47.090003 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:47 crc kubenswrapper[4492]: I1126 06:48:47.090018 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:47 crc kubenswrapper[4492]: I1126 06:48:47.090028 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:47Z","lastTransitionTime":"2025-11-26T06:48:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:48:47 crc kubenswrapper[4492]: I1126 06:48:47.192006 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:47 crc kubenswrapper[4492]: I1126 06:48:47.192188 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:47 crc kubenswrapper[4492]: I1126 06:48:47.192265 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:47 crc kubenswrapper[4492]: I1126 06:48:47.192333 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:47 crc kubenswrapper[4492]: I1126 06:48:47.192391 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:47Z","lastTransitionTime":"2025-11-26T06:48:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:48:47 crc kubenswrapper[4492]: I1126 06:48:47.294763 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:47 crc kubenswrapper[4492]: I1126 06:48:47.294802 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:47 crc kubenswrapper[4492]: I1126 06:48:47.294812 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:47 crc kubenswrapper[4492]: I1126 06:48:47.294825 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:47 crc kubenswrapper[4492]: I1126 06:48:47.294833 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:47Z","lastTransitionTime":"2025-11-26T06:48:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:48:47 crc kubenswrapper[4492]: I1126 06:48:47.396392 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:47 crc kubenswrapper[4492]: I1126 06:48:47.396425 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:47 crc kubenswrapper[4492]: I1126 06:48:47.396435 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:47 crc kubenswrapper[4492]: I1126 06:48:47.396449 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:47 crc kubenswrapper[4492]: I1126 06:48:47.396458 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:47Z","lastTransitionTime":"2025-11-26T06:48:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:48:47 crc kubenswrapper[4492]: I1126 06:48:47.438121 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 06:48:47 crc kubenswrapper[4492]: E1126 06:48:47.438249 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 06:48:47 crc kubenswrapper[4492]: I1126 06:48:47.497711 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:47 crc kubenswrapper[4492]: I1126 06:48:47.497737 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:47 crc kubenswrapper[4492]: I1126 06:48:47.497745 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:47 crc kubenswrapper[4492]: I1126 06:48:47.497755 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:47 crc kubenswrapper[4492]: I1126 06:48:47.497762 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:47Z","lastTransitionTime":"2025-11-26T06:48:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:48:47 crc kubenswrapper[4492]: I1126 06:48:47.512935 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 06:48:47 crc kubenswrapper[4492]: I1126 06:48:47.515579 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 06:48:47 crc kubenswrapper[4492]: I1126 06:48:47.519552 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/kube-controller-manager-crc"] Nov 26 06:48:47 crc kubenswrapper[4492]: I1126 06:48:47.523296 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://78117c73db01f54f893d52844cab11a6257a1f6b6b582fb751065e1acc329620\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:47Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:47 crc kubenswrapper[4492]: I1126 06:48:47.532215 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4aa19f79274209a31db5cfe0a8ff6f71000fc4efb2d65dfab3f719d3a7f1ee9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:47Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:47 crc kubenswrapper[4492]: E1126 06:48:47.537094 4492 kubelet.go:1929] "Failed creating a mirror pod for" err="pods \"kube-controller-manager-crc\" already exists" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 06:48:47 crc kubenswrapper[4492]: I1126 06:48:47.541462 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"995e57c0-8e79-4857-8451-c7f7b51a05d3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70a3d5141782b7c4dcb2ef96a026e1fb4f4ef101c967f64a20ece097bbdf007f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1684e95c263df92c92efdd2240417c071f977b58d825abd2155277d7cce1fd57\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://15f4e01e3336948991062e895549de16371af485e1cc1f32591b5dc4fc62472b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef8e05b7e0643e9159acdd474c0f1ed97db182b0a7ed4f21b475ce6e4c051f52\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a6dd3695118a8c09585a7cfceb42ac5ae5898562c5f6442da6936f849a4e9f8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T06:48:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1126 06:48:41.573117 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1126 06:48:41.573321 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 06:48:41.575536 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3533228848/tls.crt::/tmp/serving-cert-3533228848/tls.key\\\\\\\"\\\\nI1126 06:48:41.958496 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 06:48:41.961105 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 06:48:41.961124 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 06:48:41.961145 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 06:48:41.961150 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 06:48:41.965068 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 06:48:41.965092 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:48:41.965097 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:48:41.965101 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 06:48:41.965103 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 06:48:41.965106 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 06:48:41.965108 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 06:48:41.965323 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 06:48:41.966098 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ca15652692a4dd6b01f7ec02b8ff37ac41ed740c4c47d451e0742aa80d0c028\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4421dd0243f7239dfa2ede847b9cb5230663e69a214dca3dfb1a4763ea76b233\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4421dd0243f7239dfa2ede847b9cb5230663e69a214dca3dfb1a4763ea76b233\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:24Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:47Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:47 crc kubenswrapper[4492]: I1126 06:48:47.549470 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:47Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:47 crc kubenswrapper[4492]: I1126 06:48:47.558532 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:47Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:47 crc kubenswrapper[4492]: I1126 06:48:47.567100 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2644f0895688786b5b70f08011457eed33cb0a7962ac6dde6b60dd3276497011\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://27d142c5328a6f659cd2cee0b6535403ccbfb07aee1ea29c928c9d80a847f4ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:47Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:47 crc kubenswrapper[4492]: I1126 06:48:47.576929 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:47Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:47 crc kubenswrapper[4492]: I1126 06:48:47.590378 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2644f0895688786b5b70f08011457eed33cb0a7962ac6dde6b60dd3276497011\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://27d142c5328a6f659cd2cee0b6535403ccbfb07aee1ea29c928c9d80a847f4ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/
webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:47Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:47 crc kubenswrapper[4492]: I1126 06:48:47.598399 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:47Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:47 crc kubenswrapper[4492]: I1126 06:48:47.599432 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:47 crc kubenswrapper[4492]: I1126 06:48:47.599456 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:47 crc kubenswrapper[4492]: I1126 06:48:47.599464 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:47 crc kubenswrapper[4492]: I1126 06:48:47.599475 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:47 crc kubenswrapper[4492]: I1126 06:48:47.599492 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:47Z","lastTransitionTime":"2025-11-26T06:48:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:48:47 crc kubenswrapper[4492]: I1126 06:48:47.607257 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"995e57c0-8e79-4857-8451-c7f7b51a05d3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70a3d5141782b7c4dcb2ef96a026e1fb4f4ef101c967f64a20ece097bbdf007f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1684e95c263df92c92efdd2240417c071f977b58d825abd2155277d7cce1fd57\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://15f4e01e3336948991062e895549de16371af485e1cc1f32591b5dc4fc62472b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef8e05b7e0643e9159acdd474c0f1ed97db182b0a7ed4f21b475ce6e4c051f52\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a6dd3695118a8c09585a7cfceb42ac5ae5898562c5f6442da6936f849a4e9f8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T06:48:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1126 06:48:41.573117 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1126 06:48:41.573321 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 06:48:41.575536 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3533228848/tls.crt::/tmp/serving-cert-3533228848/tls.key\\\\\\\"\\\\nI1126 06:48:41.958496 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 06:48:41.961105 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 06:48:41.961124 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 06:48:41.961145 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 06:48:41.961150 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 06:48:41.965068 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 06:48:41.965092 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:48:41.965097 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:48:41.965101 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 06:48:41.965103 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 06:48:41.965106 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 06:48:41.965108 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 06:48:41.965323 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 06:48:41.966098 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ca15652692a4dd6b01f7ec02b8ff37ac41ed740c4c47d451e0742aa80d0c028\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4421dd0243f7239dfa2ede847b9cb5230663e69a214dca3dfb1a4763ea76b233\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4421dd0243f7239dfa2ede847b9cb5230663e69a214dca3dfb1a4763ea76b233\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:24Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:47Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:47 crc kubenswrapper[4492]: I1126 06:48:47.615373 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:47Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:47 crc kubenswrapper[4492]: I1126 06:48:47.623468 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:47Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:47 crc kubenswrapper[4492]: I1126 06:48:47.632103 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://78117c73db01f54f893d52844cab11a6257a1f6b6b582fb751065e1acc329620\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:47Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:47 crc kubenswrapper[4492]: I1126 06:48:47.639665 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4aa19f79274209a31db5cfe0a8ff6f71000fc4efb2d65dfab3f719d3a7f1ee9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:47Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:47 crc kubenswrapper[4492]: I1126 06:48:47.647730 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"779b4f9a-92b7-4dcc-938a-e4de5decd688\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e170e91f442c9f45c7adfc9a5f8435cb51135522d5ac61f29829834c1f797e0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a8187b933b520c7a9c1c7f798f841f3892c249f52eddd13c0c7585a8bc916f20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b87661ddeafdf124a87d6bc50755b340e32d88bbc35a005ae13aa66aa3b39ff4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a8bf4249c1551f054875ff3ef146502de6c99fd3afd10d78b41274196a35a6d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:24Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:47Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:47 crc kubenswrapper[4492]: I1126 06:48:47.700932 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:47 crc kubenswrapper[4492]: I1126 06:48:47.700981 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:47 crc kubenswrapper[4492]: I1126 06:48:47.700990 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:47 crc kubenswrapper[4492]: I1126 06:48:47.701001 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:47 crc kubenswrapper[4492]: I1126 06:48:47.701008 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:47Z","lastTransitionTime":"2025-11-26T06:48:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 26 06:48:47 crc kubenswrapper[4492]: I1126 06:48:47.802664 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:48:47 crc kubenswrapper[4492]: I1126 06:48:47.802691 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:48:47 crc kubenswrapper[4492]: I1126 06:48:47.802701 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:48:47 crc kubenswrapper[4492]: I1126 06:48:47.802714 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:48:47 crc kubenswrapper[4492]: I1126 06:48:47.802724 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:47Z","lastTransitionTime":"2025-11-26T06:48:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:48:47 crc kubenswrapper[4492]: I1126 06:48:47.904336 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:48:47 crc kubenswrapper[4492]: I1126 06:48:47.904374 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:48:47 crc kubenswrapper[4492]: I1126 06:48:47.904384 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:48:47 crc kubenswrapper[4492]: I1126 06:48:47.904399 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:48:47 crc kubenswrapper[4492]: I1126 06:48:47.904409 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:47Z","lastTransitionTime":"2025-11-26T06:48:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.006269 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.006296 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.006304 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.006316 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.006326 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:48Z","lastTransitionTime":"2025-11-26T06:48:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.107739 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.107766 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.107774 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.107786 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.107794 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:48Z","lastTransitionTime":"2025-11-26T06:48:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.209459 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.209484 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.209492 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.209504 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.209513 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:48Z","lastTransitionTime":"2025-11-26T06:48:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.311754 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.311782 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.311789 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.311807 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.311818 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:48Z","lastTransitionTime":"2025-11-26T06:48:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.414035 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.414075 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.414084 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.414096 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.414105 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:48Z","lastTransitionTime":"2025-11-26T06:48:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.437523 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 26 06:48:48 crc kubenswrapper[4492]: E1126 06:48:48.437624 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.437877 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 26 06:48:48 crc kubenswrapper[4492]: E1126 06:48:48.437948 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.515828 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.515853 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.515861 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.515871 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.515882 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:48Z","lastTransitionTime":"2025-11-26T06:48:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.618075 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.618105 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.618113 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.618125 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.618133 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:48Z","lastTransitionTime":"2025-11-26T06:48:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.720165 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.720214 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.720224 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.720237 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.720245 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:48Z","lastTransitionTime":"2025-11-26T06:48:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.824234 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.824284 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.824306 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.824330 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.824344 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:48Z","lastTransitionTime":"2025-11-26T06:48:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.892687 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-additional-cni-plugins-nrzjd"]
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.893324 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-nrzjd"
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.896323 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources"
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.896876 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt"
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.897259 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-lghgp"]
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.897604 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist"
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.898073 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-lghgp"
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.898460 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-daemon-6blv7"]
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.898806 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-5bshd"]
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.898953 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/node-resolver-6lnwf"]
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.899153 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz"
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.899195 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/node-resolver-6lnwf"
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.899349 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-5bshd"
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.899403 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-6blv7"
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.899609 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt"
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.900149 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides"
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.900185 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config"
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.900630 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl"
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.900764 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib"
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.900883 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt"
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.901646 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert"
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.903823 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt"
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.903954 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7"
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.909705 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config"
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.922697 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt"
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.922953 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt"
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.923032 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy"
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.923452 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq"
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.923599 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt"
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.923751 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls"
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.923884 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt"
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.925830 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6"
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.926030 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26
06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.926065 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.926076 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.926092 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.926103 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:48Z","lastTransitionTime":"2025-11-26T06:48:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.928881 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:48Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.941323 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/a471ac3f-0ac0-4110-94bb-194c0de0af26-multus-conf-dir\") pod \"multus-5bshd\" (UID: \"a471ac3f-0ac0-4110-94bb-194c0de0af26\") " pod="openshift-multus/multus-5bshd" Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.941363 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/026c3325-a592-4828-8e4f-08bcb790014a-tuning-conf-dir\") pod \"multus-additional-cni-plugins-nrzjd\" (UID: \"026c3325-a592-4828-8e4f-08bcb790014a\") " pod="openshift-multus/multus-additional-cni-plugins-nrzjd" Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.941389 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/9b104695-0850-4fb3-b2f8-f764435f8694-run-openvswitch\") pod \"ovnkube-node-lghgp\" (UID: \"9b104695-0850-4fb3-b2f8-f764435f8694\") " pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.941406 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x29l5\" (UniqueName: \"kubernetes.io/projected/026c3325-a592-4828-8e4f-08bcb790014a-kube-api-access-x29l5\") pod \"multus-additional-cni-plugins-nrzjd\" (UID: \"026c3325-a592-4828-8e4f-08bcb790014a\") " pod="openshift-multus/multus-additional-cni-plugins-nrzjd" Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.941424 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/9b104695-0850-4fb3-b2f8-f764435f8694-host-cni-bin\") pod \"ovnkube-node-lghgp\" (UID: \"9b104695-0850-4fb3-b2f8-f764435f8694\") " pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.941466 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/9b104695-0850-4fb3-b2f8-f764435f8694-ovn-node-metrics-cert\") pod \"ovnkube-node-lghgp\" (UID: \"9b104695-0850-4fb3-b2f8-f764435f8694\") " pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" Nov 26 
06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.941487 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/a471ac3f-0ac0-4110-94bb-194c0de0af26-cnibin\") pod \"multus-5bshd\" (UID: \"a471ac3f-0ac0-4110-94bb-194c0de0af26\") " pod="openshift-multus/multus-5bshd"
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.941521 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/026c3325-a592-4828-8e4f-08bcb790014a-os-release\") pod \"multus-additional-cni-plugins-nrzjd\" (UID: \"026c3325-a592-4828-8e4f-08bcb790014a\") " pod="openshift-multus/multus-additional-cni-plugins-nrzjd"
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.941536 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/9b104695-0850-4fb3-b2f8-f764435f8694-host-run-ovn-kubernetes\") pod \"ovnkube-node-lghgp\" (UID: \"9b104695-0850-4fb3-b2f8-f764435f8694\") " pod="openshift-ovn-kubernetes/ovnkube-node-lghgp"
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.941548 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/a471ac3f-0ac0-4110-94bb-194c0de0af26-os-release\") pod \"multus-5bshd\" (UID: \"a471ac3f-0ac0-4110-94bb-194c0de0af26\") " pod="openshift-multus/multus-5bshd"
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.941565 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/a471ac3f-0ac0-4110-94bb-194c0de0af26-multus-daemon-config\") pod \"multus-5bshd\" (UID: \"a471ac3f-0ac0-4110-94bb-194c0de0af26\") " pod="openshift-multus/multus-5bshd"
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.941580 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/9b104695-0850-4fb3-b2f8-f764435f8694-log-socket\") pod \"ovnkube-node-lghgp\" (UID: \"9b104695-0850-4fb3-b2f8-f764435f8694\") " pod="openshift-ovn-kubernetes/ovnkube-node-lghgp"
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.941593 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/9b104695-0850-4fb3-b2f8-f764435f8694-host-cni-netd\") pod \"ovnkube-node-lghgp\" (UID: \"9b104695-0850-4fb3-b2f8-f764435f8694\") " pod="openshift-ovn-kubernetes/ovnkube-node-lghgp"
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.941620 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/026c3325-a592-4828-8e4f-08bcb790014a-cnibin\") pod \"multus-additional-cni-plugins-nrzjd\" (UID: \"026c3325-a592-4828-8e4f-08bcb790014a\") " pod="openshift-multus/multus-additional-cni-plugins-nrzjd"
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.941639 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/a471ac3f-0ac0-4110-94bb-194c0de0af26-multus-cni-dir\") pod \"multus-5bshd\" (UID: \"a471ac3f-0ac0-4110-94bb-194c0de0af26\") " pod="openshift-multus/multus-5bshd"
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.941653 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/a471ac3f-0ac0-4110-94bb-194c0de0af26-host-var-lib-cni-multus\") pod \"multus-5bshd\" (UID: \"a471ac3f-0ac0-4110-94bb-194c0de0af26\") " pod="openshift-multus/multus-5bshd"
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.941668 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/9b104695-0850-4fb3-b2f8-f764435f8694-systemd-units\") pod \"ovnkube-node-lghgp\" (UID: \"9b104695-0850-4fb3-b2f8-f764435f8694\") " pod="openshift-ovn-kubernetes/ovnkube-node-lghgp"
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.941682 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/a471ac3f-0ac0-4110-94bb-194c0de0af26-system-cni-dir\") pod \"multus-5bshd\" (UID: \"a471ac3f-0ac0-4110-94bb-194c0de0af26\") " pod="openshift-multus/multus-5bshd"
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.941696 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/04bf18ad-d2a1-4b30-a3fa-2b6247363c82-proxy-tls\") pod \"machine-config-daemon-6blv7\" (UID: \"04bf18ad-d2a1-4b30-a3fa-2b6247363c82\") " pod="openshift-machine-config-operator/machine-config-daemon-6blv7"
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.941711 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/a471ac3f-0ac0-4110-94bb-194c0de0af26-host-run-netns\") pod \"multus-5bshd\" (UID: \"a471ac3f-0ac0-4110-94bb-194c0de0af26\") " pod="openshift-multus/multus-5bshd"
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.941734 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/a471ac3f-0ac0-4110-94bb-194c0de0af26-host-run-multus-certs\") pod \"multus-5bshd\" (UID: \"a471ac3f-0ac0-4110-94bb-194c0de0af26\") " pod="openshift-multus/multus-5bshd"
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.941764 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/026c3325-a592-4828-8e4f-08bcb790014a-system-cni-dir\") pod \"multus-additional-cni-plugins-nrzjd\" (UID: \"026c3325-a592-4828-8e4f-08bcb790014a\") " pod="openshift-multus/multus-additional-cni-plugins-nrzjd"
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.941780 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/9b104695-0850-4fb3-b2f8-f764435f8694-host-kubelet\") pod \"ovnkube-node-lghgp\" (UID: \"9b104695-0850-4fb3-b2f8-f764435f8694\") " pod="openshift-ovn-kubernetes/ovnkube-node-lghgp"
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.941819 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/9b104695-0850-4fb3-b2f8-f764435f8694-host-run-netns\") pod \"ovnkube-node-lghgp\" (UID: \"9b104695-0850-4fb3-b2f8-f764435f8694\") " pod="openshift-ovn-kubernetes/ovnkube-node-lghgp"
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.941893 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/9b104695-0850-4fb3-b2f8-f764435f8694-host-slash\") pod \"ovnkube-node-lghgp\" (UID: \"9b104695-0850-4fb3-b2f8-f764435f8694\") " pod="openshift-ovn-kubernetes/ovnkube-node-lghgp"
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.941922 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/9b104695-0850-4fb3-b2f8-f764435f8694-etc-openvswitch\") pod \"ovnkube-node-lghgp\" (UID: \"9b104695-0850-4fb3-b2f8-f764435f8694\") " pod="openshift-ovn-kubernetes/ovnkube-node-lghgp"
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.941944 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/a471ac3f-0ac0-4110-94bb-194c0de0af26-host-var-lib-kubelet\") pod \"multus-5bshd\" (UID: \"a471ac3f-0ac0-4110-94bb-194c0de0af26\") " pod="openshift-multus/multus-5bshd"
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.941971 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/9b104695-0850-4fb3-b2f8-f764435f8694-ovnkube-config\") pod \"ovnkube-node-lghgp\" (UID: \"9b104695-0850-4fb3-b2f8-f764435f8694\") " pod="openshift-ovn-kubernetes/ovnkube-node-lghgp"
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.941991 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lwjt9\" (UniqueName: \"kubernetes.io/projected/04bf18ad-d2a1-4b30-a3fa-2b6247363c82-kube-api-access-lwjt9\") pod \"machine-config-daemon-6blv7\" (UID: \"04bf18ad-d2a1-4b30-a3fa-2b6247363c82\") " pod="openshift-machine-config-operator/machine-config-daemon-6blv7"
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.942011 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/a471ac3f-0ac0-4110-94bb-194c0de0af26-multus-socket-dir-parent\") pod \"multus-5bshd\" (UID: \"a471ac3f-0ac0-4110-94bb-194c0de0af26\") " pod="openshift-multus/multus-5bshd"
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.942032 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/9b104695-0850-4fb3-b2f8-f764435f8694-run-systemd\") pod \"ovnkube-node-lghgp\" (UID: \"9b104695-0850-4fb3-b2f8-f764435f8694\") " pod="openshift-ovn-kubernetes/ovnkube-node-lghgp"
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.942048 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/9b104695-0850-4fb3-b2f8-f764435f8694-run-ovn\") pod \"ovnkube-node-lghgp\" (UID: \"9b104695-0850-4fb3-b2f8-f764435f8694\") " pod="openshift-ovn-kubernetes/ovnkube-node-lghgp"
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.942071 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/a471ac3f-0ac0-4110-94bb-194c0de0af26-cni-binary-copy\") pod \"multus-5bshd\" (UID: \"a471ac3f-0ac0-4110-94bb-194c0de0af26\") " pod="openshift-multus/multus-5bshd"
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.942093 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/a471ac3f-0ac0-4110-94bb-194c0de0af26-host-run-k8s-cni-cncf-io\") pod \"multus-5bshd\" (UID: \"a471ac3f-0ac0-4110-94bb-194c0de0af26\") " pod="openshift-multus/multus-5bshd"
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.942108 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/026c3325-a592-4828-8e4f-08bcb790014a-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-nrzjd\" (UID: \"026c3325-a592-4828-8e4f-08bcb790014a\") " pod="openshift-multus/multus-additional-cni-plugins-nrzjd"
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.942123 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/04bf18ad-d2a1-4b30-a3fa-2b6247363c82-mcd-auth-proxy-config\") pod \"machine-config-daemon-6blv7\" (UID: \"04bf18ad-d2a1-4b30-a3fa-2b6247363c82\") " pod="openshift-machine-config-operator/machine-config-daemon-6blv7"
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.942136 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/a471ac3f-0ac0-4110-94bb-194c0de0af26-etc-kubernetes\") pod \"multus-5bshd\" (UID: \"a471ac3f-0ac0-4110-94bb-194c0de0af26\") " pod="openshift-multus/multus-5bshd"
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.942193 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/026c3325-a592-4828-8e4f-08bcb790014a-cni-binary-copy\") pod \"multus-additional-cni-plugins-nrzjd\" (UID: \"026c3325-a592-4828-8e4f-08bcb790014a\") " pod="openshift-multus/multus-additional-cni-plugins-nrzjd"
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.942226 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/9b104695-0850-4fb3-b2f8-f764435f8694-env-overrides\") pod \"ovnkube-node-lghgp\" (UID: \"9b104695-0850-4fb3-b2f8-f764435f8694\") " pod="openshift-ovn-kubernetes/ovnkube-node-lghgp"
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.942243 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/9b104695-0850-4fb3-b2f8-f764435f8694-ovnkube-script-lib\") pod \"ovnkube-node-lghgp\" (UID: \"9b104695-0850-4fb3-b2f8-f764435f8694\") " pod="openshift-ovn-kubernetes/ovnkube-node-lghgp"
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.942265 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cmsnp\" (UniqueName: \"kubernetes.io/projected/0151e6e0-df4e-4482-9309-f8cce9bc6ccd-kube-api-access-cmsnp\") pod \"node-resolver-6lnwf\" (UID: \"0151e6e0-df4e-4482-9309-f8cce9bc6ccd\") " pod="openshift-dns/node-resolver-6lnwf"
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.942281 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/9b104695-0850-4fb3-b2f8-f764435f8694-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-lghgp\" (UID: \"9b104695-0850-4fb3-b2f8-f764435f8694\") " pod="openshift-ovn-kubernetes/ovnkube-node-lghgp"
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.942299 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/a471ac3f-0ac0-4110-94bb-194c0de0af26-host-var-lib-cni-bin\") pod \"multus-5bshd\" (UID: \"a471ac3f-0ac0-4110-94bb-194c0de0af26\") " pod="openshift-multus/multus-5bshd"
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.942316 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gt98z\" (UniqueName: \"kubernetes.io/projected/a471ac3f-0ac0-4110-94bb-194c0de0af26-kube-api-access-gt98z\") pod \"multus-5bshd\" (UID: \"a471ac3f-0ac0-4110-94bb-194c0de0af26\") " pod="openshift-multus/multus-5bshd"
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.942331 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/9b104695-0850-4fb3-b2f8-f764435f8694-node-log\") pod \"ovnkube-node-lghgp\" (UID: \"9b104695-0850-4fb3-b2f8-f764435f8694\") " pod="openshift-ovn-kubernetes/ovnkube-node-lghgp"
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.942345 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mc5q6\" (UniqueName: \"kubernetes.io/projected/9b104695-0850-4fb3-b2f8-f764435f8694-kube-api-access-mc5q6\") pod \"ovnkube-node-lghgp\" (UID: \"9b104695-0850-4fb3-b2f8-f764435f8694\") " pod="openshift-ovn-kubernetes/ovnkube-node-lghgp"
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.942358 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/04bf18ad-d2a1-4b30-a3fa-2b6247363c82-rootfs\") pod \"machine-config-daemon-6blv7\" (UID: \"04bf18ad-d2a1-4b30-a3fa-2b6247363c82\") " pod="openshift-machine-config-operator/machine-config-daemon-6blv7"
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.942378 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/a471ac3f-0ac0-4110-94bb-194c0de0af26-hostroot\") pod \"multus-5bshd\" (UID: \"a471ac3f-0ac0-4110-94bb-194c0de0af26\") " pod="openshift-multus/multus-5bshd"
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.942395 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/0151e6e0-df4e-4482-9309-f8cce9bc6ccd-hosts-file\") pod \"node-resolver-6lnwf\" (UID: \"0151e6e0-df4e-4482-9309-f8cce9bc6ccd\") " pod="openshift-dns/node-resolver-6lnwf"
Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.942410 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/9b104695-0850-4fb3-b2f8-f764435f8694-var-lib-openvswitch\") pod \"ovnkube-node-lghgp\" (UID: \"9b104695-0850-4fb3-b2f8-f764435f8694\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.950376 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-nrzjd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"026c3325-a592-4828-8e4f-08bcb790014a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"
restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\
\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-nrzjd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:48Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.961624 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"995e57c0-8e79-4857-8451-c7f7b51a05d3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70a3d5141782b7c4dcb2ef96a026e1fb4f4ef101c967f64a20ece097bbdf007f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1684e95c263df92c92efdd2240417c071f977b58d825abd2155277d7cce1fd57\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://15f4e01e3336948991062e895549de16371af485e1cc1f32591b5dc4fc62472b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef8e05b7e0643e9159acdd474c0f1ed97db182b0a7ed4f21b475ce6e4c051f52\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a6dd3695118a8c09585a7cfceb42ac5ae5898562c5f6442da6936f849a4e9f8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T06:48:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1126 06:48:41.573117 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1126 06:48:41.573321 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 06:48:41.575536 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3533228848/tls.crt::/tmp/serving-cert-3533228848/tls.key\\\\\\\"\\\\nI1126 06:48:41.958496 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 06:48:41.961105 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 06:48:41.961124 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 06:48:41.961145 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 06:48:41.961150 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 06:48:41.965068 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 06:48:41.965092 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:48:41.965097 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:48:41.965101 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 06:48:41.965103 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 06:48:41.965106 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 06:48:41.965108 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 06:48:41.965323 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 06:48:41.966098 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ca15652692a4dd6b01f7ec02b8ff37ac41ed740c4c47d451e0742aa80d0c028\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4421dd0243f7239dfa2ede847b9cb5230663e69a214dca3dfb1a4763ea76b233\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4421dd0243f7239dfa2ede847b9cb5230663e69a214dca3dfb1a4763ea76b233\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:24Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:48Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.970368 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:48Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.978589 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:48Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.987847 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2644f0895688786b5b70f08011457eed33cb0a7962ac6dde6b60dd3276497011\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://27d142c5328a6f659cd2cee0b6535403ccbfb07aee1ea29c928c9d80a847f4ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:48Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:48 crc kubenswrapper[4492]: I1126 06:48:48.997277 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://78117c73db01f54f893d52844cab11a6257a1f6b6b582fb751065e1acc329620\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:48Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.006756 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4aa19f79274209a31db5cfe0a8ff6f71000fc4efb2d65dfab3f719d3a7f1ee9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:49Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.016418 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"779b4f9a-92b7-4dcc-938a-e4de5decd688\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e170e91f442c9f45c7adfc9a5f8435cb51135522d5ac61f29829834c1f797e0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a8187b933b520c7a9c1c7f798f841f3892c249f52eddd13c0c7585a8bc916f20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b87661ddeafdf124a87d6bc50755b340e32d88bbc35a005ae13aa66aa3b39ff4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a8bf4249c1551f054875ff3ef146502de6c99fd3afd10d78b41274196a35a6d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:24Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:49Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.025945 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:49Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.028257 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.028324 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.028335 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.028349 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.028359 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:49Z","lastTransitionTime":"2025-11-26T06:48:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.035201 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2644f0895688786b5b70f08011457eed33cb0a7962ac6dde6b60dd3276497011\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://27d142c5328a6f659cd2cee0b6535403ccbfb07aee1ea29c928c9d80a847f4ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:49Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.043507 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x29l5\" (UniqueName: 
\"kubernetes.io/projected/026c3325-a592-4828-8e4f-08bcb790014a-kube-api-access-x29l5\") pod \"multus-additional-cni-plugins-nrzjd\" (UID: \"026c3325-a592-4828-8e4f-08bcb790014a\") " pod="openshift-multus/multus-additional-cni-plugins-nrzjd" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.043562 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/9b104695-0850-4fb3-b2f8-f764435f8694-host-cni-bin\") pod \"ovnkube-node-lghgp\" (UID: \"9b104695-0850-4fb3-b2f8-f764435f8694\") " pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.043584 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/9b104695-0850-4fb3-b2f8-f764435f8694-ovn-node-metrics-cert\") pod \"ovnkube-node-lghgp\" (UID: \"9b104695-0850-4fb3-b2f8-f764435f8694\") " pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.043602 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/a471ac3f-0ac0-4110-94bb-194c0de0af26-cnibin\") pod \"multus-5bshd\" (UID: \"a471ac3f-0ac0-4110-94bb-194c0de0af26\") " pod="openshift-multus/multus-5bshd" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.043620 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/026c3325-a592-4828-8e4f-08bcb790014a-os-release\") pod \"multus-additional-cni-plugins-nrzjd\" (UID: \"026c3325-a592-4828-8e4f-08bcb790014a\") " pod="openshift-multus/multus-additional-cni-plugins-nrzjd" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.043636 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/9b104695-0850-4fb3-b2f8-f764435f8694-host-run-ovn-kubernetes\") pod \"ovnkube-node-lghgp\" (UID: \"9b104695-0850-4fb3-b2f8-f764435f8694\") " pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.043651 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/a471ac3f-0ac0-4110-94bb-194c0de0af26-os-release\") pod \"multus-5bshd\" (UID: \"a471ac3f-0ac0-4110-94bb-194c0de0af26\") " pod="openshift-multus/multus-5bshd" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.043656 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/9b104695-0850-4fb3-b2f8-f764435f8694-host-cni-bin\") pod \"ovnkube-node-lghgp\" (UID: \"9b104695-0850-4fb3-b2f8-f764435f8694\") " pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.043665 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/a471ac3f-0ac0-4110-94bb-194c0de0af26-multus-daemon-config\") pod \"multus-5bshd\" (UID: \"a471ac3f-0ac0-4110-94bb-194c0de0af26\") " pod="openshift-multus/multus-5bshd" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.043735 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/026c3325-a592-4828-8e4f-08bcb790014a-cnibin\") pod 
\"multus-additional-cni-plugins-nrzjd\" (UID: \"026c3325-a592-4828-8e4f-08bcb790014a\") " pod="openshift-multus/multus-additional-cni-plugins-nrzjd" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.043756 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/9b104695-0850-4fb3-b2f8-f764435f8694-log-socket\") pod \"ovnkube-node-lghgp\" (UID: \"9b104695-0850-4fb3-b2f8-f764435f8694\") " pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.043773 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/9b104695-0850-4fb3-b2f8-f764435f8694-host-cni-netd\") pod \"ovnkube-node-lghgp\" (UID: \"9b104695-0850-4fb3-b2f8-f764435f8694\") " pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.043790 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/9b104695-0850-4fb3-b2f8-f764435f8694-systemd-units\") pod \"ovnkube-node-lghgp\" (UID: \"9b104695-0850-4fb3-b2f8-f764435f8694\") " pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.043806 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/a471ac3f-0ac0-4110-94bb-194c0de0af26-system-cni-dir\") pod \"multus-5bshd\" (UID: \"a471ac3f-0ac0-4110-94bb-194c0de0af26\") " pod="openshift-multus/multus-5bshd" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.043822 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/a471ac3f-0ac0-4110-94bb-194c0de0af26-multus-cni-dir\") pod \"multus-5bshd\" (UID: \"a471ac3f-0ac0-4110-94bb-194c0de0af26\") " pod="openshift-multus/multus-5bshd" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.043820 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/026c3325-a592-4828-8e4f-08bcb790014a-os-release\") pod \"multus-additional-cni-plugins-nrzjd\" (UID: \"026c3325-a592-4828-8e4f-08bcb790014a\") " pod="openshift-multus/multus-additional-cni-plugins-nrzjd" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.043839 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/a471ac3f-0ac0-4110-94bb-194c0de0af26-host-var-lib-cni-multus\") pod \"multus-5bshd\" (UID: \"a471ac3f-0ac0-4110-94bb-194c0de0af26\") " pod="openshift-multus/multus-5bshd" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.043867 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/026c3325-a592-4828-8e4f-08bcb790014a-system-cni-dir\") pod \"multus-additional-cni-plugins-nrzjd\" (UID: \"026c3325-a592-4828-8e4f-08bcb790014a\") " pod="openshift-multus/multus-additional-cni-plugins-nrzjd" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.043884 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/04bf18ad-d2a1-4b30-a3fa-2b6247363c82-proxy-tls\") pod \"machine-config-daemon-6blv7\" (UID: \"04bf18ad-d2a1-4b30-a3fa-2b6247363c82\") " 
pod="openshift-machine-config-operator/machine-config-daemon-6blv7" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.043900 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/a471ac3f-0ac0-4110-94bb-194c0de0af26-host-run-netns\") pod \"multus-5bshd\" (UID: \"a471ac3f-0ac0-4110-94bb-194c0de0af26\") " pod="openshift-multus/multus-5bshd" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.043920 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/a471ac3f-0ac0-4110-94bb-194c0de0af26-host-run-multus-certs\") pod \"multus-5bshd\" (UID: \"a471ac3f-0ac0-4110-94bb-194c0de0af26\") " pod="openshift-multus/multus-5bshd" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.043940 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/9b104695-0850-4fb3-b2f8-f764435f8694-host-kubelet\") pod \"ovnkube-node-lghgp\" (UID: \"9b104695-0850-4fb3-b2f8-f764435f8694\") " pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.043956 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/9b104695-0850-4fb3-b2f8-f764435f8694-host-run-netns\") pod \"ovnkube-node-lghgp\" (UID: \"9b104695-0850-4fb3-b2f8-f764435f8694\") " pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.043980 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/9b104695-0850-4fb3-b2f8-f764435f8694-host-slash\") pod \"ovnkube-node-lghgp\" (UID: \"9b104695-0850-4fb3-b2f8-f764435f8694\") " pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.043996 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/9b104695-0850-4fb3-b2f8-f764435f8694-etc-openvswitch\") pod \"ovnkube-node-lghgp\" (UID: \"9b104695-0850-4fb3-b2f8-f764435f8694\") " pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.044015 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/a471ac3f-0ac0-4110-94bb-194c0de0af26-host-var-lib-kubelet\") pod \"multus-5bshd\" (UID: \"a471ac3f-0ac0-4110-94bb-194c0de0af26\") " pod="openshift-multus/multus-5bshd" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.044033 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/9b104695-0850-4fb3-b2f8-f764435f8694-run-systemd\") pod \"ovnkube-node-lghgp\" (UID: \"9b104695-0850-4fb3-b2f8-f764435f8694\") " pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.044059 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/9b104695-0850-4fb3-b2f8-f764435f8694-run-ovn\") pod \"ovnkube-node-lghgp\" (UID: \"9b104695-0850-4fb3-b2f8-f764435f8694\") " pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.044077 4492 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/9b104695-0850-4fb3-b2f8-f764435f8694-ovnkube-config\") pod \"ovnkube-node-lghgp\" (UID: \"9b104695-0850-4fb3-b2f8-f764435f8694\") " pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.044094 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lwjt9\" (UniqueName: \"kubernetes.io/projected/04bf18ad-d2a1-4b30-a3fa-2b6247363c82-kube-api-access-lwjt9\") pod \"machine-config-daemon-6blv7\" (UID: \"04bf18ad-d2a1-4b30-a3fa-2b6247363c82\") " pod="openshift-machine-config-operator/machine-config-daemon-6blv7" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.044113 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/a471ac3f-0ac0-4110-94bb-194c0de0af26-multus-socket-dir-parent\") pod \"multus-5bshd\" (UID: \"a471ac3f-0ac0-4110-94bb-194c0de0af26\") " pod="openshift-multus/multus-5bshd" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.044131 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/026c3325-a592-4828-8e4f-08bcb790014a-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-nrzjd\" (UID: \"026c3325-a592-4828-8e4f-08bcb790014a\") " pod="openshift-multus/multus-additional-cni-plugins-nrzjd" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.044150 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/04bf18ad-d2a1-4b30-a3fa-2b6247363c82-mcd-auth-proxy-config\") pod \"machine-config-daemon-6blv7\" (UID: \"04bf18ad-d2a1-4b30-a3fa-2b6247363c82\") " pod="openshift-machine-config-operator/machine-config-daemon-6blv7" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.044183 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/a471ac3f-0ac0-4110-94bb-194c0de0af26-cni-binary-copy\") pod \"multus-5bshd\" (UID: \"a471ac3f-0ac0-4110-94bb-194c0de0af26\") " pod="openshift-multus/multus-5bshd" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.044202 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/a471ac3f-0ac0-4110-94bb-194c0de0af26-host-run-k8s-cni-cncf-io\") pod \"multus-5bshd\" (UID: \"a471ac3f-0ac0-4110-94bb-194c0de0af26\") " pod="openshift-multus/multus-5bshd" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.044221 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/026c3325-a592-4828-8e4f-08bcb790014a-cni-binary-copy\") pod \"multus-additional-cni-plugins-nrzjd\" (UID: \"026c3325-a592-4828-8e4f-08bcb790014a\") " pod="openshift-multus/multus-additional-cni-plugins-nrzjd" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.044237 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/a471ac3f-0ac0-4110-94bb-194c0de0af26-etc-kubernetes\") pod \"multus-5bshd\" (UID: \"a471ac3f-0ac0-4110-94bb-194c0de0af26\") " pod="openshift-multus/multus-5bshd" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.044259 4492 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/a471ac3f-0ac0-4110-94bb-194c0de0af26-multus-daemon-config\") pod \"multus-5bshd\" (UID: \"a471ac3f-0ac0-4110-94bb-194c0de0af26\") " pod="openshift-multus/multus-5bshd" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.044270 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cmsnp\" (UniqueName: \"kubernetes.io/projected/0151e6e0-df4e-4482-9309-f8cce9bc6ccd-kube-api-access-cmsnp\") pod \"node-resolver-6lnwf\" (UID: \"0151e6e0-df4e-4482-9309-f8cce9bc6ccd\") " pod="openshift-dns/node-resolver-6lnwf" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.044287 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/9b104695-0850-4fb3-b2f8-f764435f8694-env-overrides\") pod \"ovnkube-node-lghgp\" (UID: \"9b104695-0850-4fb3-b2f8-f764435f8694\") " pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.044307 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/9b104695-0850-4fb3-b2f8-f764435f8694-ovnkube-script-lib\") pod \"ovnkube-node-lghgp\" (UID: \"9b104695-0850-4fb3-b2f8-f764435f8694\") " pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.044316 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/9b104695-0850-4fb3-b2f8-f764435f8694-host-run-ovn-kubernetes\") pod \"ovnkube-node-lghgp\" (UID: \"9b104695-0850-4fb3-b2f8-f764435f8694\") " pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.044325 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/a471ac3f-0ac0-4110-94bb-194c0de0af26-host-var-lib-cni-bin\") pod \"multus-5bshd\" (UID: \"a471ac3f-0ac0-4110-94bb-194c0de0af26\") " pod="openshift-multus/multus-5bshd" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.044346 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gt98z\" (UniqueName: \"kubernetes.io/projected/a471ac3f-0ac0-4110-94bb-194c0de0af26-kube-api-access-gt98z\") pod \"multus-5bshd\" (UID: \"a471ac3f-0ac0-4110-94bb-194c0de0af26\") " pod="openshift-multus/multus-5bshd" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.044357 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/a471ac3f-0ac0-4110-94bb-194c0de0af26-os-release\") pod \"multus-5bshd\" (UID: \"a471ac3f-0ac0-4110-94bb-194c0de0af26\") " pod="openshift-multus/multus-5bshd" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.044367 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/9b104695-0850-4fb3-b2f8-f764435f8694-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-lghgp\" (UID: \"9b104695-0850-4fb3-b2f8-f764435f8694\") " pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.044395 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/9b104695-0850-4fb3-b2f8-f764435f8694-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-lghgp\" (UID: \"9b104695-0850-4fb3-b2f8-f764435f8694\") " pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.044415 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/0151e6e0-df4e-4482-9309-f8cce9bc6ccd-hosts-file\") pod \"node-resolver-6lnwf\" (UID: \"0151e6e0-df4e-4482-9309-f8cce9bc6ccd\") " pod="openshift-dns/node-resolver-6lnwf" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.044426 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/a471ac3f-0ac0-4110-94bb-194c0de0af26-multus-cni-dir\") pod \"multus-5bshd\" (UID: \"a471ac3f-0ac0-4110-94bb-194c0de0af26\") " pod="openshift-multus/multus-5bshd" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.044440 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/9b104695-0850-4fb3-b2f8-f764435f8694-var-lib-openvswitch\") pod \"ovnkube-node-lghgp\" (UID: \"9b104695-0850-4fb3-b2f8-f764435f8694\") " pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.044451 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/026c3325-a592-4828-8e4f-08bcb790014a-cnibin\") pod \"multus-additional-cni-plugins-nrzjd\" (UID: \"026c3325-a592-4828-8e4f-08bcb790014a\") " pod="openshift-multus/multus-additional-cni-plugins-nrzjd" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.044439 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/a471ac3f-0ac0-4110-94bb-194c0de0af26-multus-socket-dir-parent\") pod \"multus-5bshd\" (UID: \"a471ac3f-0ac0-4110-94bb-194c0de0af26\") " pod="openshift-multus/multus-5bshd" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.044476 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/9b104695-0850-4fb3-b2f8-f764435f8694-log-socket\") pod \"ovnkube-node-lghgp\" (UID: \"9b104695-0850-4fb3-b2f8-f764435f8694\") " pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.044490 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/0151e6e0-df4e-4482-9309-f8cce9bc6ccd-hosts-file\") pod \"node-resolver-6lnwf\" (UID: \"0151e6e0-df4e-4482-9309-f8cce9bc6ccd\") " pod="openshift-dns/node-resolver-6lnwf" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.044514 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/9b104695-0850-4fb3-b2f8-f764435f8694-var-lib-openvswitch\") pod \"ovnkube-node-lghgp\" (UID: \"9b104695-0850-4fb3-b2f8-f764435f8694\") " pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.044519 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/a471ac3f-0ac0-4110-94bb-194c0de0af26-system-cni-dir\") pod \"multus-5bshd\" 
(UID: \"a471ac3f-0ac0-4110-94bb-194c0de0af26\") " pod="openshift-multus/multus-5bshd" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.044540 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/9b104695-0850-4fb3-b2f8-f764435f8694-node-log\") pod \"ovnkube-node-lghgp\" (UID: \"9b104695-0850-4fb3-b2f8-f764435f8694\") " pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.044554 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/a471ac3f-0ac0-4110-94bb-194c0de0af26-cnibin\") pod \"multus-5bshd\" (UID: \"a471ac3f-0ac0-4110-94bb-194c0de0af26\") " pod="openshift-multus/multus-5bshd" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.044552 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/9b104695-0850-4fb3-b2f8-f764435f8694-etc-openvswitch\") pod \"ovnkube-node-lghgp\" (UID: \"9b104695-0850-4fb3-b2f8-f764435f8694\") " pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.044577 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/a471ac3f-0ac0-4110-94bb-194c0de0af26-host-var-lib-cni-multus\") pod \"multus-5bshd\" (UID: \"a471ac3f-0ac0-4110-94bb-194c0de0af26\") " pod="openshift-multus/multus-5bshd" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.044576 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/9b104695-0850-4fb3-b2f8-f764435f8694-systemd-units\") pod \"ovnkube-node-lghgp\" (UID: \"9b104695-0850-4fb3-b2f8-f764435f8694\") " pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.044608 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/a471ac3f-0ac0-4110-94bb-194c0de0af26-host-var-lib-kubelet\") pod \"multus-5bshd\" (UID: \"a471ac3f-0ac0-4110-94bb-194c0de0af26\") " pod="openshift-multus/multus-5bshd" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.044610 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/026c3325-a592-4828-8e4f-08bcb790014a-system-cni-dir\") pod \"multus-additional-cni-plugins-nrzjd\" (UID: \"026c3325-a592-4828-8e4f-08bcb790014a\") " pod="openshift-multus/multus-additional-cni-plugins-nrzjd" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.044655 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/9b104695-0850-4fb3-b2f8-f764435f8694-host-slash\") pod \"ovnkube-node-lghgp\" (UID: \"9b104695-0850-4fb3-b2f8-f764435f8694\") " pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.044661 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/9b104695-0850-4fb3-b2f8-f764435f8694-run-ovn\") pod \"ovnkube-node-lghgp\" (UID: \"9b104695-0850-4fb3-b2f8-f764435f8694\") " pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.045060 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/026c3325-a592-4828-8e4f-08bcb790014a-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-nrzjd\" (UID: \"026c3325-a592-4828-8e4f-08bcb790014a\") " pod="openshift-multus/multus-additional-cni-plugins-nrzjd" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.045131 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/a471ac3f-0ac0-4110-94bb-194c0de0af26-cni-binary-copy\") pod \"multus-5bshd\" (UID: \"a471ac3f-0ac0-4110-94bb-194c0de0af26\") " pod="openshift-multus/multus-5bshd" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.045155 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/a471ac3f-0ac0-4110-94bb-194c0de0af26-host-run-k8s-cni-cncf-io\") pod \"multus-5bshd\" (UID: \"a471ac3f-0ac0-4110-94bb-194c0de0af26\") " pod="openshift-multus/multus-5bshd" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.044466 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/9b104695-0850-4fb3-b2f8-f764435f8694-node-log\") pod \"ovnkube-node-lghgp\" (UID: \"9b104695-0850-4fb3-b2f8-f764435f8694\") " pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.045361 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/a471ac3f-0ac0-4110-94bb-194c0de0af26-etc-kubernetes\") pod \"multus-5bshd\" (UID: \"a471ac3f-0ac0-4110-94bb-194c0de0af26\") " pod="openshift-multus/multus-5bshd" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.045384 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/9b104695-0850-4fb3-b2f8-f764435f8694-host-cni-netd\") pod \"ovnkube-node-lghgp\" (UID: \"9b104695-0850-4fb3-b2f8-f764435f8694\") " pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.045421 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/a471ac3f-0ac0-4110-94bb-194c0de0af26-host-run-multus-certs\") pod \"multus-5bshd\" (UID: \"a471ac3f-0ac0-4110-94bb-194c0de0af26\") " pod="openshift-multus/multus-5bshd" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.045441 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/a471ac3f-0ac0-4110-94bb-194c0de0af26-host-run-netns\") pod \"multus-5bshd\" (UID: \"a471ac3f-0ac0-4110-94bb-194c0de0af26\") " pod="openshift-multus/multus-5bshd" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.045469 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mc5q6\" (UniqueName: \"kubernetes.io/projected/9b104695-0850-4fb3-b2f8-f764435f8694-kube-api-access-mc5q6\") pod \"ovnkube-node-lghgp\" (UID: \"9b104695-0850-4fb3-b2f8-f764435f8694\") " pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.045490 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/04bf18ad-d2a1-4b30-a3fa-2b6247363c82-rootfs\") pod \"machine-config-daemon-6blv7\" (UID: 
\"04bf18ad-d2a1-4b30-a3fa-2b6247363c82\") " pod="openshift-machine-config-operator/machine-config-daemon-6blv7" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.045507 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/a471ac3f-0ac0-4110-94bb-194c0de0af26-hostroot\") pod \"multus-5bshd\" (UID: \"a471ac3f-0ac0-4110-94bb-194c0de0af26\") " pod="openshift-multus/multus-5bshd" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.045526 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/026c3325-a592-4828-8e4f-08bcb790014a-tuning-conf-dir\") pod \"multus-additional-cni-plugins-nrzjd\" (UID: \"026c3325-a592-4828-8e4f-08bcb790014a\") " pod="openshift-multus/multus-additional-cni-plugins-nrzjd" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.045543 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/9b104695-0850-4fb3-b2f8-f764435f8694-run-openvswitch\") pod \"ovnkube-node-lghgp\" (UID: \"9b104695-0850-4fb3-b2f8-f764435f8694\") " pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.045559 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/a471ac3f-0ac0-4110-94bb-194c0de0af26-multus-conf-dir\") pod \"multus-5bshd\" (UID: \"a471ac3f-0ac0-4110-94bb-194c0de0af26\") " pod="openshift-multus/multus-5bshd" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.045586 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/9b104695-0850-4fb3-b2f8-f764435f8694-ovnkube-config\") pod \"ovnkube-node-lghgp\" (UID: \"9b104695-0850-4fb3-b2f8-f764435f8694\") " pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.045610 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/026c3325-a592-4828-8e4f-08bcb790014a-cni-binary-copy\") pod \"multus-additional-cni-plugins-nrzjd\" (UID: \"026c3325-a592-4828-8e4f-08bcb790014a\") " pod="openshift-multus/multus-additional-cni-plugins-nrzjd" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.045837 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/04bf18ad-d2a1-4b30-a3fa-2b6247363c82-mcd-auth-proxy-config\") pod \"machine-config-daemon-6blv7\" (UID: \"04bf18ad-d2a1-4b30-a3fa-2b6247363c82\") " pod="openshift-machine-config-operator/machine-config-daemon-6blv7" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.045863 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/a471ac3f-0ac0-4110-94bb-194c0de0af26-host-var-lib-cni-bin\") pod \"multus-5bshd\" (UID: \"a471ac3f-0ac0-4110-94bb-194c0de0af26\") " pod="openshift-multus/multus-5bshd" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.045878 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/9b104695-0850-4fb3-b2f8-f764435f8694-host-kubelet\") pod \"ovnkube-node-lghgp\" (UID: \"9b104695-0850-4fb3-b2f8-f764435f8694\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.045900 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/9b104695-0850-4fb3-b2f8-f764435f8694-run-systemd\") pod \"ovnkube-node-lghgp\" (UID: \"9b104695-0850-4fb3-b2f8-f764435f8694\") " pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.045974 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/04bf18ad-d2a1-4b30-a3fa-2b6247363c82-rootfs\") pod \"machine-config-daemon-6blv7\" (UID: \"04bf18ad-d2a1-4b30-a3fa-2b6247363c82\") " pod="openshift-machine-config-operator/machine-config-daemon-6blv7" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.046010 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/9b104695-0850-4fb3-b2f8-f764435f8694-host-run-netns\") pod \"ovnkube-node-lghgp\" (UID: \"9b104695-0850-4fb3-b2f8-f764435f8694\") " pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.046028 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/a471ac3f-0ac0-4110-94bb-194c0de0af26-hostroot\") pod \"multus-5bshd\" (UID: \"a471ac3f-0ac0-4110-94bb-194c0de0af26\") " pod="openshift-multus/multus-5bshd" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.046030 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/a471ac3f-0ac0-4110-94bb-194c0de0af26-multus-conf-dir\") pod \"multus-5bshd\" (UID: \"a471ac3f-0ac0-4110-94bb-194c0de0af26\") " pod="openshift-multus/multus-5bshd" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.046084 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/9b104695-0850-4fb3-b2f8-f764435f8694-ovnkube-script-lib\") pod \"ovnkube-node-lghgp\" (UID: \"9b104695-0850-4fb3-b2f8-f764435f8694\") " pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.046123 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/9b104695-0850-4fb3-b2f8-f764435f8694-run-openvswitch\") pod \"ovnkube-node-lghgp\" (UID: \"9b104695-0850-4fb3-b2f8-f764435f8694\") " pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.046404 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/026c3325-a592-4828-8e4f-08bcb790014a-tuning-conf-dir\") pod \"multus-additional-cni-plugins-nrzjd\" (UID: \"026c3325-a592-4828-8e4f-08bcb790014a\") " pod="openshift-multus/multus-additional-cni-plugins-nrzjd" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.046502 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/9b104695-0850-4fb3-b2f8-f764435f8694-env-overrides\") pod \"ovnkube-node-lghgp\" (UID: \"9b104695-0850-4fb3-b2f8-f764435f8694\") " pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.048873 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/9b104695-0850-4fb3-b2f8-f764435f8694-ovn-node-metrics-cert\") pod \"ovnkube-node-lghgp\" (UID: \"9b104695-0850-4fb3-b2f8-f764435f8694\") " pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.048977 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/04bf18ad-d2a1-4b30-a3fa-2b6247363c82-proxy-tls\") pod \"machine-config-daemon-6blv7\" (UID: \"04bf18ad-d2a1-4b30-a3fa-2b6247363c82\") " pod="openshift-machine-config-operator/machine-config-daemon-6blv7" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.059654 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:49Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.067794 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gt98z\" (UniqueName: \"kubernetes.io/projected/a471ac3f-0ac0-4110-94bb-194c0de0af26-kube-api-access-gt98z\") pod \"multus-5bshd\" (UID: \"a471ac3f-0ac0-4110-94bb-194c0de0af26\") " pod="openshift-multus/multus-5bshd" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.070700 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lwjt9\" (UniqueName: \"kubernetes.io/projected/04bf18ad-d2a1-4b30-a3fa-2b6247363c82-kube-api-access-lwjt9\") pod \"machine-config-daemon-6blv7\" (UID: \"04bf18ad-d2a1-4b30-a3fa-2b6247363c82\") " pod="openshift-machine-config-operator/machine-config-daemon-6blv7" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.074892 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mc5q6\" (UniqueName: \"kubernetes.io/projected/9b104695-0850-4fb3-b2f8-f764435f8694-kube-api-access-mc5q6\") pod \"ovnkube-node-lghgp\" (UID: \"9b104695-0850-4fb3-b2f8-f764435f8694\") " pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.076065 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x29l5\" (UniqueName: \"kubernetes.io/projected/026c3325-a592-4828-8e4f-08bcb790014a-kube-api-access-x29l5\") pod \"multus-additional-cni-plugins-nrzjd\" (UID: \"026c3325-a592-4828-8e4f-08bcb790014a\") " pod="openshift-multus/multus-additional-cni-plugins-nrzjd" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.076206 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"995e57c0-8e79-4857-8451-c7f7b51a05d3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70a3d5141782b7c4dcb2ef96a026e1fb4f4ef101c967f64a20ece097bbdf007f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1684e95c263df92c92efdd2240417c071f977b58d825abd2155277d7cce1fd57\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://15f4e01e3336948991062e895549de16371af485e1cc1f32591b5dc4fc62472b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef8e05b7e0643e9159acdd474c0f1ed97db182b0a7ed4f21b475ce6e4c051f52\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\
\":\\\"cri-o://6a6dd3695118a8c09585a7cfceb42ac5ae5898562c5f6442da6936f849a4e9f8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T06:48:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1126 06:48:41.573117 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1126 06:48:41.573321 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 06:48:41.575536 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3533228848/tls.crt::/tmp/serving-cert-3533228848/tls.key\\\\\\\"\\\\nI1126 06:48:41.958496 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 06:48:41.961105 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 06:48:41.961124 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 06:48:41.961145 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 06:48:41.961150 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 06:48:41.965068 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 06:48:41.965092 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:48:41.965097 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:48:41.965101 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 06:48:41.965103 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 06:48:41.965106 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 06:48:41.965108 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 06:48:41.965323 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 06:48:41.966098 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ca15652692a4dd6b01f7ec02b8ff37ac41ed740c4c47d451e0742aa80d0c028\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4421dd0243f7239dfa2ede847b9cb5230663e69a214dca3dfb1a4763ea76b233\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4421dd0243f7239dfa2ede847b9cb5230663e69a214dca3dfb1a4763ea76b233\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:24Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:49Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.078827 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cmsnp\" (UniqueName: \"kubernetes.io/projected/0151e6e0-df4e-4482-9309-f8cce9bc6ccd-kube-api-access-cmsnp\") pod \"node-resolver-6lnwf\" (UID: \"0151e6e0-df4e-4482-9309-f8cce9bc6ccd\") " pod="openshift-dns/node-resolver-6lnwf" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.086097 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:49Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.095356 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4aa19f79274209a31db5cfe0a8ff6f71000fc4efb2d65dfab3f719d3a7f1ee9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:49Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.109568 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b104695-0850-4fb3-b2f8-f764435f8694\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"
},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":
\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lghgp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:49Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.119877 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://78117c73db01f54f893d52844cab11a6257a1f6b6b582fb751065e1acc329620\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:49Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.128307 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"04bf18ad-d2a1-4b30-a3fa-2b6247363c82\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lwjt9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lwjt9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6blv7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:49Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.130957 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.131009 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.131021 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.131035 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.131044 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:49Z","lastTransitionTime":"2025-11-26T06:48:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.140892 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-nrzjd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"026c3325-a592-4828-8e4f-08bcb790014a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\
\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastSt
ate\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-nrzjd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:49Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.150373 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6lnwf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0151e6e0-df4e-4482-9309-f8cce9bc6ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cmsnp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6lnwf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:49Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.160924 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-5bshd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a471ac3f-0ac0-4110-94bb-194c0de0af26\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gt98z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabl
ed\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-5bshd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:49Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.169423 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"779b4f9a-92b7-4dcc-938a-e4de5decd688\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e170e91f442c9f45c7adfc9a5f8435cb51135522d5ac61f29829834c1f797e0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a8187b933b520c7a9c1c7f798f841f3892c249f52eddd13c0c7585a8bc916f20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b87661ddeafdf124a87d6bc50755b340e32d88bbc35a005ae13aa66aa3b39ff4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117e
b2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a8bf4249c1551f054875ff3ef146502de6c99fd3afd10d78b41274196a35a6d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:24Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:49Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.206643 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-nrzjd" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.211579 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.216671 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/node-resolver-6lnwf" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.225451 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-5bshd" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.232296 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.233431 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.233463 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.233473 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.233490 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.233499 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:49Z","lastTransitionTime":"2025-11-26T06:48:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:48:49 crc kubenswrapper[4492]: W1126 06:48:49.250501 4492 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda471ac3f_0ac0_4110_94bb_194c0de0af26.slice/crio-7ae8f17c6fc65073cf039f9ee0c745255476c3f09bef6710dec71d862c9647fa WatchSource:0}: Error finding container 7ae8f17c6fc65073cf039f9ee0c745255476c3f09bef6710dec71d862c9647fa: Status 404 returned error can't find the container with id 7ae8f17c6fc65073cf039f9ee0c745255476c3f09bef6710dec71d862c9647fa Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.338698 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.338728 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.338739 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.338775 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.338785 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:49Z","lastTransitionTime":"2025-11-26T06:48:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.437794 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 06:48:49 crc kubenswrapper[4492]: E1126 06:48:49.437972 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.441080 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.441113 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.441125 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.441145 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.441161 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:49Z","lastTransitionTime":"2025-11-26T06:48:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.542665 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-nrzjd" event={"ID":"026c3325-a592-4828-8e4f-08bcb790014a","Type":"ContainerStarted","Data":"70dcd5b71e0ed41855711d244d6628b740a92d6ada3fa3114a76c280b83e7402"} Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.542709 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-nrzjd" event={"ID":"026c3325-a592-4828-8e4f-08bcb790014a","Type":"ContainerStarted","Data":"b84d9996b23a55c0c6ea88fb7dad4a12cc3e0a635655b345d9deccd6ffdec4be"} Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.544896 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.544932 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.544958 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.544981 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.544992 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:49Z","lastTransitionTime":"2025-11-26T06:48:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.551351 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-5bshd" event={"ID":"a471ac3f-0ac0-4110-94bb-194c0de0af26","Type":"ContainerStarted","Data":"9a91fba8751c53be54b0060bfc75906ab11b521770ca44425d8910fa13027c9d"} Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.551400 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-5bshd" event={"ID":"a471ac3f-0ac0-4110-94bb-194c0de0af26","Type":"ContainerStarted","Data":"7ae8f17c6fc65073cf039f9ee0c745255476c3f09bef6710dec71d862c9647fa"} Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.553215 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-6lnwf" event={"ID":"0151e6e0-df4e-4482-9309-f8cce9bc6ccd","Type":"ContainerStarted","Data":"5f91d7ead0eaa9a8c8d4ec6372d35236fc33de1f8606616efadfee2ec6a71324"} Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.553339 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-6lnwf" event={"ID":"0151e6e0-df4e-4482-9309-f8cce9bc6ccd","Type":"ContainerStarted","Data":"37db7a6b20d839be04d1640a1585260e7faf7c49cba184ebe5b162703f3b55eb"} Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.555117 4492 generic.go:334] "Generic (PLEG): container finished" podID="9b104695-0850-4fb3-b2f8-f764435f8694" containerID="4001ee998f435fc6249ef42a3a6a9a8cf4a5031eb8999158ba3ba6bfa41a4a43" exitCode=0 Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.555195 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" event={"ID":"9b104695-0850-4fb3-b2f8-f764435f8694","Type":"ContainerDied","Data":"4001ee998f435fc6249ef42a3a6a9a8cf4a5031eb8999158ba3ba6bfa41a4a43"} Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.555232 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" event={"ID":"9b104695-0850-4fb3-b2f8-f764435f8694","Type":"ContainerStarted","Data":"af14b9a6ebbe92710db288ededc518253b4cd26ee8e5926763078dc46ce49b07"} Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.558045 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:49Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.558500 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" event={"ID":"04bf18ad-d2a1-4b30-a3fa-2b6247363c82","Type":"ContainerStarted","Data":"fad93839d2a51dffea51b659a6dcbfe24701e00ebb88e18329f7aa4351e1b4f5"} Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.558540 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" event={"ID":"04bf18ad-d2a1-4b30-a3fa-2b6247363c82","Type":"ContainerStarted","Data":"695ce8a08afa726c47c6aa48ddd828cbc420a9740de6cf165351e5bd68174a89"} Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.558551 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" event={"ID":"04bf18ad-d2a1-4b30-a3fa-2b6247363c82","Type":"ContainerStarted","Data":"0b95ae50318916f9b60dfa2ccb63bfe00e14a52ec87fa83b189fd7581c8ab379"} Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.590341 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"995e57c0-8e79-4857-8451-c7f7b51a05d3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70a3d5141782b7c4dcb2ef96a026e1fb4f4ef101c967f64a20ece097bbdf007f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1684e95c263df92c92efdd2240417c071f977b58d825abd2155277d7cce1fd57\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://15f4e01e3336948991062e895549de16371af485e1cc1f32591b5dc4fc62472b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef8e05b7e0643e9159acdd474c0f1ed97db182b0a7ed4f21b475ce6e4c051f52\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\
\":\\\"cri-o://6a6dd3695118a8c09585a7cfceb42ac5ae5898562c5f6442da6936f849a4e9f8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T06:48:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1126 06:48:41.573117 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1126 06:48:41.573321 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 06:48:41.575536 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3533228848/tls.crt::/tmp/serving-cert-3533228848/tls.key\\\\\\\"\\\\nI1126 06:48:41.958496 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 06:48:41.961105 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 06:48:41.961124 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 06:48:41.961145 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 06:48:41.961150 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 06:48:41.965068 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 06:48:41.965092 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:48:41.965097 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:48:41.965101 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 06:48:41.965103 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 06:48:41.965106 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 06:48:41.965108 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 06:48:41.965323 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 06:48:41.966098 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ca15652692a4dd6b01f7ec02b8ff37ac41ed740c4c47d451e0742aa80d0c028\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4421dd0243f7239dfa2ede847b9cb5230663e69a214dca3dfb1a4763ea76b233\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4421dd0243f7239dfa2ede847b9cb5230663e69a214dca3dfb1a4763ea76b233\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:24Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:49Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.608940 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:49Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.627586 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:49Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.643457 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2644f0895688786b5b70f08011457eed33cb0a7962ac6dde6b60dd3276497011\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://27d142c5328a6f659cd2cee0b6535403ccbfb07aee1ea29c928c9d80a847f4ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:49Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.647204 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.647303 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.647373 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.647440 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.647491 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:49Z","lastTransitionTime":"2025-11-26T06:48:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.660041 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://78117c73db01f54f893d52844cab11a6257a1f6b6b582fb751065e1acc329620\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:49Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.671156 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4aa19f79274209a31db5cfe0a8ff6f71000fc4efb2d65dfab3f719d3a7f1ee9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:49Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.688522 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b104695-0850-4fb3-b2f8-f764435f8694\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"
},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":
\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lghgp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:49Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.703413 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-nrzjd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"026c3325-a592-4828-8e4f-08bcb790014a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70dcd5b71e0ed41855711d244d6628b740a92d6ada3fa3114a76c280b83e7402\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":fa
lse,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":
\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-nrzjd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:49Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.714930 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6lnwf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0151e6e0-df4e-4482-9309-f8cce9bc6ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cmsnp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6lnwf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: 
failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:49Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.726348 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"04bf18ad-d2a1-4b30-a3fa-2b6247363c82\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lwjt9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lwjt9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6blv7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: 
current time 2025-11-26T06:48:49Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.735699 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"779b4f9a-92b7-4dcc-938a-e4de5decd688\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e170e91f442c9f45c7adfc9a5f8435cb51135522d5ac61f29829834c1f797e0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a8187b933b520c7a9c1c7f798f841f3892c249f52eddd13c0c7585a8bc916f20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b87661ddeafdf124a87d6bc50755b340e32d88bbc35a005ae13aa66aa3b39ff4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"reso
urce-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a8bf4249c1551f054875ff3ef146502de6c99fd3afd10d78b41274196a35a6d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:24Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:49Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.748833 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-5bshd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a471ac3f-0ac0-4110-94bb-194c0de0af26\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gt98z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-5bshd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:49Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.750251 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.750287 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.750296 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.750311 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.750320 4492 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:49Z","lastTransitionTime":"2025-11-26T06:48:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.764256 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"995e57c0-8e79-4857-8451-c7f7b51a05d3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70a3d5141782b7c4dcb2ef96a026e1fb4f4ef101c967f64a20ece097bbdf007f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1684e95c263df92c92efdd2240417c071f977b58d825abd2155277d7cce1fd57\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://15f4e01e3336948991062e895549de16371af485e1cc1f32591b5dc4fc62472b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apis
erver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef8e05b7e0643e9159acdd474c0f1ed97db182b0a7ed4f21b475ce6e4c051f52\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a6dd3695118a8c09585a7cfceb42ac5ae5898562c5f6442da6936f849a4e9f8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T06:48:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1126 06:48:41.573117 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1126 06:48:41.573321 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 06:48:41.575536 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3533228848/tls.crt::/tmp/serving-cert-3533228848/tls.key\\\\\\\"\\\\nI1126 06:48:41.958496 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 06:48:41.961105 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 06:48:41.961124 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 06:48:41.961145 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 06:48:41.961150 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 06:48:41.965068 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 06:48:41.965092 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:48:41.965097 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:48:41.965101 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 06:48:41.965103 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 06:48:41.965106 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 06:48:41.965108 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 06:48:41.965323 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 06:48:41.966098 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ca15652692a4dd6b01f7ec02b8ff37ac41ed740c4c47d451e0742aa80d0c028\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4421dd0243f7239dfa2ede847b9cb5230663e69a214dca3dfb1a4763ea76b233\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4421dd0243f7239dfa2ede847b9cb5230663e69a214dca3dfb1a4763ea76b233\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:24Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:49Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.773440 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:49Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.786402 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:49Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.798243 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2644f0895688786b5b70f08011457eed33cb0a7962ac6dde6b60dd3276497011\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://27d142c5328a6f659cd2cee0b6535403ccbfb07aee1ea29c928c9d80a847f4ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:49Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.813761 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:49Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.827273 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://78117c73db01f54f893d52844cab11a6257a1f6b6b582fb751065e1acc329620\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:49Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.838605 4492 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4aa19f79274209a31db5cfe0a8ff6f71000fc4efb2d65dfab3f719d3a7f1ee9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:49Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.853135 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.853203 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.853218 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.853236 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.853248 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:49Z","lastTransitionTime":"2025-11-26T06:48:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.860664 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b104695-0850-4fb3-b2f8-f764435f8694\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":fa
lse,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\
\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4001ee998f435fc6249ef42a3a6a9a8cf4a5031eb8999158ba
3ba6bfa41a4a43\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4001ee998f435fc6249ef42a3a6a9a8cf4a5031eb8999158ba3ba6bfa41a4a43\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lghgp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:49Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.873577 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-nrzjd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"026c3325-a592-4828-8e4f-08bcb790014a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70dcd5b71e0ed41855711d244d6628b740a92d6ada3fa3114a76c280b83e7402\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"}
,{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-nrzjd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:49Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.881636 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6lnwf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0151e6e0-df4e-4482-9309-f8cce9bc6ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f91d7ead0eaa9a8c8d4ec6372d35236fc33de1f8606616efadfee2ec6a71324\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cmsnp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6lnwf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:49Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.890042 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"04bf18ad-d2a1-4b30-a3fa-2b6247363c82\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fad93839d2a51dffea51b659a6dcbfe24701e00ebb88e18329f7aa4351e1b4f5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lwjt9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://695ce8a08afa726c47c6aa48ddd828cbc420a9740de6cf165351e5bd68174a89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lwjt9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6blv7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:49Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.902481 4492 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"779b4f9a-92b7-4dcc-938a-e4de5decd688\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e170e91f442c9f45c7adfc9a5f8435cb51135522d5ac61f29829834c1f797e0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a8187b933b520c7a9c1c7f798f841f3892c249f52eddd13c0c7585a8bc916f20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b87661ddeafdf124a87d6bc50755b340e32d88bbc35a005ae13aa66aa3b39ff4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a8bf4249c1551f054875ff3ef1
46502de6c99fd3afd10d78b41274196a35a6d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:24Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:49Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.915507 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-5bshd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a471ac3f-0ac0-4110-94bb-194c0de0af26\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a91fba8751c53be54b0060bfc75906ab11b521770ca44425d8910fa13027c9d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-
cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gt98z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-5bshd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:49Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.955303 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.955346 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.955359 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.955376 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:49 crc kubenswrapper[4492]: I1126 06:48:49.955386 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:49Z","lastTransitionTime":"2025-11-26T06:48:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:48:50 crc kubenswrapper[4492]: I1126 06:48:50.054262 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 06:48:50 crc kubenswrapper[4492]: E1126 06:48:50.054463 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 06:48:58.054430761 +0000 UTC m=+33.938319059 (durationBeforeRetry 8s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:48:50 crc kubenswrapper[4492]: I1126 06:48:50.054844 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 06:48:50 crc kubenswrapper[4492]: I1126 06:48:50.054881 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 06:48:50 crc kubenswrapper[4492]: I1126 06:48:50.054925 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 06:48:50 crc kubenswrapper[4492]: I1126 06:48:50.054951 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 06:48:50 crc kubenswrapper[4492]: E1126 06:48:50.055090 4492 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 26 06:48:50 crc kubenswrapper[4492]: E1126 06:48:50.055118 4492 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 26 06:48:50 crc kubenswrapper[4492]: E1126 
06:48:50.055148 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-26 06:48:58.055135907 +0000 UTC m=+33.939024204 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 26 06:48:50 crc kubenswrapper[4492]: E1126 06:48:50.055202 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-26 06:48:58.055159241 +0000 UTC m=+33.939047539 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 26 06:48:50 crc kubenswrapper[4492]: E1126 06:48:50.055281 4492 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 26 06:48:50 crc kubenswrapper[4492]: E1126 06:48:50.055299 4492 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 26 06:48:50 crc kubenswrapper[4492]: E1126 06:48:50.055313 4492 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 06:48:50 crc kubenswrapper[4492]: E1126 06:48:50.055342 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-26 06:48:58.055334309 +0000 UTC m=+33.939222607 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 06:48:50 crc kubenswrapper[4492]: E1126 06:48:50.055392 4492 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 26 06:48:50 crc kubenswrapper[4492]: E1126 06:48:50.055401 4492 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 26 06:48:50 crc kubenswrapper[4492]: E1126 06:48:50.055407 4492 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 06:48:50 crc kubenswrapper[4492]: E1126 06:48:50.055426 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-26 06:48:58.05541974 +0000 UTC m=+33.939308039 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 06:48:50 crc kubenswrapper[4492]: I1126 06:48:50.057880 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:50 crc kubenswrapper[4492]: I1126 06:48:50.057912 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:50 crc kubenswrapper[4492]: I1126 06:48:50.057924 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:50 crc kubenswrapper[4492]: I1126 06:48:50.057943 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:50 crc kubenswrapper[4492]: I1126 06:48:50.057953 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:50Z","lastTransitionTime":"2025-11-26T06:48:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:48:50 crc kubenswrapper[4492]: I1126 06:48:50.160020 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:50 crc kubenswrapper[4492]: I1126 06:48:50.160060 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:50 crc kubenswrapper[4492]: I1126 06:48:50.160072 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:50 crc kubenswrapper[4492]: I1126 06:48:50.160090 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:50 crc kubenswrapper[4492]: I1126 06:48:50.160101 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:50Z","lastTransitionTime":"2025-11-26T06:48:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:48:50 crc kubenswrapper[4492]: I1126 06:48:50.261889 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:50 crc kubenswrapper[4492]: I1126 06:48:50.261923 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:50 crc kubenswrapper[4492]: I1126 06:48:50.261933 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:50 crc kubenswrapper[4492]: I1126 06:48:50.261949 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:50 crc kubenswrapper[4492]: I1126 06:48:50.261960 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:50Z","lastTransitionTime":"2025-11-26T06:48:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:48:50 crc kubenswrapper[4492]: I1126 06:48:50.363465 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:50 crc kubenswrapper[4492]: I1126 06:48:50.363501 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:50 crc kubenswrapper[4492]: I1126 06:48:50.363510 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:50 crc kubenswrapper[4492]: I1126 06:48:50.363526 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:50 crc kubenswrapper[4492]: I1126 06:48:50.363542 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:50Z","lastTransitionTime":"2025-11-26T06:48:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:48:50 crc kubenswrapper[4492]: I1126 06:48:50.439104 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 06:48:50 crc kubenswrapper[4492]: I1126 06:48:50.439298 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 06:48:50 crc kubenswrapper[4492]: E1126 06:48:50.439545 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 06:48:50 crc kubenswrapper[4492]: E1126 06:48:50.439584 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 06:48:50 crc kubenswrapper[4492]: I1126 06:48:50.465975 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:50 crc kubenswrapper[4492]: I1126 06:48:50.466006 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:50 crc kubenswrapper[4492]: I1126 06:48:50.466017 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:50 crc kubenswrapper[4492]: I1126 06:48:50.466034 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:50 crc kubenswrapper[4492]: I1126 06:48:50.466044 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:50Z","lastTransitionTime":"2025-11-26T06:48:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:48:50 crc kubenswrapper[4492]: I1126 06:48:50.563988 4492 generic.go:334] "Generic (PLEG): container finished" podID="026c3325-a592-4828-8e4f-08bcb790014a" containerID="70dcd5b71e0ed41855711d244d6628b740a92d6ada3fa3114a76c280b83e7402" exitCode=0 Nov 26 06:48:50 crc kubenswrapper[4492]: I1126 06:48:50.564077 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-nrzjd" event={"ID":"026c3325-a592-4828-8e4f-08bcb790014a","Type":"ContainerDied","Data":"70dcd5b71e0ed41855711d244d6628b740a92d6ada3fa3114a76c280b83e7402"} Nov 26 06:48:50 crc kubenswrapper[4492]: I1126 06:48:50.567461 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:50 crc kubenswrapper[4492]: I1126 06:48:50.567487 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:50 crc kubenswrapper[4492]: I1126 06:48:50.567496 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:50 crc kubenswrapper[4492]: I1126 06:48:50.567524 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:50 crc kubenswrapper[4492]: I1126 06:48:50.567534 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:50Z","lastTransitionTime":"2025-11-26T06:48:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:48:50 crc kubenswrapper[4492]: I1126 06:48:50.568419 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" event={"ID":"9b104695-0850-4fb3-b2f8-f764435f8694","Type":"ContainerStarted","Data":"dba16d525497282969e2ead66e1d4886793234d0f4d2ef143a8246329131b046"} Nov 26 06:48:50 crc kubenswrapper[4492]: I1126 06:48:50.568447 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" event={"ID":"9b104695-0850-4fb3-b2f8-f764435f8694","Type":"ContainerStarted","Data":"501133ebc0e93cbba7bea713d813e6f41caa28286ed59ae8d328b6f9050a1e71"} Nov 26 06:48:50 crc kubenswrapper[4492]: I1126 06:48:50.568457 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" event={"ID":"9b104695-0850-4fb3-b2f8-f764435f8694","Type":"ContainerStarted","Data":"738cf2a56bd431ea3dcd29c8a6e66881e618cf6592af8ce1852be9776c41f803"} Nov 26 06:48:50 crc kubenswrapper[4492]: I1126 06:48:50.568467 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" event={"ID":"9b104695-0850-4fb3-b2f8-f764435f8694","Type":"ContainerStarted","Data":"5de69ea04f427c1f151433ee53b119dfa7e3cd603f924ebca340c5050ff7d097"} Nov 26 06:48:50 crc kubenswrapper[4492]: I1126 06:48:50.568477 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" event={"ID":"9b104695-0850-4fb3-b2f8-f764435f8694","Type":"ContainerStarted","Data":"0301a329531b99f93a3e91f672edc4ce17ae03cfd5562f5802362423f11670fc"} Nov 26 06:48:50 crc kubenswrapper[4492]: I1126 06:48:50.568485 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" event={"ID":"9b104695-0850-4fb3-b2f8-f764435f8694","Type":"ContainerStarted","Data":"1519223368c78fc663c8f6674135a7c574425dccbd0151d6663836b4715e9f9e"} Nov 26 06:48:50 crc kubenswrapper[4492]: I1126 06:48:50.581141 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"995e57c0-8e79-4857-8451-c7f7b51a05d3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70a3d5141782b7c4dcb2ef96a026e1fb4f4ef101c967f64a20ece097bbdf007f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1684e95c263df92c92efdd2240417c071f977b58d825abd2155277d7cce1fd57\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://15f4e01e3336948991062e895549de16371af485e1cc1f32591b5dc4fc62472b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36
dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef8e05b7e0643e9159acdd474c0f1ed97db182b0a7ed4f21b475ce6e4c051f52\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a6dd3695118a8c09585a7cfceb42ac5ae5898562c5f6442da6936f849a4e9f8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T06:48:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1126 06:48:41.573117 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1126 06:48:41.573321 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 06:48:41.575536 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3533228848/tls.crt::/tmp/serving-cert-3533228848/tls.key\\\\\\\"\\\\nI1126 06:48:41.958496 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 06:48:41.961105 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 06:48:41.961124 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 06:48:41.961145 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 06:48:41.961150 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 06:48:41.965068 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 06:48:41.965092 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:48:41.965097 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:48:41.965101 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 06:48:41.965103 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 06:48:41.965106 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 06:48:41.965108 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 06:48:41.965323 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 06:48:41.966098 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ca15652692a4dd6b01f7ec02b8ff37ac41ed740c4c47d451e0742aa80d0c028\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4421dd0243f7239dfa2ede847b9cb5230663e69a214dca3dfb1a4763ea76b233\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4421dd0243f7239dfa2ede847b9cb5230663e69a214dca3dfb1a4763ea76b233\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:24Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:50Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:50 crc kubenswrapper[4492]: I1126 06:48:50.594271 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:50Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:50 crc kubenswrapper[4492]: I1126 06:48:50.604388 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:50Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:50 crc kubenswrapper[4492]: I1126 06:48:50.614762 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2644f0895688786b5b70f08011457eed33cb0a7962ac6dde6b60dd3276497011\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://27d142c5328a6f659cd2cee0b6535403ccbfb07aee1ea29c928c9d80a847f4ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:50Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:50 crc kubenswrapper[4492]: I1126 06:48:50.623819 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:50Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:50 crc kubenswrapper[4492]: I1126 06:48:50.635377 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://78117c73db01f54f893d52844cab11a6257a1f6b6b582fb751065e1acc329620\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:50Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:50 crc kubenswrapper[4492]: I1126 06:48:50.644104 4492 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4aa19f79274209a31db5cfe0a8ff6f71000fc4efb2d65dfab3f719d3a7f1ee9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:50Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:50 crc kubenswrapper[4492]: I1126 06:48:50.659697 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b104695-0850-4fb3-b2f8-f764435f8694\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging 
kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/ru
n/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mount
Path\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4001ee998f435fc6249ef42a3a6a9a8cf4a5031eb8999158ba3ba6bfa41a4a43\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4001ee998f435fc6249ef42a3a6a9a8cf4a5031eb8999158ba3ba6bfa41a4a43\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lghgp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-11-26T06:48:50Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:50 crc kubenswrapper[4492]: I1126 06:48:50.669611 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:50 crc kubenswrapper[4492]: I1126 06:48:50.669671 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:50 crc kubenswrapper[4492]: I1126 06:48:50.669681 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:50 crc kubenswrapper[4492]: I1126 06:48:50.669698 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:50 crc kubenswrapper[4492]: I1126 06:48:50.669706 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:50Z","lastTransitionTime":"2025-11-26T06:48:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:48:50 crc kubenswrapper[4492]: I1126 06:48:50.670874 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-nrzjd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"026c3325-a592-4828-8e4f-08bcb790014a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70dcd5b71e0ed41855711d244d6628b740a92d6ada3fa3114a76c280b83e7402\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70dcd5b71e0ed41855711d244d6628b740a92d6ada3fa3114a76c280b83e7402\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reaso
n\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-nrzjd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:50Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:50 crc 
kubenswrapper[4492]: I1126 06:48:50.678245 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6lnwf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0151e6e0-df4e-4482-9309-f8cce9bc6ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f91d7ead0eaa9a8c8d4ec6372d35236fc33de1f8606616efadfee2ec6a71324\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cmsnp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6lnwf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:50Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:50 crc kubenswrapper[4492]: I1126 06:48:50.685665 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"04bf18ad-d2a1-4b30-a3fa-2b6247363c82\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fad93839d2a51dffea51b659a6dcbfe24701e00ebb88e18329f7aa4351e1b4f5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lwjt9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://695ce8a08afa726c47c6aa48ddd828cbc420a9740de6cf165351e5bd68174a89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lwjt9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6blv7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:50Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:50 crc kubenswrapper[4492]: I1126 06:48:50.695123 4492 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"779b4f9a-92b7-4dcc-938a-e4de5decd688\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e170e91f442c9f45c7adfc9a5f8435cb51135522d5ac61f29829834c1f797e0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a8187b933b520c7a9c1c7f798f841f3892c249f52eddd13c0c7585a8bc916f20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b87661ddeafdf124a87d6bc50755b340e32d88bbc35a005ae13aa66aa3b39ff4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a8bf4249c1551f054875ff3ef1
46502de6c99fd3afd10d78b41274196a35a6d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:24Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:50Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:50 crc kubenswrapper[4492]: I1126 06:48:50.704028 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-5bshd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a471ac3f-0ac0-4110-94bb-194c0de0af26\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a91fba8751c53be54b0060bfc75906ab11b521770ca44425d8910fa13027c9d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-
cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gt98z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-5bshd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:50Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:50 crc kubenswrapper[4492]: I1126 06:48:50.736504 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/node-ca-hjxcm"] Nov 26 06:48:50 crc kubenswrapper[4492]: I1126 06:48:50.736822 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/node-ca-hjxcm" Nov 26 06:48:50 crc kubenswrapper[4492]: I1126 06:48:50.739196 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Nov 26 06:48:50 crc kubenswrapper[4492]: I1126 06:48:50.740634 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Nov 26 06:48:50 crc kubenswrapper[4492]: I1126 06:48:50.740795 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Nov 26 06:48:50 crc kubenswrapper[4492]: I1126 06:48:50.740837 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Nov 26 06:48:50 crc kubenswrapper[4492]: I1126 06:48:50.750254 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"779b4f9a-92b7-4dcc-938a-e4de5decd688\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e170e91f442c9f45c7adfc9a5f8435cb51135522d5ac61f29829834c1f797e0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a8187b933b520c7a9c1c7f798f841f3892c249f52eddd13c0c7585a8bc916f20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\
\\":\\\"cri-o://b87661ddeafdf124a87d6bc50755b340e32d88bbc35a005ae13aa66aa3b39ff4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a8bf4249c1551f054875ff3ef146502de6c99fd3afd10d78b41274196a35a6d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:24Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:50Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:50 crc kubenswrapper[4492]: I1126 06:48:50.759746 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-5bshd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a471ac3f-0ac0-4110-94bb-194c0de0af26\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a91fba8751c53be54b0060bfc75906ab11b521770ca44425d8910fa13027c9d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gt98z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-5bshd\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:50Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:50 crc kubenswrapper[4492]: I1126 06:48:50.761028 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/cfdb68d9-168b-4d04-a6ee-b2deef54a9ab-host\") pod \"node-ca-hjxcm\" (UID: \"cfdb68d9-168b-4d04-a6ee-b2deef54a9ab\") " pod="openshift-image-registry/node-ca-hjxcm" Nov 26 06:48:50 crc kubenswrapper[4492]: I1126 06:48:50.761148 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/cfdb68d9-168b-4d04-a6ee-b2deef54a9ab-serviceca\") pod \"node-ca-hjxcm\" (UID: \"cfdb68d9-168b-4d04-a6ee-b2deef54a9ab\") " pod="openshift-image-registry/node-ca-hjxcm" Nov 26 06:48:50 crc kubenswrapper[4492]: I1126 06:48:50.761215 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cpmw5\" (UniqueName: \"kubernetes.io/projected/cfdb68d9-168b-4d04-a6ee-b2deef54a9ab-kube-api-access-cpmw5\") pod \"node-ca-hjxcm\" (UID: \"cfdb68d9-168b-4d04-a6ee-b2deef54a9ab\") " pod="openshift-image-registry/node-ca-hjxcm" Nov 26 06:48:50 crc kubenswrapper[4492]: I1126 06:48:50.772499 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:50 crc kubenswrapper[4492]: I1126 06:48:50.772535 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:50 crc kubenswrapper[4492]: I1126 06:48:50.772545 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:50 crc kubenswrapper[4492]: I1126 06:48:50.772563 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:50 crc kubenswrapper[4492]: I1126 06:48:50.772573 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:50Z","lastTransitionTime":"2025-11-26T06:48:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:48:50 crc kubenswrapper[4492]: I1126 06:48:50.776219 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"995e57c0-8e79-4857-8451-c7f7b51a05d3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70a3d5141782b7c4dcb2ef96a026e1fb4f4ef101c967f64a20ece097bbdf007f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1684e95c263df92c92efdd2240417c071f977b58d825abd2155277d7cce1fd57\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://15f4e01e3336948991062e895549de16371af485e1cc1f32591b5dc4fc62472b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef8e05b7e0643e9159acdd474c0f1ed97db182b0a7ed4f21b475ce6e4c051f52\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a6dd3695118a8c09585a7cfceb42ac5ae5898562c5f6442da6936f849a4e9f8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T06:48:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1126 06:48:41.573117 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1126 06:48:41.573321 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 06:48:41.575536 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3533228848/tls.crt::/tmp/serving-cert-3533228848/tls.key\\\\\\\"\\\\nI1126 06:48:41.958496 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 06:48:41.961105 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 06:48:41.961124 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 06:48:41.961145 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 06:48:41.961150 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 06:48:41.965068 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 06:48:41.965092 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:48:41.965097 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:48:41.965101 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 06:48:41.965103 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 06:48:41.965106 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 06:48:41.965108 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 06:48:41.965323 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 06:48:41.966098 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ca15652692a4dd6b01f7ec02b8ff37ac41ed740c4c47d451e0742aa80d0c028\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4421dd0243f7239dfa2ede847b9cb5230663e69a214dca3dfb1a4763ea76b233\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4421dd0243f7239dfa2ede847b9cb5230663e69a214dca3dfb1a4763ea76b233\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:24Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:50Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:50 crc kubenswrapper[4492]: I1126 06:48:50.784688 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:50Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:50 crc kubenswrapper[4492]: I1126 06:48:50.792975 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:50Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:50 crc kubenswrapper[4492]: I1126 06:48:50.801330 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2644f0895688786b5b70f08011457eed33cb0a7962ac6dde6b60dd3276497011\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://27d142c5328a6f659cd2cee0b6535403ccbfb07aee1ea29c928c9d80a847f4ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:50Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:50 crc kubenswrapper[4492]: I1126 06:48:50.811009 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:50Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:50 crc kubenswrapper[4492]: I1126 06:48:50.819218 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-hjxcm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfdb68d9-168b-4d04-a6ee-b2deef54a9ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:50Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:50Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cpmw5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:50Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-hjxcm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to 
call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:50Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:50 crc kubenswrapper[4492]: I1126 06:48:50.828758 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://78117c73db01f54f893d52844cab11a6257a1f6b6b582fb751065e1acc329620\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:50Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:50 crc kubenswrapper[4492]: I1126 06:48:50.837428 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4aa19f79274209a31db5cfe0a8ff6f71000fc4efb2d65dfab3f719d3a7f1ee9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:50Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:50 crc kubenswrapper[4492]: I1126 06:48:50.851503 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b104695-0850-4fb3-b2f8-f764435f8694\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4001ee998f435fc6249ef42a3a6a9a8cf4a5031eb8999158ba3ba6bfa41a4a43\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4001ee998f435fc6249ef42a3a6a9a8cf4a5031eb8999158ba3ba6bfa41a4a43\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lghgp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:50Z 
is after 2025-08-24T17:21:41Z" Nov 26 06:48:50 crc kubenswrapper[4492]: I1126 06:48:50.862247 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/cfdb68d9-168b-4d04-a6ee-b2deef54a9ab-host\") pod \"node-ca-hjxcm\" (UID: \"cfdb68d9-168b-4d04-a6ee-b2deef54a9ab\") " pod="openshift-image-registry/node-ca-hjxcm" Nov 26 06:48:50 crc kubenswrapper[4492]: I1126 06:48:50.862316 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/cfdb68d9-168b-4d04-a6ee-b2deef54a9ab-serviceca\") pod \"node-ca-hjxcm\" (UID: \"cfdb68d9-168b-4d04-a6ee-b2deef54a9ab\") " pod="openshift-image-registry/node-ca-hjxcm" Nov 26 06:48:50 crc kubenswrapper[4492]: I1126 06:48:50.862359 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cpmw5\" (UniqueName: \"kubernetes.io/projected/cfdb68d9-168b-4d04-a6ee-b2deef54a9ab-kube-api-access-cpmw5\") pod \"node-ca-hjxcm\" (UID: \"cfdb68d9-168b-4d04-a6ee-b2deef54a9ab\") " pod="openshift-image-registry/node-ca-hjxcm" Nov 26 06:48:50 crc kubenswrapper[4492]: I1126 06:48:50.862370 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/cfdb68d9-168b-4d04-a6ee-b2deef54a9ab-host\") pod \"node-ca-hjxcm\" (UID: \"cfdb68d9-168b-4d04-a6ee-b2deef54a9ab\") " pod="openshift-image-registry/node-ca-hjxcm" Nov 26 06:48:50 crc kubenswrapper[4492]: I1126 06:48:50.862837 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-nrzjd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"026c3325-a592-4828-8e4f-08bcb790014a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70dcd5b71e0ed41855711d244d6628b740a92d6ada3fa3114a76c280b83e7402\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70dcd5b71e0ed41855711d244d6628b740a92d6ada3fa3114a76c280b83e7402\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reaso
n\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-nrzjd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:50Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:50 crc 
kubenswrapper[4492]: I1126 06:48:50.863657 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/cfdb68d9-168b-4d04-a6ee-b2deef54a9ab-serviceca\") pod \"node-ca-hjxcm\" (UID: \"cfdb68d9-168b-4d04-a6ee-b2deef54a9ab\") " pod="openshift-image-registry/node-ca-hjxcm" Nov 26 06:48:50 crc kubenswrapper[4492]: I1126 06:48:50.871564 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6lnwf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0151e6e0-df4e-4482-9309-f8cce9bc6ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f91d7ead0eaa9a8c8d4ec6372d35236fc33de1f8606616efadfee2ec6a71324\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cmsnp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6lnwf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:50Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:50 crc kubenswrapper[4492]: I1126 06:48:50.874207 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:50 crc kubenswrapper[4492]: I1126 06:48:50.874234 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:50 crc kubenswrapper[4492]: I1126 06:48:50.874245 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:50 crc kubenswrapper[4492]: 
I1126 06:48:50.874262 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:50 crc kubenswrapper[4492]: I1126 06:48:50.874273 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:50Z","lastTransitionTime":"2025-11-26T06:48:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:48:50 crc kubenswrapper[4492]: I1126 06:48:50.880467 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"04bf18ad-d2a1-4b30-a3fa-2b6247363c82\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fad93839d2a51dffea51b659a6dcbfe24701e00ebb88e18329f7aa4351e1b4f5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lwjt9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://695ce8a08afa726c47c6aa48ddd828cbc420a9740de6cf165351e5bd68174a89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-ac
cess-lwjt9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6blv7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:50Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:50 crc kubenswrapper[4492]: I1126 06:48:50.882031 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cpmw5\" (UniqueName: \"kubernetes.io/projected/cfdb68d9-168b-4d04-a6ee-b2deef54a9ab-kube-api-access-cpmw5\") pod \"node-ca-hjxcm\" (UID: \"cfdb68d9-168b-4d04-a6ee-b2deef54a9ab\") " pod="openshift-image-registry/node-ca-hjxcm" Nov 26 06:48:50 crc kubenswrapper[4492]: I1126 06:48:50.976422 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:50 crc kubenswrapper[4492]: I1126 06:48:50.976455 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:50 crc kubenswrapper[4492]: I1126 06:48:50.976465 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:50 crc kubenswrapper[4492]: I1126 06:48:50.976485 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:50 crc kubenswrapper[4492]: I1126 06:48:50.976495 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:50Z","lastTransitionTime":"2025-11-26T06:48:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:48:51 crc kubenswrapper[4492]: I1126 06:48:51.048913 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/node-ca-hjxcm" Nov 26 06:48:51 crc kubenswrapper[4492]: I1126 06:48:51.078681 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:51 crc kubenswrapper[4492]: I1126 06:48:51.078715 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:51 crc kubenswrapper[4492]: I1126 06:48:51.078730 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:51 crc kubenswrapper[4492]: I1126 06:48:51.078742 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:51 crc kubenswrapper[4492]: I1126 06:48:51.078751 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:51Z","lastTransitionTime":"2025-11-26T06:48:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:48:51 crc kubenswrapper[4492]: I1126 06:48:51.181662 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:51 crc kubenswrapper[4492]: I1126 06:48:51.181693 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:51 crc kubenswrapper[4492]: I1126 06:48:51.181707 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:51 crc kubenswrapper[4492]: I1126 06:48:51.181723 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:51 crc kubenswrapper[4492]: I1126 06:48:51.181731 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:51Z","lastTransitionTime":"2025-11-26T06:48:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:48:51 crc kubenswrapper[4492]: I1126 06:48:51.284229 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:51 crc kubenswrapper[4492]: I1126 06:48:51.284268 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:51 crc kubenswrapper[4492]: I1126 06:48:51.284277 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:51 crc kubenswrapper[4492]: I1126 06:48:51.284297 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:51 crc kubenswrapper[4492]: I1126 06:48:51.284310 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:51Z","lastTransitionTime":"2025-11-26T06:48:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:48:51 crc kubenswrapper[4492]: I1126 06:48:51.386369 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:51 crc kubenswrapper[4492]: I1126 06:48:51.386395 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:51 crc kubenswrapper[4492]: I1126 06:48:51.386405 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:51 crc kubenswrapper[4492]: I1126 06:48:51.386418 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:51 crc kubenswrapper[4492]: I1126 06:48:51.386430 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:51Z","lastTransitionTime":"2025-11-26T06:48:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:48:51 crc kubenswrapper[4492]: I1126 06:48:51.438498 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 06:48:51 crc kubenswrapper[4492]: E1126 06:48:51.438862 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 06:48:51 crc kubenswrapper[4492]: I1126 06:48:51.488876 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:51 crc kubenswrapper[4492]: I1126 06:48:51.488917 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:51 crc kubenswrapper[4492]: I1126 06:48:51.488927 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:51 crc kubenswrapper[4492]: I1126 06:48:51.488943 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:51 crc kubenswrapper[4492]: I1126 06:48:51.488952 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:51Z","lastTransitionTime":"2025-11-26T06:48:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:48:51 crc kubenswrapper[4492]: I1126 06:48:51.573699 4492 generic.go:334] "Generic (PLEG): container finished" podID="026c3325-a592-4828-8e4f-08bcb790014a" containerID="46f1a12a78f284278cf74713bf7a46f4d62a7f40258ec65ae217b5fbb5729525" exitCode=0 Nov 26 06:48:51 crc kubenswrapper[4492]: I1126 06:48:51.573782 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-nrzjd" event={"ID":"026c3325-a592-4828-8e4f-08bcb790014a","Type":"ContainerDied","Data":"46f1a12a78f284278cf74713bf7a46f4d62a7f40258ec65ae217b5fbb5729525"} Nov 26 06:48:51 crc kubenswrapper[4492]: I1126 06:48:51.575939 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-hjxcm" event={"ID":"cfdb68d9-168b-4d04-a6ee-b2deef54a9ab","Type":"ContainerStarted","Data":"f3b93237fa8e75f6423c8f194440aebb4fffec26f63b19b00396ee567fb454f7"} Nov 26 06:48:51 crc kubenswrapper[4492]: I1126 06:48:51.576005 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-hjxcm" event={"ID":"cfdb68d9-168b-4d04-a6ee-b2deef54a9ab","Type":"ContainerStarted","Data":"bccb4d57beeee5303c564223c76e8ad4c53433493f29f5aaf1a21f989cd8bf3c"} Nov 26 06:48:51 crc kubenswrapper[4492]: I1126 06:48:51.586419 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"995e57c0-8e79-4857-8451-c7f7b51a05d3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70a3d5141782b7c4dcb2ef96a026e1fb4f4ef101c967f64a20ece097bbdf007f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1684e95c263df92c92efdd2240417c071f977b58d825abd2155277d7cce1fd57\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://15f4e01e3336948991062e895549de16371af485e1cc1f32591b5dc4fc62472b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef8e05b7e0643e9159acdd474c0f1ed97db182b0a7ed4f21b475ce6e4c051f52\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a6dd3695118a8c09585a7cfceb42ac5ae5898562c5f6442da6936f849a4e9f8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T06:48:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1126 06:48:41.573117 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1126 06:48:41.573321 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 06:48:41.575536 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3533228848/tls.crt::/tmp/serving-cert-3533228848/tls.key\\\\\\\"\\\\nI1126 06:48:41.958496 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 06:48:41.961105 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 06:48:41.961124 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 06:48:41.961145 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 06:48:41.961150 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 06:48:41.965068 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 06:48:41.965092 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:48:41.965097 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:48:41.965101 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 06:48:41.965103 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 06:48:41.965106 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 06:48:41.965108 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 06:48:41.965323 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 06:48:41.966098 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ca15652692a4dd6b01f7ec02b8ff37ac41ed740c4c47d451e0742aa80d0c028\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4421dd0243f7239dfa2ede847b9cb5230663e69a214dca3dfb1a4763ea76b233\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4421dd0243f7239dfa2ede847b9cb5230663e69a214dca3dfb1a4763ea76b233\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:24Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:51Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:51 crc kubenswrapper[4492]: I1126 06:48:51.590946 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:51 crc kubenswrapper[4492]: I1126 06:48:51.590975 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:51 crc kubenswrapper[4492]: I1126 06:48:51.590985 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:51 crc kubenswrapper[4492]: I1126 06:48:51.591002 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:51 crc kubenswrapper[4492]: I1126 06:48:51.591012 4492 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:51Z","lastTransitionTime":"2025-11-26T06:48:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:48:51 crc kubenswrapper[4492]: I1126 06:48:51.597299 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:51Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:51 crc kubenswrapper[4492]: I1126 06:48:51.609429 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:51Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:51 crc kubenswrapper[4492]: I1126 06:48:51.620421 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2644f0895688786b5b70f08011457eed33cb0a7962ac6dde6b60dd3276497011\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://27d142c5328a6f659cd2cee0b6535403ccbfb07aee1ea29c928c9d80a847f4ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:51Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:51 crc kubenswrapper[4492]: I1126 06:48:51.631107 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:51Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:51 crc kubenswrapper[4492]: I1126 06:48:51.640219 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-hjxcm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfdb68d9-168b-4d04-a6ee-b2deef54a9ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:50Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:50Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cpmw5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:50Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-hjxcm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to 
call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:51Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:51 crc kubenswrapper[4492]: I1126 06:48:51.648923 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://78117c73db01f54f893d52844cab11a6257a1f6b6b582fb751065e1acc329620\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:51Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:51 crc kubenswrapper[4492]: I1126 06:48:51.656975 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4aa19f79274209a31db5cfe0a8ff6f71000fc4efb2d65dfab3f719d3a7f1ee9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:51Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:51 crc kubenswrapper[4492]: I1126 06:48:51.669214 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b104695-0850-4fb3-b2f8-f764435f8694\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4001ee998f435fc6249ef42a3a6a9a8cf4a5031eb8999158ba3ba6bfa41a4a43\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4001ee998f435fc6249ef42a3a6a9a8cf4a5031eb8999158ba3ba6bfa41a4a43\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lghgp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:51Z 
is after 2025-08-24T17:21:41Z" Nov 26 06:48:51 crc kubenswrapper[4492]: I1126 06:48:51.679137 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-nrzjd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"026c3325-a592-4828-8e4f-08bcb790014a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70dcd5b71e0ed41855711d244d6628b740a92d6ada3fa3114a76c280b83e7402\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70dcd5b71e0ed41855711d244d6628b740a92d6ada3fa3114a76c280b83e7402\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/ser
viceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://46f1a12a78f284278cf74713bf7a46f4d62a7f40258ec65ae217b5fbb5729525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://46f1a12a78f284278cf74713bf7a46f4d62a7f40258ec65ae217b5fbb5729525\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\
"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-nrzjd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:51Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:51 crc kubenswrapper[4492]: I1126 06:48:51.686194 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6lnwf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0151e6e0-df4e-4482-9309-f8cce9bc6ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f91d7ead0eaa9a8c8d4ec6372d35236fc33de1f8606616efadfee2ec6a71324\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cmsnp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6lnwf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:51Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:51 crc kubenswrapper[4492]: I1126 06:48:51.696435 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"04bf18ad-d2a1-4b30-a3fa-2b6247363c82\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fad93839d2a51dffea51b659a6dcbfe24701e00ebb88e18329f7aa4351e1b4f5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lwjt9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://695ce8a08afa726c47c6aa48ddd828cbc420a9740de6cf165351e5bd68174a89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lwjt9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6blv7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:51Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:51 crc kubenswrapper[4492]: I1126 06:48:51.696609 4492 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:51 crc kubenswrapper[4492]: I1126 06:48:51.696630 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:51 crc kubenswrapper[4492]: I1126 06:48:51.696640 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:51 crc kubenswrapper[4492]: I1126 06:48:51.696653 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:51 crc kubenswrapper[4492]: I1126 06:48:51.696662 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:51Z","lastTransitionTime":"2025-11-26T06:48:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:48:51 crc kubenswrapper[4492]: I1126 06:48:51.705220 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"779b4f9a-92b7-4dcc-938a-e4de5decd688\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e170e91f442c9f45c7adfc9a5f8435cb51135522d5ac61f29829834c1f797e0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a8187b933b520c7a9c1c7f798f841f3892c249f52eddd13c0c7585a8bc916f20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\
\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b87661ddeafdf124a87d6bc50755b340e32d88bbc35a005ae13aa66aa3b39ff4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a8bf4249c1551f054875ff3ef146502de6c99fd3afd10d78b41274196a35a6d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:24Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:51Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:51 crc kubenswrapper[4492]: I1126 06:48:51.714296 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-5bshd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a471ac3f-0ac0-4110-94bb-194c0de0af26\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a91fba8751c53be54b0060bfc75906ab11b521770ca44425d8910fa13027c9d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gt98z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-5bshd\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:51Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:51 crc kubenswrapper[4492]: I1126 06:48:51.724305 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-nrzjd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"026c3325-a592-4828-8e4f-08bcb790014a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70dcd5b71e0ed41855711d244d6628b740a92d6ada3fa3114a76c280b83e7402\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70dcd5b71e0ed41855711d244d6628b740a92d6ada3fa3114a76c280b83e7402\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin
\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://46f1a12a78f284278cf74713bf7a46f4d62a7f40258ec65ae217b5fbb5729525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://46f1a12a78f284278cf74713bf7a46f4d62a7f40258ec65ae217b5fbb5729525\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}
,{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-nrzjd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:51Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:51 crc kubenswrapper[4492]: I1126 06:48:51.733363 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6lnwf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0151e6e0-df4e-4482-9309-f8cce9bc6ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f91d7ead0eaa9a8c8d4ec6372d35236fc33de1f8606616efadfee2ec6a71324\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cmsnp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6lnwf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:51Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:51 crc kubenswrapper[4492]: I1126 06:48:51.740973 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"04bf18ad-d2a1-4b30-a3fa-2b6247363c82\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fad93839d2a51dffea51b659a6dcbfe24701e00ebb88e18329f7aa4351e1b4f5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lwjt9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://695ce8a08afa726c47c6aa48ddd828cbc420a9740de6cf165351e5bd68174a89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lwjt9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6blv7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:51Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:51 crc kubenswrapper[4492]: I1126 06:48:51.749434 4492 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"779b4f9a-92b7-4dcc-938a-e4de5decd688\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e170e91f442c9f45c7adfc9a5f8435cb51135522d5ac61f29829834c1f797e0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a8187b933b520c7a9c1c7f798f841f3892c249f52eddd13c0c7585a8bc916f20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b87661ddeafdf124a87d6bc50755b340e32d88bbc35a005ae13aa66aa3b39ff4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a8bf4249c1551f054875ff3ef1
46502de6c99fd3afd10d78b41274196a35a6d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:24Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:51Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:51 crc kubenswrapper[4492]: I1126 06:48:51.758895 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-5bshd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a471ac3f-0ac0-4110-94bb-194c0de0af26\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a91fba8751c53be54b0060bfc75906ab11b521770ca44425d8910fa13027c9d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-
cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gt98z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-5bshd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:51Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:51 crc kubenswrapper[4492]: I1126 06:48:51.767802 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"995e57c0-8e79-4857-8451-c7f7b51a05d3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70a3d5141782b7c4dcb2ef96a026e1fb4f4ef101c967f64a20ece097bbdf007f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1684e95c263df92c92efdd2240417c071f977b58d825abd2155277d7cce1fd57\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://15f4e01e3336948991062e895549de16371af485e1cc1f32591b5dc4fc62472b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef8e05b7e0643e9159acdd474c0f1ed97db182b0a7ed4f21b475ce6e4c051f52\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a6dd3695118a8c09585a7cfceb42ac5ae5898562c5f6442da6936f849a4e9f8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T06:48:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1126 06:48:41.573117 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1126 06:48:41.573321 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 06:48:41.575536 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3533228848/tls.crt::/tmp/serving-cert-3533228848/tls.key\\\\\\\"\\\\nI1126 06:48:41.958496 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 06:48:41.961105 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 06:48:41.961124 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 06:48:41.961145 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 06:48:41.961150 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 06:48:41.965068 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 06:48:41.965092 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:48:41.965097 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:48:41.965101 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 06:48:41.965103 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 06:48:41.965106 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 06:48:41.965108 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 06:48:41.965323 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 06:48:41.966098 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ca15652692a4dd6b01f7ec02b8ff37ac41ed740c4c47d451e0742aa80d0c028\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4421dd0243f7239dfa2ede847b9cb5230663e69a214dca3dfb1a4763ea76b233\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4421dd0243f7239dfa2ede847b9cb5230663e69a214dca3dfb1a4763ea76b233\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:24Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:51Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:51 crc kubenswrapper[4492]: I1126 06:48:51.775824 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:51Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:51 crc kubenswrapper[4492]: I1126 06:48:51.783184 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:51Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:51 crc kubenswrapper[4492]: I1126 06:48:51.791744 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2644f0895688786b5b70f08011457eed33cb0a7962ac6dde6b60dd3276497011\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://27d142c5328a6f659cd2cee0b6535403ccbfb07aee1ea29c928c9d80a847f4ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:51Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:51 crc kubenswrapper[4492]: I1126 06:48:51.798444 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:51 crc kubenswrapper[4492]: I1126 06:48:51.798472 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:51 crc kubenswrapper[4492]: I1126 06:48:51.798484 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:51 crc kubenswrapper[4492]: I1126 06:48:51.798499 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:51 crc kubenswrapper[4492]: I1126 06:48:51.798507 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:51Z","lastTransitionTime":"2025-11-26T06:48:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:48:51 crc kubenswrapper[4492]: I1126 06:48:51.799651 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:51Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:51 crc kubenswrapper[4492]: I1126 06:48:51.806250 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-hjxcm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfdb68d9-168b-4d04-a6ee-b2deef54a9ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f3b93237fa8e75f6423c8f194440aebb4fffec26f63b19b00396ee567fb454f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cpmw5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:50Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-hjxcm\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:51Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:51 crc kubenswrapper[4492]: I1126 06:48:51.814399 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://78117c73db01f54f893d52844cab11a6257a1f6b6b582fb751065e1acc329620\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:51Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:51 crc kubenswrapper[4492]: I1126 06:48:51.822154 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4aa19f79274209a31db5cfe0a8ff6f71000fc4efb2d65dfab3f719d3a7f1ee9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:51Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:51 crc kubenswrapper[4492]: I1126 06:48:51.834159 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b104695-0850-4fb3-b2f8-f764435f8694\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4001ee998f435fc6249ef42a3a6a9a8cf4a5031eb8999158ba3ba6bfa41a4a43\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4001ee998f435fc6249ef42a3a6a9a8cf4a5031eb8999158ba3ba6bfa41a4a43\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lghgp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:51Z 
is after 2025-08-24T17:21:41Z" Nov 26 06:48:51 crc kubenswrapper[4492]: I1126 06:48:51.900443 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:51 crc kubenswrapper[4492]: I1126 06:48:51.900479 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:51 crc kubenswrapper[4492]: I1126 06:48:51.900489 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:51 crc kubenswrapper[4492]: I1126 06:48:51.900507 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:51 crc kubenswrapper[4492]: I1126 06:48:51.900519 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:51Z","lastTransitionTime":"2025-11-26T06:48:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:48:52 crc kubenswrapper[4492]: I1126 06:48:52.002416 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:52 crc kubenswrapper[4492]: I1126 06:48:52.002449 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:52 crc kubenswrapper[4492]: I1126 06:48:52.002459 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:52 crc kubenswrapper[4492]: I1126 06:48:52.002474 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:52 crc kubenswrapper[4492]: I1126 06:48:52.002483 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:52Z","lastTransitionTime":"2025-11-26T06:48:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:48:52 crc kubenswrapper[4492]: I1126 06:48:52.104555 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:52 crc kubenswrapper[4492]: I1126 06:48:52.104588 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:52 crc kubenswrapper[4492]: I1126 06:48:52.104596 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:52 crc kubenswrapper[4492]: I1126 06:48:52.104609 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:52 crc kubenswrapper[4492]: I1126 06:48:52.104619 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:52Z","lastTransitionTime":"2025-11-26T06:48:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:48:52 crc kubenswrapper[4492]: I1126 06:48:52.206647 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:52 crc kubenswrapper[4492]: I1126 06:48:52.206683 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:52 crc kubenswrapper[4492]: I1126 06:48:52.206694 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:52 crc kubenswrapper[4492]: I1126 06:48:52.206713 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:52 crc kubenswrapper[4492]: I1126 06:48:52.206722 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:52Z","lastTransitionTime":"2025-11-26T06:48:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:48:52 crc kubenswrapper[4492]: I1126 06:48:52.308767 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:52 crc kubenswrapper[4492]: I1126 06:48:52.308800 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:52 crc kubenswrapper[4492]: I1126 06:48:52.308810 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:52 crc kubenswrapper[4492]: I1126 06:48:52.308825 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:52 crc kubenswrapper[4492]: I1126 06:48:52.308835 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:52Z","lastTransitionTime":"2025-11-26T06:48:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:48:52 crc kubenswrapper[4492]: I1126 06:48:52.412218 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:52 crc kubenswrapper[4492]: I1126 06:48:52.412243 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:52 crc kubenswrapper[4492]: I1126 06:48:52.412251 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:52 crc kubenswrapper[4492]: I1126 06:48:52.412264 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:52 crc kubenswrapper[4492]: I1126 06:48:52.412275 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:52Z","lastTransitionTime":"2025-11-26T06:48:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:48:52 crc kubenswrapper[4492]: I1126 06:48:52.437920 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 06:48:52 crc kubenswrapper[4492]: E1126 06:48:52.438018 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 06:48:52 crc kubenswrapper[4492]: I1126 06:48:52.438149 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 06:48:52 crc kubenswrapper[4492]: E1126 06:48:52.438336 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 06:48:52 crc kubenswrapper[4492]: I1126 06:48:52.514670 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:52 crc kubenswrapper[4492]: I1126 06:48:52.514700 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:52 crc kubenswrapper[4492]: I1126 06:48:52.514712 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:52 crc kubenswrapper[4492]: I1126 06:48:52.514726 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:52 crc kubenswrapper[4492]: I1126 06:48:52.514735 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:52Z","lastTransitionTime":"2025-11-26T06:48:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:48:52 crc kubenswrapper[4492]: I1126 06:48:52.582940 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" event={"ID":"9b104695-0850-4fb3-b2f8-f764435f8694","Type":"ContainerStarted","Data":"dcc2ea28d7ab1193d0836b56704a623efefea918b62fde4a41c3d5466a531a39"} Nov 26 06:48:52 crc kubenswrapper[4492]: I1126 06:48:52.584626 4492 generic.go:334] "Generic (PLEG): container finished" podID="026c3325-a592-4828-8e4f-08bcb790014a" containerID="233ce2ea0f4758076db5c922254232e5cccdc2466e03c6044a0ba4077dd71e8a" exitCode=0 Nov 26 06:48:52 crc kubenswrapper[4492]: I1126 06:48:52.584668 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-nrzjd" event={"ID":"026c3325-a592-4828-8e4f-08bcb790014a","Type":"ContainerDied","Data":"233ce2ea0f4758076db5c922254232e5cccdc2466e03c6044a0ba4077dd71e8a"} Nov 26 06:48:52 crc kubenswrapper[4492]: I1126 06:48:52.599882 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"779b4f9a-92b7-4dcc-938a-e4de5decd688\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e170e91f442c9f45c7adfc9a5f8435cb51135522d5ac61f29829834c1f797e0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a8187b933b520c7a9c1c7f798f841f3892c249f52eddd13c0c7585a8bc916f20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\
\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b87661ddeafdf124a87d6bc50755b340e32d88bbc35a005ae13aa66aa3b39ff4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a8bf4249c1551f054875ff3ef146502de6c99fd3afd10d78b41274196a35a6d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:24Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:52Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:52 crc kubenswrapper[4492]: I1126 06:48:52.616800 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:52 crc kubenswrapper[4492]: I1126 06:48:52.616823 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:52 crc kubenswrapper[4492]: I1126 06:48:52.616833 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:52 crc kubenswrapper[4492]: I1126 06:48:52.616846 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:52 crc kubenswrapper[4492]: I1126 06:48:52.616855 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:52Z","lastTransitionTime":"2025-11-26T06:48:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false 
reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:48:52 crc kubenswrapper[4492]: I1126 06:48:52.623384 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-5bshd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a471ac3f-0ac0-4110-94bb-194c0de0af26\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a91fba8751c53be54b0060bfc75906ab11b521770ca44425d8910fa13027c9d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gt98z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.
126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-5bshd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:52Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:52 crc kubenswrapper[4492]: I1126 06:48:52.641484 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"995e57c0-8e79-4857-8451-c7f7b51a05d3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70a3d5141782b7c4dcb2ef96a026e1fb4f4ef101c967f64a20ece097bbdf007f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1684e95c263df92c92efdd2240417c071f977b58d825abd2155277d7cce1fd57\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-res
ources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://15f4e01e3336948991062e895549de16371af485e1cc1f32591b5dc4fc62472b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef8e05b7e0643e9159acdd474c0f1ed97db182b0a7ed4f21b475ce6e4c051f52\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a6dd3695118a8c09585a7cfceb42ac5ae5898562c5f6442da6936f849a4e9f8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T06:48:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1126 06:48:41.573117 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1126 06:48:41.573321 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 06:48:41.575536 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3533228848/tls.crt::/tmp/serving-cert-3533228848/tls.key\\\\\\\"\\\\nI1126 06:48:41.958496 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 06:48:41.961105 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 06:48:41.961124 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 06:48:41.961145 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 06:48:41.961150 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 06:48:41.965068 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 06:48:41.965092 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:48:41.965097 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:48:41.965101 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 06:48:41.965103 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 06:48:41.965106 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 06:48:41.965108 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 06:48:41.965323 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery 
information is complete\\\\nF1126 06:48:41.966098 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ca15652692a4dd6b01f7ec02b8ff37ac41ed740c4c47d451e0742aa80d0c028\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4421dd0243f7239dfa2ede847b9cb5230663e69a214dca3dfb1a4763ea76b233\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4421dd0243f7239dfa2ede847b9cb5230663e69a214dca3dfb1a4763ea76b233\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:24Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:52Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:52 crc kubenswrapper[4492]: I1126 06:48:52.664839 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready 
status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:52Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:52 crc kubenswrapper[4492]: I1126 06:48:52.674811 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:52Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:52 crc kubenswrapper[4492]: I1126 06:48:52.686923 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2644f0895688786b5b70f08011457eed33cb0a7962ac6dde6b60dd3276497011\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://27d142c5328a6f659cd2cee0b6535403ccbfb07aee1ea29c928c9d80a847f4ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:52Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:52 crc kubenswrapper[4492]: I1126 06:48:52.697229 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:52Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:52 crc kubenswrapper[4492]: I1126 06:48:52.705645 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-hjxcm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfdb68d9-168b-4d04-a6ee-b2deef54a9ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f3b93237fa8e75f6423c8f194440aebb4fffec26f63b19b00396ee567fb454f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cpmw5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:50Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-hjxcm\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:52Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:52 crc kubenswrapper[4492]: I1126 06:48:52.716146 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://78117c73db01f54f893d52844cab11a6257a1f6b6b582fb751065e1acc329620\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:52Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:52 crc kubenswrapper[4492]: I1126 06:48:52.719430 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:52 crc kubenswrapper[4492]: I1126 06:48:52.719549 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:52 crc kubenswrapper[4492]: I1126 06:48:52.719638 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:52 crc kubenswrapper[4492]: I1126 06:48:52.719724 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:52 crc kubenswrapper[4492]: I1126 06:48:52.719817 4492 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:52Z","lastTransitionTime":"2025-11-26T06:48:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:48:52 crc kubenswrapper[4492]: I1126 06:48:52.725139 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4aa19f79274209a31db5cfe0a8ff6f71000fc4efb2d65dfab3f719d3a7f1ee9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:52Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:52 crc kubenswrapper[4492]: I1126 06:48:52.742415 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b104695-0850-4fb3-b2f8-f764435f8694\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-op
envswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{
},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4001ee998f435fc6249ef42a3a6a9a8cf4a5031eb8999158ba3ba6bfa41a4a43\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36
cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4001ee998f435fc6249ef42a3a6a9a8cf4a5031eb8999158ba3ba6bfa41a4a43\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lghgp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:52Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:52 crc kubenswrapper[4492]: I1126 06:48:52.757393 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-nrzjd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"026c3325-a592-4828-8e4f-08bcb790014a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70dcd5b71e0ed41855711d244d6628b740a92d6ada3fa3114a76c280b83e7402\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70dcd5b71e0ed41855711d244d6628b740a92d6ada3fa3114a76c280b83e7402\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://46f1a12a78f284278cf74713bf7a46f4d62a7f40258ec65ae217b5fbb5729525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://46f1a12a78f284278cf74713bf7a46f4d62a7f40258ec65ae217b5fbb5729525\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://233ce2ea0f4758076db5c922254232e5cccdc2466e03c6044a0ba4077dd71e8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://233ce2ea0f4758076db5c922254232e5cccdc2466e03c6044a0ba4077dd71e8a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/
cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-nrzjd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:52Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:52 crc kubenswrapper[4492]: I1126 06:48:52.766151 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6lnwf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0151e6e0-df4e-4482-9309-f8cce9bc6ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f91d7ead0eaa9a8c8d4ec6372d35236fc33de1f8606616efadfee2ec6a71324\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cmsnp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6lnwf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:52Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:52 crc kubenswrapper[4492]: I1126 
06:48:52.777826 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"04bf18ad-d2a1-4b30-a3fa-2b6247363c82\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fad93839d2a51dffea51b659a6dcbfe24701e00ebb88e18329f7aa4351e1b4f5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lwjt9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://695ce8a08afa726c47c6aa48ddd828cbc420a9740de6cf165351e5bd68174a89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lwjt9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6blv7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet 
valid: current time 2025-11-26T06:48:52Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:52 crc kubenswrapper[4492]: I1126 06:48:52.821923 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:52 crc kubenswrapper[4492]: I1126 06:48:52.822192 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:52 crc kubenswrapper[4492]: I1126 06:48:52.822209 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:52 crc kubenswrapper[4492]: I1126 06:48:52.822224 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:52 crc kubenswrapper[4492]: I1126 06:48:52.822233 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:52Z","lastTransitionTime":"2025-11-26T06:48:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:48:52 crc kubenswrapper[4492]: I1126 06:48:52.923942 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:52 crc kubenswrapper[4492]: I1126 06:48:52.923974 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:52 crc kubenswrapper[4492]: I1126 06:48:52.923983 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:52 crc kubenswrapper[4492]: I1126 06:48:52.923998 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:52 crc kubenswrapper[4492]: I1126 06:48:52.924008 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:52Z","lastTransitionTime":"2025-11-26T06:48:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:48:53 crc kubenswrapper[4492]: I1126 06:48:53.026738 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:53 crc kubenswrapper[4492]: I1126 06:48:53.026769 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:53 crc kubenswrapper[4492]: I1126 06:48:53.026779 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:53 crc kubenswrapper[4492]: I1126 06:48:53.026793 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:53 crc kubenswrapper[4492]: I1126 06:48:53.026801 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:53Z","lastTransitionTime":"2025-11-26T06:48:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:48:53 crc kubenswrapper[4492]: I1126 06:48:53.130863 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:53 crc kubenswrapper[4492]: I1126 06:48:53.130908 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:53 crc kubenswrapper[4492]: I1126 06:48:53.130919 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:53 crc kubenswrapper[4492]: I1126 06:48:53.130937 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:53 crc kubenswrapper[4492]: I1126 06:48:53.130947 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:53Z","lastTransitionTime":"2025-11-26T06:48:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:48:53 crc kubenswrapper[4492]: I1126 06:48:53.233223 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:53 crc kubenswrapper[4492]: I1126 06:48:53.233291 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:53 crc kubenswrapper[4492]: I1126 06:48:53.233308 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:53 crc kubenswrapper[4492]: I1126 06:48:53.233333 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:53 crc kubenswrapper[4492]: I1126 06:48:53.233346 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:53Z","lastTransitionTime":"2025-11-26T06:48:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:48:53 crc kubenswrapper[4492]: I1126 06:48:53.335810 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:53 crc kubenswrapper[4492]: I1126 06:48:53.335855 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:53 crc kubenswrapper[4492]: I1126 06:48:53.335866 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:53 crc kubenswrapper[4492]: I1126 06:48:53.335882 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:53 crc kubenswrapper[4492]: I1126 06:48:53.335893 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:53Z","lastTransitionTime":"2025-11-26T06:48:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:48:53 crc kubenswrapper[4492]: I1126 06:48:53.437873 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 06:48:53 crc kubenswrapper[4492]: I1126 06:48:53.438024 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:53 crc kubenswrapper[4492]: I1126 06:48:53.438063 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:53 crc kubenswrapper[4492]: I1126 06:48:53.438073 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:53 crc kubenswrapper[4492]: E1126 06:48:53.438037 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 06:48:53 crc kubenswrapper[4492]: I1126 06:48:53.438087 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:53 crc kubenswrapper[4492]: I1126 06:48:53.438130 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:53Z","lastTransitionTime":"2025-11-26T06:48:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:48:53 crc kubenswrapper[4492]: I1126 06:48:53.539861 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:53 crc kubenswrapper[4492]: I1126 06:48:53.539903 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:53 crc kubenswrapper[4492]: I1126 06:48:53.539912 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:53 crc kubenswrapper[4492]: I1126 06:48:53.539928 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:53 crc kubenswrapper[4492]: I1126 06:48:53.539940 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:53Z","lastTransitionTime":"2025-11-26T06:48:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:48:53 crc kubenswrapper[4492]: I1126 06:48:53.591724 4492 generic.go:334] "Generic (PLEG): container finished" podID="026c3325-a592-4828-8e4f-08bcb790014a" containerID="1f662c3ac64d38717f3c8eb17f7ab22b7f9af88661f97ddae2caeec71b793a68" exitCode=0 Nov 26 06:48:53 crc kubenswrapper[4492]: I1126 06:48:53.591765 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-nrzjd" event={"ID":"026c3325-a592-4828-8e4f-08bcb790014a","Type":"ContainerDied","Data":"1f662c3ac64d38717f3c8eb17f7ab22b7f9af88661f97ddae2caeec71b793a68"} Nov 26 06:48:53 crc kubenswrapper[4492]: I1126 06:48:53.603022 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2644f0895688786b5b70f08011457eed33cb0a7962ac6dde6b60dd3276497011\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://27d142c5328a6f659cd2cee0b6535403ccbfb07aee1ea29c928c9d80a847f4ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:53Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:53 crc kubenswrapper[4492]: I1126 06:48:53.614106 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:53Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:53 crc kubenswrapper[4492]: I1126 06:48:53.624465 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-hjxcm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfdb68d9-168b-4d04-a6ee-b2deef54a9ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f3b93237fa8e75f6423c8f194440aebb4fffec26f63b19b00396ee567fb454f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cpmw5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:50Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-hjxcm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:53Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:53 crc kubenswrapper[4492]: I1126 06:48:53.635511 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"995e57c0-8e79-4857-8451-c7f7b51a05d3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"message\\\":\\\"containers with 
unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70a3d5141782b7c4dcb2ef96a026e1fb4f4ef101c967f64a20ece097bbdf007f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1684e95c263df92c92efdd2240417c071f977b58d825abd2155277d7cce1fd57\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://15f4e01e3336948991062e895549de16371af485e1cc1f32591b5dc4fc62472b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef8e05b7e0643e9159acdd474c0f1ed97db182b0a7ed4f21b475ce6e4c051f52\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a6dd3695118a8c09585a7cfceb42ac5ae5898562c5f6442da6936f849a4e9f8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T06:48:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1126 06:48:41.573117 1 builder.go:272] unable to get owner 
reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1126 06:48:41.573321 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 06:48:41.575536 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3533228848/tls.crt::/tmp/serving-cert-3533228848/tls.key\\\\\\\"\\\\nI1126 06:48:41.958496 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 06:48:41.961105 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 06:48:41.961124 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 06:48:41.961145 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 06:48:41.961150 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 06:48:41.965068 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 06:48:41.965092 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:48:41.965097 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:48:41.965101 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 06:48:41.965103 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 06:48:41.965106 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 06:48:41.965108 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 06:48:41.965323 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 06:48:41.966098 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ca15652692a4dd6b01f7ec02b8ff37ac41ed740c4c47d451e0742aa80d0c028\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4421dd0243f7239dfa2ede847b9cb5230663e69a214dca3dfb1a4763ea76b233\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4421dd0243f7239dfa2ede847b9cb5230663e69a214dca3dfb1a4763ea76b233\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:24Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:53Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:53 crc kubenswrapper[4492]: I1126 06:48:53.643530 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:53 crc kubenswrapper[4492]: I1126 06:48:53.643562 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:53 crc kubenswrapper[4492]: I1126 06:48:53.643571 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:53 crc kubenswrapper[4492]: I1126 06:48:53.643584 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:53 crc kubenswrapper[4492]: I1126 06:48:53.643595 4492 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:53Z","lastTransitionTime":"2025-11-26T06:48:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:48:53 crc kubenswrapper[4492]: I1126 06:48:53.645627 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:53Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:53 crc kubenswrapper[4492]: I1126 06:48:53.655886 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:53Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:53 crc kubenswrapper[4492]: I1126 06:48:53.668827 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b104695-0850-4fb3-b2f8-f764435f8694\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4001ee998f435fc6249ef42a3a6a9a8cf4a5031eb8999158ba3ba6bfa41a4a43\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4001ee998f435fc6249ef42a3a6a9a8cf4a5031eb8999158ba3ba6bfa41a4a43\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lghgp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:53Z 
is after 2025-08-24T17:21:41Z" Nov 26 06:48:53 crc kubenswrapper[4492]: I1126 06:48:53.677874 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://78117c73db01f54f893d52844cab11a6257a1f6b6b582fb751065e1acc329620\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:53Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:53 crc kubenswrapper[4492]: I1126 06:48:53.686432 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4aa19f79274209a31db5cfe0a8ff6f71000fc4efb2d65dfab3f719d3a7f1ee9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:53Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:53 crc kubenswrapper[4492]: I1126 06:48:53.697504 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-nrzjd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"026c3325-a592-4828-8e4f-08bcb790014a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70dcd5b71e0ed41855711d244d6628b740a92d6ada3fa3114a76c280b83e7402\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70dcd5b71e0ed41855711d244d6628b740a92d6ada3fa3114a76c280b83e7402\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://46f1a12a78f284278cf74713bf7a46f4d62a7f40258ec65ae217b5fbb5729525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://46f1a12a78f284278cf74713bf7a46f4d62a7f40258ec65ae217b5fbb5729525\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://233ce2ea0f4758076db5c922254232e5cccdc2466e03c6044a0ba4077dd71e8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://233ce2ea0f4758076db5c922254232e5cccdc2466e03c6044a0ba4077dd71e8a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1f662c3ac64d38717f3c8eb17f7ab22b7f9af88661f97ddae2caeec71b793a68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f662c3ac64d38717f3c8eb17f7ab22b7f9af88661f97ddae2caeec71b793a68\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disa
bled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-nrzjd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:53Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:53 crc kubenswrapper[4492]: I1126 06:48:53.705104 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6lnwf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0151e6e0-df4e-4482-9309-f8cce9bc6ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f91d7ead0eaa9a8c8d4ec6372d35236fc33de1f8606616efadfee2ec6a71324\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cmsnp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\"
 for pod \"openshift-dns\"/\"node-resolver-6lnwf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:53Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:53 crc kubenswrapper[4492]: I1126 06:48:53.712836 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"04bf18ad-d2a1-4b30-a3fa-2b6247363c82\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fad93839d2a51dffea51b659a6dcbfe24701e00ebb88e18329f7aa4351e1b4f5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lwjt9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://695ce8a08afa726c47c6aa48ddd828cbc420a9740de6cf165351e5bd68174a89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lwjt9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":
[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6blv7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:53Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:53 crc kubenswrapper[4492]: I1126 06:48:53.725345 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-5bshd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a471ac3f-0ac0-4110-94bb-194c0de0af26\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a91fba8751c53be54b0060bfc75906ab11b521770ca44425d8910fa13027c9d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":
\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gt98z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-5bshd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:53Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:53 crc kubenswrapper[4492]: I1126 06:48:53.736412 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"779b4f9a-92b7-4dcc-938a-e4de5decd688\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e170e91f442c9f45c7adfc9a5f8435cb51135522d5ac61f29829834c1f797e0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a8187b933b520c7a9c1c7f798f841f3892c249f52eddd13c0c7585a8bc916f20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\
"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b87661ddeafdf124a87d6bc50755b340e32d88bbc35a005ae13aa66aa3b39ff4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a8bf4249c1551f054875ff3ef146502de6c99fd3afd10d78b41274196a35a6d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:24Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:53Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:53 crc kubenswrapper[4492]: I1126 06:48:53.745194 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:53 crc kubenswrapper[4492]: I1126 06:48:53.745221 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:53 crc kubenswrapper[4492]: I1126 06:48:53.745232 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:53 crc kubenswrapper[4492]: I1126 06:48:53.745247 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:53 crc kubenswrapper[4492]: I1126 06:48:53.745283 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:53Z","lastTransitionTime":"2025-11-26T06:48:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady 
message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:48:53 crc kubenswrapper[4492]: I1126 06:48:53.847076 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:48:53 crc kubenswrapper[4492]: I1126 06:48:53.847107 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:48:53 crc kubenswrapper[4492]: I1126 06:48:53.847115 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:48:53 crc kubenswrapper[4492]: I1126 06:48:53.847127 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:48:53 crc kubenswrapper[4492]: I1126 06:48:53.847139 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:53Z","lastTransitionTime":"2025-11-26T06:48:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:48:53 crc kubenswrapper[4492]: I1126 06:48:53.949532 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:48:53 crc kubenswrapper[4492]: I1126 06:48:53.949570 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:48:53 crc kubenswrapper[4492]: I1126 06:48:53.949579 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:48:53 crc kubenswrapper[4492]: I1126 06:48:53.949598 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:48:53 crc kubenswrapper[4492]: I1126 06:48:53.949608 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:53Z","lastTransitionTime":"2025-11-26T06:48:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:48:54 crc kubenswrapper[4492]: I1126 06:48:54.052146 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:48:54 crc kubenswrapper[4492]: I1126 06:48:54.052196 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:48:54 crc kubenswrapper[4492]: I1126 06:48:54.052209 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:48:54 crc kubenswrapper[4492]: I1126 06:48:54.052225 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:48:54 crc kubenswrapper[4492]: I1126 06:48:54.052235 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:54Z","lastTransitionTime":"2025-11-26T06:48:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:48:54 crc kubenswrapper[4492]: I1126 06:48:54.154065 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:48:54 crc kubenswrapper[4492]: I1126 06:48:54.154100 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:48:54 crc kubenswrapper[4492]: I1126 06:48:54.154110 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:48:54 crc kubenswrapper[4492]: I1126 06:48:54.154127 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:48:54 crc kubenswrapper[4492]: I1126 06:48:54.154139 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:54Z","lastTransitionTime":"2025-11-26T06:48:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:48:54 crc kubenswrapper[4492]: I1126 06:48:54.255834 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:48:54 crc kubenswrapper[4492]: I1126 06:48:54.255863 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:48:54 crc kubenswrapper[4492]: I1126 06:48:54.255871 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:48:54 crc kubenswrapper[4492]: I1126 06:48:54.255889 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:48:54 crc kubenswrapper[4492]: I1126 06:48:54.255898 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:54Z","lastTransitionTime":"2025-11-26T06:48:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:48:54 crc kubenswrapper[4492]: I1126 06:48:54.357386 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:48:54 crc kubenswrapper[4492]: I1126 06:48:54.357418 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:48:54 crc kubenswrapper[4492]: I1126 06:48:54.357428 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:48:54 crc kubenswrapper[4492]: I1126 06:48:54.357441 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:48:54 crc kubenswrapper[4492]: I1126 06:48:54.357450 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:54Z","lastTransitionTime":"2025-11-26T06:48:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:48:54 crc kubenswrapper[4492]: I1126 06:48:54.438088 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 26 06:48:54 crc kubenswrapper[4492]: E1126 06:48:54.438231 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 26 06:48:54 crc kubenswrapper[4492]: I1126 06:48:54.438331 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 26 06:48:54 crc kubenswrapper[4492]: E1126 06:48:54.438441 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 26 06:48:54 crc kubenswrapper[4492]: I1126 06:48:54.451375 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"995e57c0-8e79-4857-8451-c7f7b51a05d3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70a3d5141782b7c4dcb2ef96a026e1fb4f4ef101c967f64a20ece097bbdf007f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1684e95c263df92c92efdd2240417c071f977b58d825abd2155277d7cce1fd57\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://15f4e01e3336948991062e895549de16371af485e1cc1f32591b5dc4fc62472b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef8e05b7e0643e9159acdd474c0f1ed97db182b0a7ed4f21b475ce6e4c051f52\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a6dd3695118a8c09585a7cfceb42ac5ae5898562c5f6442da6936f849a4e9f8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T06:48:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1126 06:48:41.573117 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1126 06:48:41.573321 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 06:48:41.575536 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3533228848/tls.crt::/tmp/serving-cert-3533228848/tls.key\\\\\\\"\\\\nI1126 06:48:41.958496 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 06:48:41.961105 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 06:48:41.961124 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 06:48:41.961145 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 06:48:41.961150 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 06:48:41.965068 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 06:48:41.965092 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:48:41.965097 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:48:41.965101 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 06:48:41.965103 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 06:48:41.965106 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 06:48:41.965108 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 06:48:41.965323 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 06:48:41.966098 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ca15652692a4dd6b01f7ec02b8ff37ac41ed740c4c47d451e0742aa80d0c028\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4421dd0243f7239dfa2ede847b9cb5230663e69a214dca3dfb1a4763ea76b233\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4421dd0243f7239dfa2ede847b9cb5230663e69a214dca3dfb1a4763ea76b233\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:24Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:54Z is after 2025-08-24T17:21:41Z"
Nov 26 06:48:54 crc kubenswrapper[4492]: I1126 06:48:54.459679 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:48:54 crc kubenswrapper[4492]: I1126 06:48:54.459714 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:48:54 crc kubenswrapper[4492]: I1126 06:48:54.459726 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:48:54 crc kubenswrapper[4492]: I1126 06:48:54.459740 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:48:54 crc kubenswrapper[4492]: I1126 06:48:54.459750 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:54Z","lastTransitionTime":"2025-11-26T06:48:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:48:54 crc kubenswrapper[4492]: I1126 06:48:54.461085 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:54Z is after 2025-08-24T17:21:41Z"
Nov 26 06:48:54 crc kubenswrapper[4492]: I1126 06:48:54.469744 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:54Z is after 2025-08-24T17:21:41Z"
Nov 26 06:48:54 crc kubenswrapper[4492]: I1126 06:48:54.479237 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2644f0895688786b5b70f08011457eed33cb0a7962ac6dde6b60dd3276497011\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://27d142c5328a6f659cd2cee0b6535403ccbfb07aee1ea29c928c9d80a847f4ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:54Z is after 2025-08-24T17:21:41Z"
Nov 26 06:48:54 crc kubenswrapper[4492]: I1126 06:48:54.492633 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:54Z is after 2025-08-24T17:21:41Z"
Nov 26 06:48:54 crc kubenswrapper[4492]: I1126 06:48:54.504049 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-hjxcm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfdb68d9-168b-4d04-a6ee-b2deef54a9ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f3b93237fa8e75f6423c8f194440aebb4fffec26f63b19b00396ee567fb454f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cpmw5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:50Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-hjxcm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:54Z is after 2025-08-24T17:21:41Z"
Nov 26 06:48:54 crc kubenswrapper[4492]: I1126 06:48:54.515306 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://78117c73db01f54f893d52844cab11a6257a1f6b6b582fb751065e1acc329620\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:54Z is after 2025-08-24T17:21:41Z"
Nov 26 06:48:54 crc kubenswrapper[4492]: I1126 06:48:54.526698 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4aa19f79274209a31db5cfe0a8ff6f71000fc4efb2d65dfab3f719d3a7f1ee9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:54Z is after 2025-08-24T17:21:41Z"
Nov 26 06:48:54 crc kubenswrapper[4492]: I1126 06:48:54.546744 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b104695-0850-4fb3-b2f8-f764435f8694\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4001ee998f435fc6249ef42a3a6a9a8cf4a5031eb8999158ba3ba6bfa41a4a43\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4001ee998f435fc6249ef42a3a6a9a8cf4a5031eb8999158ba3ba6bfa41a4a43\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lghgp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:54Z is after 2025-08-24T17:21:41Z"
Nov 26 06:48:54 crc kubenswrapper[4492]: I1126 06:48:54.559150 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-nrzjd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"026c3325-a592-4828-8e4f-08bcb790014a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70dcd5b71e0ed41855711d244d6628b740a92d6ada3fa3114a76c280b83e7402\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70dcd5b71e0ed41855711d244d6628b740a92d6ada3fa3114a76c280b83e7402\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://46f1a12a78f284278cf74713bf7a46f4d62a7f40258ec65ae217b5fbb5729525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://46f1a12a78f284278cf74713bf7a46f4d62a7f40258ec65ae217b5fbb5729525\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://233ce2ea0f4758076db5c922254232e5cccdc2466e03c6044a0ba4077dd71e8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://233ce2ea0f4758076db5c922254232e5cccdc2466e03c6044a0ba4077dd71e8a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1f662c3ac64d38717f3c8eb17f7ab22b7f9af88661f97ddae2caeec71b793a68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f662c3ac64d38717f3c8eb17f7ab22b7f9af88661f97ddae2caeec71b793a68\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-nrzjd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:54Z is after 2025-08-24T17:21:41Z"
Nov 26 06:48:54 crc kubenswrapper[4492]: I1126 06:48:54.565423 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:48:54 crc kubenswrapper[4492]: I1126 06:48:54.565458 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:48:54 crc kubenswrapper[4492]: I1126 06:48:54.565468 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:48:54 crc kubenswrapper[4492]: I1126 06:48:54.565488 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:48:54 crc kubenswrapper[4492]: I1126 06:48:54.565503 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:54Z","lastTransitionTime":"2025-11-26T06:48:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:48:54 crc kubenswrapper[4492]: I1126 06:48:54.570148 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6lnwf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0151e6e0-df4e-4482-9309-f8cce9bc6ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f91d7ead0eaa9a8c8d4ec6372d35236fc33de1f8606616efadfee2ec6a71324\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cmsnp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6lnwf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:54Z is after 2025-08-24T17:21:41Z"
Nov 26 06:48:54 crc kubenswrapper[4492]: I1126 06:48:54.580963 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"04bf18ad-d2a1-4b30-a3fa-2b6247363c82\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fad93839d2a51dffea51b659a6dcbfe24701e00ebb88e18329f7aa4351e1b4f5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lwjt9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://695ce8a08afa726c47c6aa48ddd828cbc420a9740de6cf165351e5bd68174a89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lwjt9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6blv7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:54Z is after 2025-08-24T17:21:41Z"
Nov 26 06:48:54 crc kubenswrapper[4492]: I1126 06:48:54.592347 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"779b4f9a-92b7-4dcc-938a-e4de5decd688\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e170e91f442c9f45c7adfc9a5f8435cb51135522d5ac61f29829834c1f797e0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a8187b933b520c7a9c1c7f798f841f3892c249f52eddd13c0c7585a8bc916f20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b87661ddeafdf124a87d6bc50755b340e32d88bbc35a005ae13aa66aa3b39ff4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a8bf4249c1551f054875ff3ef1
46502de6c99fd3afd10d78b41274196a35a6d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:24Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:54Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:54 crc kubenswrapper[4492]: I1126 06:48:54.602578 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" event={"ID":"9b104695-0850-4fb3-b2f8-f764435f8694","Type":"ContainerStarted","Data":"38e3e63cfede7c1fc9951627a0d2da11df52468d38ba6eed25404ef2f2587da6"} Nov 26 06:48:54 crc kubenswrapper[4492]: I1126 06:48:54.602647 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-5bshd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a471ac3f-0ac0-4110-94bb-194c0de0af26\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a91fba8751c53be54b0060bfc75906ab11b521770ca44425d8910fa13027c9d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gt98z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-5bshd\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:54Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:54 crc kubenswrapper[4492]: I1126 06:48:54.606039 4492 generic.go:334] "Generic (PLEG): container finished" podID="026c3325-a592-4828-8e4f-08bcb790014a" containerID="39153e3f02feb4f9af860e81e6e426354d4113d26135aadb9bc708dcb2cb1770" exitCode=0 Nov 26 06:48:54 crc kubenswrapper[4492]: I1126 06:48:54.606154 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-nrzjd" event={"ID":"026c3325-a592-4828-8e4f-08bcb790014a","Type":"ContainerDied","Data":"39153e3f02feb4f9af860e81e6e426354d4113d26135aadb9bc708dcb2cb1770"} Nov 26 06:48:54 crc kubenswrapper[4492]: I1126 06:48:54.613756 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://78117c73db01f54f893d52844cab11a6257a1f6b6b582fb751065e1acc329620\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:54Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:54 crc kubenswrapper[4492]: I1126 06:48:54.622822 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4aa19f79274209a31db5cfe0a8ff6f71000fc4efb2d65dfab3f719d3a7f1ee9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:54Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:54 crc kubenswrapper[4492]: I1126 06:48:54.636838 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b104695-0850-4fb3-b2f8-f764435f8694\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5de69ea04f427c1f151433ee53b119dfa7e3cd603f924ebca340c5050ff7d097\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://738cf2a56bd431ea3dcd29c8a6e66881e618cf6592af8ce1852be9776c41f803\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dba16d525497282969e2ead66e1d4886793234d0f4d2ef143a8246329131b046\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://501133ebc0e93cbba7bea713d813e6f41caa28286ed59ae8d328b6f9050a1e71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0301a329531b99f93a3e91f672edc4ce17ae03cfd5562f5802362423f11670fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1519223368c78fc663c8f6674135a7c574425dccbd0151d6663836b4715e9f9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38e3e63cfede7c1fc9951627a0d2da11df52468d
38ba6eed25404ef2f2587da6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dcc2ea28d7ab1193d0836b56704a623efefea918b62fde4a41c3d5466a531a39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccou
nt\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4001ee998f435fc6249ef42a3a6a9a8cf4a5031eb8999158ba3ba6bfa41a4a43\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4001ee998f435fc6249ef42a3a6a9a8cf4a5031eb8999158ba3ba6bfa41a4a43\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lghgp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:54Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:54 crc kubenswrapper[4492]: I1126 06:48:54.650706 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-nrzjd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"026c3325-a592-4828-8e4f-08bcb790014a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70dcd5b71e0ed41855711d244d6628b740a92d6ada3fa3114a76c280b83e7402\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70dcd5b71e0ed41855711d244d6628b740a92d6ada3fa3114a76c280b83e7402\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://46f1a12a78f284278cf74713bf7a46f4d62a7f40258ec65ae217b5fbb5729525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://46f1a12a78f284278cf74713bf7a46f4d62a7f40258ec65ae217b5fbb5729525\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://233ce2ea0f4758076db5c922254232e5cccdc2466e03c6044a0ba4077dd71e8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://233ce2ea0f4758076db5c922254232e5cccdc2466e03c6044a0ba4077dd71e8a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1f662c3ac64d38717f3c8eb17f7ab22b7f9af88661f97ddae2caeec71b793a68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f662c3ac64d38717f3c8eb17f7ab22b7f9af88661f97ddae2caeec71b793a68\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disa
bled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-nrzjd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:54Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:54 crc kubenswrapper[4492]: I1126 06:48:54.659092 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6lnwf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0151e6e0-df4e-4482-9309-f8cce9bc6ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f91d7ead0eaa9a8c8d4ec6372d35236fc33de1f8606616efadfee2ec6a71324\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cmsnp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\"
 for pod \"openshift-dns\"/\"node-resolver-6lnwf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:54Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:54 crc kubenswrapper[4492]: I1126 06:48:54.668546 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:54 crc kubenswrapper[4492]: I1126 06:48:54.668618 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:54 crc kubenswrapper[4492]: I1126 06:48:54.668631 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:54 crc kubenswrapper[4492]: I1126 06:48:54.668649 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:54 crc kubenswrapper[4492]: I1126 06:48:54.668678 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:54Z","lastTransitionTime":"2025-11-26T06:48:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:48:54 crc kubenswrapper[4492]: I1126 06:48:54.669775 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"04bf18ad-d2a1-4b30-a3fa-2b6247363c82\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fad93839d2a51dffea51b659a6dcbfe24701e00ebb88e18329f7aa4351e1b4f5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets
/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lwjt9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://695ce8a08afa726c47c6aa48ddd828cbc420a9740de6cf165351e5bd68174a89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lwjt9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6blv7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:54Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:54 crc kubenswrapper[4492]: I1126 06:48:54.679095 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"779b4f9a-92b7-4dcc-938a-e4de5decd688\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e170e91f442c9f45c7adfc9a5f8435cb51135522d5ac61f29829834c1f797e0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a8187b933b520c7a9c1c7f798f841f3892c249f52eddd13c0c7585a8bc916f20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b87661ddeafdf124a87d6bc50755b340e32d88bbc35a005ae13aa66aa3b39ff4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a8bf4249c1551f054875ff3ef146502de6c99fd3afd10d78b41274196a35a6d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:24Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:54Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:54 crc kubenswrapper[4492]: I1126 06:48:54.689041 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-5bshd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a471ac3f-0ac0-4110-94bb-194c0de0af26\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a91fba8751c53be54b0060bfc75906ab11b521770ca44425d8910fa13027c9d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run
/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gt98z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-5bshd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:54Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:54 crc kubenswrapper[4492]: I1126 06:48:54.699467 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:54Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:54 crc kubenswrapper[4492]: I1126 06:48:54.706748 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-hjxcm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfdb68d9-168b-4d04-a6ee-b2deef54a9ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f3b93237fa8e75f6423c8f194440aebb4fffec26f63b19b00396ee567fb454f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cpmw5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:50Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-hjxcm\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:54Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:54 crc kubenswrapper[4492]: I1126 06:48:54.715900 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"995e57c0-8e79-4857-8451-c7f7b51a05d3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70a3d5141782b7c4dcb2ef96a026e1fb4f4ef101c967f64a20ece097bbdf007f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1684e95c263df92c92efdd2240417c071f977b58d825abd2155277d7cce1fd57\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://15f4e01e3336948991062e895549de16371af485e1cc1f32591b5dc4fc62472b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.i
o/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef8e05b7e0643e9159acdd474c0f1ed97db182b0a7ed4f21b475ce6e4c051f52\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a6dd3695118a8c09585a7cfceb42ac5ae5898562c5f6442da6936f849a4e9f8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T06:48:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1126 06:48:41.573117 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1126 06:48:41.573321 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 06:48:41.575536 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3533228848/tls.crt::/tmp/serving-cert-3533228848/tls.key\\\\\\\"\\\\nI1126 06:48:41.958496 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 06:48:41.961105 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 06:48:41.961124 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 06:48:41.961145 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 06:48:41.961150 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 06:48:41.965068 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 06:48:41.965092 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:48:41.965097 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:48:41.965101 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 06:48:41.965103 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 06:48:41.965106 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 06:48:41.965108 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 06:48:41.965323 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 06:48:41.966098 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ca15652692a4dd6b01f7ec02b8ff37ac41ed740c4c47d451e0742aa80d0c028\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4421dd0243f7239dfa2ede847b9cb5230663e69a214dca3dfb1a4763ea76b233\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4421dd0243f7239dfa2ede847b9cb5230663e69a214dca3dfb1a4763ea76b233\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:24Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:54Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:54 crc kubenswrapper[4492]: I1126 06:48:54.726649 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:54Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:54 crc kubenswrapper[4492]: I1126 06:48:54.735558 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:54Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:54 crc kubenswrapper[4492]: I1126 06:48:54.745011 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2644f0895688786b5b70f08011457eed33cb0a7962ac6dde6b60dd3276497011\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://27d142c5328a6f659cd2cee0b6535403ccbfb07aee1ea29c928c9d80a847f4ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:54Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:54 crc kubenswrapper[4492]: I1126 06:48:54.755119 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"779b4f9a-92b7-4dcc-938a-e4de5decd688\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e170e91f442c9f45c7adfc9a5f8435cb51135522d5ac61f29829834c1f797e0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a8187b933b520c7a9c1c7f798f841f3892c249f52eddd13c0c7585a8bc916f20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b87661ddeafdf124a87d6bc50755b340e32d88bbc35a005ae13aa66aa3b39ff4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc
-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a8bf4249c1551f054875ff3ef146502de6c99fd3afd10d78b41274196a35a6d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:24Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:54Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:54 crc kubenswrapper[4492]: I1126 06:48:54.767186 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-5bshd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a471ac3f-0ac0-4110-94bb-194c0de0af26\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a91fba8751c53be54b0060bfc75906ab11b521770ca44425d8910fa13027c9d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gt98z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-5bshd\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:54Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:54 crc kubenswrapper[4492]: I1126 06:48:54.770838 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:54 crc kubenswrapper[4492]: I1126 06:48:54.770870 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:54 crc kubenswrapper[4492]: I1126 06:48:54.770881 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:54 crc kubenswrapper[4492]: I1126 06:48:54.770895 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:54 crc kubenswrapper[4492]: I1126 06:48:54.770907 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:54Z","lastTransitionTime":"2025-11-26T06:48:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:48:54 crc kubenswrapper[4492]: I1126 06:48:54.776793 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:54Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:54 crc kubenswrapper[4492]: I1126 06:48:54.786411 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:54Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:54 crc kubenswrapper[4492]: I1126 06:48:54.795692 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2644f0895688786b5b70f08011457eed33cb0a7962ac6dde6b60dd3276497011\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://27d142c5328a6f659cd2cee0b6535403ccbfb07aee1ea29c928c9d80a847f4ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:54Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:54 crc kubenswrapper[4492]: I1126 06:48:54.805247 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:54Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:54 crc kubenswrapper[4492]: I1126 06:48:54.812979 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-hjxcm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfdb68d9-168b-4d04-a6ee-b2deef54a9ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f3b93237fa8e75f6423c8f194440aebb4fffec26f63b19b00396ee567fb454f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cpmw5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:50Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-hjxcm\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:54Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:54 crc kubenswrapper[4492]: I1126 06:48:54.822361 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"995e57c0-8e79-4857-8451-c7f7b51a05d3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70a3d5141782b7c4dcb2ef96a026e1fb4f4ef101c967f64a20ece097bbdf007f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1684e95c263df92c92efdd2240417c071f977b58d825abd2155277d7cce1fd57\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://15f4e01e3336948991062e895549de16371af485e1cc1f32591b5dc4fc62472b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.i
o/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef8e05b7e0643e9159acdd474c0f1ed97db182b0a7ed4f21b475ce6e4c051f52\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a6dd3695118a8c09585a7cfceb42ac5ae5898562c5f6442da6936f849a4e9f8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T06:48:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1126 06:48:41.573117 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1126 06:48:41.573321 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 06:48:41.575536 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3533228848/tls.crt::/tmp/serving-cert-3533228848/tls.key\\\\\\\"\\\\nI1126 06:48:41.958496 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 06:48:41.961105 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 06:48:41.961124 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 06:48:41.961145 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 06:48:41.961150 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 06:48:41.965068 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 06:48:41.965092 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:48:41.965097 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:48:41.965101 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 06:48:41.965103 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 06:48:41.965106 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 06:48:41.965108 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 06:48:41.965323 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 06:48:41.966098 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ca15652692a4dd6b01f7ec02b8ff37ac41ed740c4c47d451e0742aa80d0c028\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4421dd0243f7239dfa2ede847b9cb5230663e69a214dca3dfb1a4763ea76b233\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4421dd0243f7239dfa2ede847b9cb5230663e69a214dca3dfb1a4763ea76b233\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:24Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:54Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:54 crc kubenswrapper[4492]: I1126 06:48:54.832735 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://78117c73db01f54f893d52844cab11a6257a1f6b6b582fb751065e1acc329620\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:54Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:54 crc kubenswrapper[4492]: I1126 06:48:54.840966 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4aa19f79274209a31db5cfe0a8ff6f71000fc4efb2d65dfab3f719d3a7f1ee9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:54Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:54 crc kubenswrapper[4492]: I1126 06:48:54.854141 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b104695-0850-4fb3-b2f8-f764435f8694\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5de69ea04f427c1f151433ee53b119dfa7e3cd603f924ebca340c5050ff7d097\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://738cf2a56bd431ea3dcd29c8a6e66881e618cf6592af8ce1852be9776c41f803\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dba16d525497282969e2ead66e1d4886793234d0f4d2ef143a8246329131b046\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://501133ebc0e93cbba7bea713d813e6f41caa28286ed59ae8d328b6f9050a1e71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0301a329531b99f93a3e91f672edc4ce17ae03cfd5562f5802362423f11670fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1519223368c78fc663c8f6674135a7c574425dccbd0151d6663836b4715e9f9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38e3e63cfede7c1fc9951627a0d2da11df52468d
38ba6eed25404ef2f2587da6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dcc2ea28d7ab1193d0836b56704a623efefea918b62fde4a41c3d5466a531a39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccou
nt\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4001ee998f435fc6249ef42a3a6a9a8cf4a5031eb8999158ba3ba6bfa41a4a43\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4001ee998f435fc6249ef42a3a6a9a8cf4a5031eb8999158ba3ba6bfa41a4a43\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lghgp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:54Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:54 crc kubenswrapper[4492]: I1126 06:48:54.866231 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6lnwf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0151e6e0-df4e-4482-9309-f8cce9bc6ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f91d7ead0eaa9a8c8d4ec6372d35236fc33de1f8606616efadfee2ec6a71324\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cmsnp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6lnwf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:54Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:54 crc kubenswrapper[4492]: I1126 06:48:54.873324 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:54 crc kubenswrapper[4492]: I1126 06:48:54.873358 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:54 crc kubenswrapper[4492]: I1126 06:48:54.873367 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:54 crc kubenswrapper[4492]: I1126 06:48:54.873384 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:54 crc kubenswrapper[4492]: I1126 06:48:54.873396 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:54Z","lastTransitionTime":"2025-11-26T06:48:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:48:54 crc kubenswrapper[4492]: I1126 06:48:54.875604 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"04bf18ad-d2a1-4b30-a3fa-2b6247363c82\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fad93839d2a51dffea51b659a6dcbfe24701e00ebb88e18329f7aa4351e1b4f5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lwjt9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://695ce8a08afa726c47c6aa48ddd828cbc420a9740de6cf165351e5bd68174a89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lwjt9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6blv7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to 
call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:54Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:54 crc kubenswrapper[4492]: I1126 06:48:54.885767 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-nrzjd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"026c3325-a592-4828-8e4f-08bcb790014a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70dcd5b71e0ed41855711d244d6628b740a92d6ada3fa3114a76c280b83e7402\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70dcd5b71e0ed41855711d244d6628b740a92d6ada3fa3114a76c280b83e7402\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\
",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://46f1a12a78f284278cf74713bf7a46f4d62a7f40258ec65ae217b5fbb5729525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://46f1a12a78f284278cf74713bf7a46f4d62a7f40258ec65ae217b5fbb5729525\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://233ce2ea0f4758076db5c922254232e5cccdc2466e03c6044a0ba4077dd71e8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://233ce2ea0f4758076db5c922254232e5cccdc2466e03c6044a0ba4077dd71e8a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1f662c3ac64d38717f3c8eb17f7ab22b7f9af88661f97ddae2caeec71b793a68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"
containerID\\\":\\\"cri-o://1f662c3ac64d38717f3c8eb17f7ab22b7f9af88661f97ddae2caeec71b793a68\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://39153e3f02feb4f9af860e81e6e426354d4113d26135aadb9bc708dcb2cb1770\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://39153e3f02feb4f9af860e81e6e426354d4113d26135aadb9bc708dcb2cb1770\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-nrzjd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:54Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:54 crc kubenswrapper[4492]: I1126 06:48:54.975527 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:54 crc kubenswrapper[4492]: I1126 06:48:54.975558 4492 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:54 crc kubenswrapper[4492]: I1126 06:48:54.975571 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:54 crc kubenswrapper[4492]: I1126 06:48:54.975587 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:54 crc kubenswrapper[4492]: I1126 06:48:54.975606 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:54Z","lastTransitionTime":"2025-11-26T06:48:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.030112 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.030144 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.030154 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.030182 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.030194 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:55Z","lastTransitionTime":"2025-11-26T06:48:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:48:55 crc kubenswrapper[4492]: E1126 06:48:55.042651 4492 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148056Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608856Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"5a30a4c6-2314-4103-8c18-44e795d62516\\\",\\\"systemUUID\\\":\\\"836cf739-0185-4d24-bd92-dec4516ccf4f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:55Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.045734 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.045765 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.045773 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.045785 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.045795 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:55Z","lastTransitionTime":"2025-11-26T06:48:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:48:55 crc kubenswrapper[4492]: E1126 06:48:55.057653 4492 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148056Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608856Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"5a30a4c6-2314-4103-8c18-44e795d62516\\\",\\\"systemUUID\\\":\\\"836cf739-0185-4d24-bd92-dec4516ccf4f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:55Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.060290 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.060320 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.060331 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.060345 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.060353 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:55Z","lastTransitionTime":"2025-11-26T06:48:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:48:55 crc kubenswrapper[4492]: E1126 06:48:55.069546 4492 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148056Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608856Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"5a30a4c6-2314-4103-8c18-44e795d62516\\\",\\\"systemUUID\\\":\\\"836cf739-0185-4d24-bd92-dec4516ccf4f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:55Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.072726 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.072762 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.072772 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.072788 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.072798 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:55Z","lastTransitionTime":"2025-11-26T06:48:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:48:55 crc kubenswrapper[4492]: E1126 06:48:55.082464 4492 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148056Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608856Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"5a30a4c6-2314-4103-8c18-44e795d62516\\\",\\\"systemUUID\\\":\\\"836cf739-0185-4d24-bd92-dec4516ccf4f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:55Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.085297 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.085323 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.085333 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.085346 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.085355 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:55Z","lastTransitionTime":"2025-11-26T06:48:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:48:55 crc kubenswrapper[4492]: E1126 06:48:55.094343 4492 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148056Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608856Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"5a30a4c6-2314-4103-8c18-44e795d62516\\\",\\\"systemUUID\\\":\\\"836cf739-0185-4d24-bd92-dec4516ccf4f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:55Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:55 crc kubenswrapper[4492]: E1126 06:48:55.094448 4492 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.095684 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.095767 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.095827 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.095895 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.095949 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:55Z","lastTransitionTime":"2025-11-26T06:48:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.198524 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.198980 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.199060 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.199136 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.199234 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:55Z","lastTransitionTime":"2025-11-26T06:48:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.301229 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.301271 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.301281 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.301293 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.301306 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:55Z","lastTransitionTime":"2025-11-26T06:48:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.403502 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.403532 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.403543 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.403555 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.403568 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:55Z","lastTransitionTime":"2025-11-26T06:48:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.437876 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 06:48:55 crc kubenswrapper[4492]: E1126 06:48:55.437980 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.506350 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.506385 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.506394 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.506421 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.506431 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:55Z","lastTransitionTime":"2025-11-26T06:48:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.608332 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.608394 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.608404 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.608415 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.608425 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:55Z","lastTransitionTime":"2025-11-26T06:48:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.612714 4492 generic.go:334] "Generic (PLEG): container finished" podID="026c3325-a592-4828-8e4f-08bcb790014a" containerID="b34363c342a19d8eda1d5be4bea825a4e0f9a4281915b6ae5d0cc5b00ad7c926" exitCode=0 Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.612784 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-nrzjd" event={"ID":"026c3325-a592-4828-8e4f-08bcb790014a","Type":"ContainerDied","Data":"b34363c342a19d8eda1d5be4bea825a4e0f9a4281915b6ae5d0cc5b00ad7c926"} Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.612849 4492 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.613496 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.613542 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.625263 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"995e57c0-8e79-4857-8451-c7f7b51a05d3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70a3d5141782b7c4dcb2ef96a026e1fb4f4ef101c967f64a20ece097bbdf007f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1684e95c263df92c92efdd2240417c071f977b58d825abd2155277d7cce1fd57\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://15f4e01e3336948991062e895549de16371af485e1cc1f32591b5dc4fc62472b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef8e05b7e0643e9159acdd474c0f1ed97db182b0a7ed4f21b475ce6e4c051f52\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a6dd3695118a8c09585a7cfceb42ac5ae5898562c5f6442da6936f849a4e9f8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T06:48:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1126 06:48:41.573117 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1126 06:48:41.573321 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 06:48:41.575536 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3533228848/tls.crt::/tmp/serving-cert-3533228848/tls.key\\\\\\\"\\\\nI1126 06:48:41.958496 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 06:48:41.961105 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 06:48:41.961124 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 06:48:41.961145 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 06:48:41.961150 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 06:48:41.965068 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 06:48:41.965092 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:48:41.965097 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:48:41.965101 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 06:48:41.965103 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 06:48:41.965106 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 06:48:41.965108 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 06:48:41.965323 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 06:48:41.966098 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ca15652692a4dd6b01f7ec02b8ff37ac41ed740c4c47d451e0742aa80d0c028\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4421dd0243f7239dfa2ede847b9cb5230663e69a214dca3dfb1a4763ea76b233\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4421dd0243f7239dfa2ede847b9cb5230663e69a214dca3dfb1a4763ea76b233\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:24Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:55Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.634601 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:55Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.635344 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.637218 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.644610 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:55Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.654207 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2644f0895688786b5b70f08011457eed33cb0a7962ac6dde6b60dd3276497011\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://27d142c5328a6f659cd2cee0b6535403ccbfb07aee1ea29c928c9d80a847f4ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:55Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.662619 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:55Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.669997 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-hjxcm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfdb68d9-168b-4d04-a6ee-b2deef54a9ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f3b93237fa8e75f6423c8f194440aebb4fffec26f63b19b00396ee567fb454f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cpmw5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:50Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-hjxcm\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:55Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.679945 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://78117c73db01f54f893d52844cab11a6257a1f6b6b582fb751065e1acc329620\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:55Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.688862 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4aa19f79274209a31db5cfe0a8ff6f71000fc4efb2d65dfab3f719d3a7f1ee9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:55Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.703227 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b104695-0850-4fb3-b2f8-f764435f8694\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5de69ea04f427c1f151433ee53b119dfa7e3cd603f924ebca340c5050ff7d097\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://738cf2a56bd431ea3dcd29c8a6e66881e618cf6592af8ce1852be9776c41f803\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dba16d525497282969e2ead66e1d4886793234d0f4d2ef143a8246329131b046\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://501133ebc0e93cbba7bea713d813e6f41caa28286ed59ae8d328b6f9050a1e71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0301a329531b99f93a3e91f672edc4ce17ae03cfd5562f5802362423f11670fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1519223368c78fc663c8f6674135a7c574425dccbd0151d6663836b4715e9f9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38e3e63cfede7c1fc9951627a0d2da11df52468d
38ba6eed25404ef2f2587da6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dcc2ea28d7ab1193d0836b56704a623efefea918b62fde4a41c3d5466a531a39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccou
nt\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4001ee998f435fc6249ef42a3a6a9a8cf4a5031eb8999158ba3ba6bfa41a4a43\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4001ee998f435fc6249ef42a3a6a9a8cf4a5031eb8999158ba3ba6bfa41a4a43\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lghgp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:55Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.710927 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.710951 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.710959 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.710989 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.710998 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:55Z","lastTransitionTime":"2025-11-26T06:48:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.714105 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-nrzjd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"026c3325-a592-4828-8e4f-08bcb790014a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70dcd5b71e0ed41855711d244d6628b740a92d6ada3fa3114a76c280b83e7402\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70dcd5b71e0ed41855711d244d6628b740a92d6ada3fa3114a76c280b83e7402\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://46f1a12a78f284278c
f74713bf7a46f4d62a7f40258ec65ae217b5fbb5729525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://46f1a12a78f284278cf74713bf7a46f4d62a7f40258ec65ae217b5fbb5729525\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://233ce2ea0f4758076db5c922254232e5cccdc2466e03c6044a0ba4077dd71e8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://233ce2ea0f4758076db5c922254232e5cccdc2466e03c6044a0ba4077dd71e8a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1f662c3ac64d38717f3c8eb17f7ab22b7f9af88661f97ddae2caeec71b793a68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f662c3ac64d38717f3c8eb17f7ab22b7f9af88661f97ddae2caeec71b793a68\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\
\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://39153e3f02feb4f9af860e81e6e426354d4113d26135aadb9bc708dcb2cb1770\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://39153e3f02feb4f9af860e81e6e426354d4113d26135aadb9bc708dcb2cb1770\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b34363c342a19d8eda1d5be4bea825a4e0f9a4281915b6ae5d0cc5b00ad7c926\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b34363c342a19d8eda1d5be4bea825a4e0f9a4281915b6ae5d0cc5b00ad7c926\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-nrzjd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:55Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.722508 4492 status_manager.go:875] "Failed to update status for 
pod" pod="openshift-dns/node-resolver-6lnwf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0151e6e0-df4e-4482-9309-f8cce9bc6ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f91d7ead0eaa9a8c8d4ec6372d35236fc33de1f8606616efadfee2ec6a71324\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cmsnp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6lnwf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:55Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.730800 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"04bf18ad-d2a1-4b30-a3fa-2b6247363c82\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fad93839d2a51dffea51b659a6dcbfe24701e00ebb88e18329f7aa4351e1b4f5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lwjt9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://695ce8a08afa726c47c6aa48ddd828cbc420a9740de6cf165351e5bd68174a89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lwjt9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6blv7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:55Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.739381 4492 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"779b4f9a-92b7-4dcc-938a-e4de5decd688\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e170e91f442c9f45c7adfc9a5f8435cb51135522d5ac61f29829834c1f797e0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a8187b933b520c7a9c1c7f798f841f3892c249f52eddd13c0c7585a8bc916f20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b87661ddeafdf124a87d6bc50755b340e32d88bbc35a005ae13aa66aa3b39ff4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a8bf4249c1551f054875ff3ef1
46502de6c99fd3afd10d78b41274196a35a6d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:24Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:55Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.750639 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-5bshd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a471ac3f-0ac0-4110-94bb-194c0de0af26\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a91fba8751c53be54b0060bfc75906ab11b521770ca44425d8910fa13027c9d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-
cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gt98z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-5bshd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:55Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.760674 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"04bf18ad-d2a1-4b30-a3fa-2b6247363c82\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fad93839d2a51dffea51b659a6dcbfe24701e00ebb88e18329f7aa4351e1b4f5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lwjt9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://695ce8a08afa726c47c6aa48ddd828cbc420a9740de6cf165351e5bd68174a89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lwjt9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6blv7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:55Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.771585 4492 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-nrzjd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"026c3325-a592-4828-8e4f-08bcb790014a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70dcd5b71e0ed41855711d244d6628b740a92d6ada3fa3114a76c280b83e7402\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70dcd5b71e0ed41855711d244d6628b740a92d6ada3fa3114a76c280b83e7402\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://46f1a12a78f284278cf74713bf7a46f4d62a7f40258ec65ae217b5fbb5729525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a168
8df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://46f1a12a78f284278cf74713bf7a46f4d62a7f40258ec65ae217b5fbb5729525\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://233ce2ea0f4758076db5c922254232e5cccdc2466e03c6044a0ba4077dd71e8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://233ce2ea0f4758076db5c922254232e5cccdc2466e03c6044a0ba4077dd71e8a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1f662c3ac64d38717f3c8eb17f7ab22b7f9af88661f97ddae2caeec71b793a68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f662c3ac64d38717f3c8eb17f7ab22b7f9af88661f97ddae2caeec71b793a68\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"
/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://39153e3f02feb4f9af860e81e6e426354d4113d26135aadb9bc708dcb2cb1770\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://39153e3f02feb4f9af860e81e6e426354d4113d26135aadb9bc708dcb2cb1770\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b34363c342a19d8eda1d5be4bea825a4e0f9a4281915b6ae5d0cc5b00ad7c926\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b34363c342a19d8eda1d5be4bea825a4e0f9a4281915b6ae5d0cc5b00ad7c926\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-nrzjd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:55Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.779194 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6lnwf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0151e6e0-df4e-4482-9309-f8cce9bc6ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f91d7ead0eaa9a8c8d4ec6372d35236fc33de1f8606616efadfee2ec6a71324\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cmsnp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6lnwf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:55Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.787926 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-5bshd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a471ac3f-0ac0-4110-94bb-194c0de0af26\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a91fba8751c53be54b0060bfc75906ab11b521770ca44425d8910fa13027c9d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gt98z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-5bshd\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:55Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.797433 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"779b4f9a-92b7-4dcc-938a-e4de5decd688\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e170e91f442c9f45c7adfc9a5f8435cb51135522d5ac61f29829834c1f797e0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a8187b933b520c7a9c1c7f798f841f3892c249f52eddd13c0c7585a8bc916f20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b87661ddeafdf124a87d6bc50755b340e32d88bbc35a005ae13aa66aa3b39ff4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"st
arted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a8bf4249c1551f054875ff3ef146502de6c99fd3afd10d78b41274196a35a6d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:24Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:55Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.805388 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:55Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.812737 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.812766 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.812776 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.812788 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.812798 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:55Z","lastTransitionTime":"2025-11-26T06:48:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.814695 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2644f0895688786b5b70f08011457eed33cb0a7962ac6dde6b60dd3276497011\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://27d142c5328a6f659cd2cee0b6535403ccbfb07aee1ea29c928c9d80a847f4ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:55Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.824029 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch 
status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:55Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.831365 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-hjxcm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfdb68d9-168b-4d04-a6ee-b2deef54a9ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f3b93237fa8e75f6423c8f194440aebb4fffec26f63b19b00396ee567fb454f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cpmw5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:50Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-hjxcm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:55Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.840509 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"995e57c0-8e79-4857-8451-c7f7b51a05d3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"message\\\":\\\"containers with 
unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70a3d5141782b7c4dcb2ef96a026e1fb4f4ef101c967f64a20ece097bbdf007f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1684e95c263df92c92efdd2240417c071f977b58d825abd2155277d7cce1fd57\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://15f4e01e3336948991062e895549de16371af485e1cc1f32591b5dc4fc62472b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef8e05b7e0643e9159acdd474c0f1ed97db182b0a7ed4f21b475ce6e4c051f52\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a6dd3695118a8c09585a7cfceb42ac5ae5898562c5f6442da6936f849a4e9f8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T06:48:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1126 06:48:41.573117 1 builder.go:272] unable to get owner 
reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1126 06:48:41.573321 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 06:48:41.575536 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3533228848/tls.crt::/tmp/serving-cert-3533228848/tls.key\\\\\\\"\\\\nI1126 06:48:41.958496 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 06:48:41.961105 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 06:48:41.961124 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 06:48:41.961145 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 06:48:41.961150 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 06:48:41.965068 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 06:48:41.965092 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:48:41.965097 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:48:41.965101 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 06:48:41.965103 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 06:48:41.965106 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 06:48:41.965108 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 06:48:41.965323 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 06:48:41.966098 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ca15652692a4dd6b01f7ec02b8ff37ac41ed740c4c47d451e0742aa80d0c028\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4421dd0243f7239dfa2ede847b9cb5230663e69a214dca3dfb1a4763ea76b233\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4421dd0243f7239dfa2ede847b9cb5230663e69a214dca3dfb1a4763ea76b233\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:24Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:55Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.849514 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:55Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.857577 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4aa19f79274209a31db5cfe0a8ff6f71000fc4efb2d65dfab3f719d3a7f1ee9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:55Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.870312 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b104695-0850-4fb3-b2f8-f764435f8694\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5de69ea04f427c1f151433ee53b119dfa7e3cd603f924ebca340c5050ff7d097\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://738cf2a56bd431ea3dcd29c8a6e66881e618cf6592af8ce1852be9776c41f803\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc
/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dba16d525497282969e2ead66e1d4886793234d0f4d2ef143a8246329131b046\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://501133ebc0e93cbba7bea713d813e6f41caa28286ed59ae8d328b6f9050a1e71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0301a329531b99f93a3e91f672edc4ce17ae03cfd5562f5802362423f11670fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\"
,\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1519223368c78fc663c8f6674135a7c574425dccbd0151d6663836b4715e9f9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38e3e63cfede7c1fc9951627a0d2da11df52468d38ba6eed25404ef2f2587da6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-
ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dcc2ea28d7ab1193d0836b56704a623efefea918b62fde4a41c3d5466a531a39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4001ee998f435fc6249ef42a3a6a9a8cf4a5031eb8999158ba3ba6bfa41a4a43\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4001ee998f435fc6249ef42a3a6a9a8cf4a5031eb8999158ba3ba6bfa41a4a43\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lghgp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:55Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.880654 4492 
status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://78117c73db01f54f893d52844cab11a6257a1f6b6b582fb751065e1acc329620\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:55Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.914336 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.914367 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.914377 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.914390 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.914400 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:55Z","lastTransitionTime":"2025-11-26T06:48:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.919852 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.929622 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-nrzjd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"026c3325-a592-4828-8e4f-08bcb790014a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70dcd5b71e0ed41855711d244d6628b740a92d6ada3fa3114a76c280b83e7402\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70dcd5b71e0ed41855711d244d6628b740a92d6ada3fa3114a76c280b83e7402\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernet
es.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://46f1a12a78f284278cf74713bf7a46f4d62a7f40258ec65ae217b5fbb5729525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://46f1a12a78f284278cf74713bf7a46f4d62a7f40258ec65ae217b5fbb5729525\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://233ce2ea0f4758076db5c922254232e5cccdc2466e03c6044a0ba4077dd71e8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://233ce2ea0f4758076db5c922254232e5cccdc2466e03c6044a0ba4077dd71e8a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1f662c3ac64d38717f3c8eb17f7ab22b7f9af88661f97ddae2caeec71b793a68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f662c3ac64d38717f3c8eb17f7ab22b7f9af88661f97ddae2caeec71b793a68\\\",\\\"exitCode\\\":0,
\\\"finishedAt\\\":\\\"2025-11-26T06:48:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://39153e3f02feb4f9af860e81e6e426354d4113d26135aadb9bc708dcb2cb1770\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://39153e3f02feb4f9af860e81e6e426354d4113d26135aadb9bc708dcb2cb1770\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b34363c342a19d8eda1d5be4bea825a4e0f9a4281915b6ae5d0cc5b00ad7c926\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b34363c342a19d8eda1d5be4bea825a4e0f9a4281915b6ae5d0cc5b00ad7c926\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-nrzjd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet 
valid: current time 2025-11-26T06:48:55Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.936541 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6lnwf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0151e6e0-df4e-4482-9309-f8cce9bc6ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f91d7ead0eaa9a8c8d4ec6372d35236fc33de1f8606616efadfee2ec6a71324\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cmsnp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6lnwf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:55Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.943535 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"04bf18ad-d2a1-4b30-a3fa-2b6247363c82\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fad93839d2a51dffea51b659a6dcbfe24701e00ebb88e18329f7aa4351e1b4f5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lwjt9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://695ce8a08afa726c47c6aa48ddd828cbc420a9740de6cf165351e5bd68174a89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lwjt9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6blv7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:55Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.951626 4492 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-5bshd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a471ac3f-0ac0-4110-94bb-194c0de0af26\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a91fba8751c53be54b0060bfc75906ab11b521770ca44425d8910fa13027c9d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gt98z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-5bshd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:55Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.959811 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"779b4f9a-92b7-4dcc-938a-e4de5decd688\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e170e91f442c9f45c7adfc9a5f8435cb51135522d5ac61f29829834c1f797e0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a8187b933b520c7a9c1c7f798f841f3892c249f52eddd13c0c7585a8bc916f20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b87661ddeafdf124a87d6bc50755b340e32d88bbc35a005ae13aa66aa3b39ff4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"k
ube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a8bf4249c1551f054875ff3ef146502de6c99fd3afd10d78b41274196a35a6d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:24Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:55Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.968142 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2644f0895688786b5b70f08011457eed33cb0a7962ac6dde6b60dd3276497011\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://27d142c5328a6f659cd2cee0b6535403ccbfb07aee1ea29c928c9d80a847f4ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:55Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.975851 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:55Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.983360 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-hjxcm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfdb68d9-168b-4d04-a6ee-b2deef54a9ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f3b93237fa8e75f6423c8f194440aebb4fffec26f63b19b00396ee567fb454f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cpmw5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:50Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-hjxcm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:55Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:55 crc kubenswrapper[4492]: I1126 06:48:55.993348 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"995e57c0-8e79-4857-8451-c7f7b51a05d3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70a3d5141782b7c4dcb2ef96a026e1fb4f4ef101c967f64a20ece097bbdf007f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1684e95c263df92c92efdd2240417c071f977b58d825abd2155277d7cce1fd57\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://15f4e01e3336948991062e895549de16371af485e1cc1f32591b5dc4fc62472b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef8e05b7e0643e9159acdd474c0f1ed97db182b0a7ed4f21b475ce6e4c051f52\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a6dd3695118a8c09585a7cfceb42ac5ae5898562c5f6442da6936f849a4e9f8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T06:48:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1126 06:48:41.573117 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1126 06:48:41.573321 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 06:48:41.575536 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3533228848/tls.crt::/tmp/serving-cert-3533228848/tls.key\\\\\\\"\\\\nI1126 06:48:41.958496 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 06:48:41.961105 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 06:48:41.961124 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 06:48:41.961145 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 06:48:41.961150 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 06:48:41.965068 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 06:48:41.965092 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:48:41.965097 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:48:41.965101 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 06:48:41.965103 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 06:48:41.965106 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 06:48:41.965108 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 06:48:41.965323 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 06:48:41.966098 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ca15652692a4dd6b01f7ec02b8ff37ac41ed740c4c47d451e0742aa80d0c028\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4421dd0243f7239dfa2ede847b9cb5230663e69a214dca3dfb1a4763ea76b233\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4421dd0243f7239dfa2ede847b9cb5230663e69a214dca3dfb1a4763ea76b233\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:24Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:55Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:56 crc kubenswrapper[4492]: I1126 06:48:56.004469 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:56Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:56 crc kubenswrapper[4492]: I1126 06:48:56.016027 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:56 crc kubenswrapper[4492]: I1126 06:48:56.016078 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:56 crc kubenswrapper[4492]: I1126 06:48:56.016089 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:56 crc kubenswrapper[4492]: I1126 06:48:56.016105 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:56 crc kubenswrapper[4492]: I1126 06:48:56.016117 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:56Z","lastTransitionTime":"2025-11-26T06:48:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:48:56 crc kubenswrapper[4492]: I1126 06:48:56.036935 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:56Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:56 crc kubenswrapper[4492]: I1126 06:48:56.070017 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b104695-0850-4fb3-b2f8-f764435f8694\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5de69ea04f427c1f151433ee53b119dfa7e3cd603f924ebca340c5050ff7d097\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://738cf2a56bd431ea3dcd29c8a6e66881e618cf6592af8ce1852be9776c41f803\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dba16d525497282969e2ead66e1d4886793234d0f4d2ef143a8246329131b046\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://501133ebc0e93cbba7bea713d813e6f41caa28286ed59ae8d328b6f9050a1e71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0301a329531b99f93a3e91f672edc4ce17ae03cfd5562f5802362423f11670fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1519223368c78fc663c8f6674135a7c574425dccbd0151d6663836b4715e9f9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38e3e63cfede7c1fc9951627a0d2da11df52468d38ba6eed25404ef2f2587da6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dcc2ea28d7ab1193d0836b56704a623efefea918b62fde4a41c3d5466a531a39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPat
h\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4001ee998f435fc6249ef42a3a6a9a8cf4a5031eb8999158ba3ba6bfa41a4a43\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4001ee998f435fc6249ef42a3a6a9a8cf4a5031eb8999158ba3ba6bfa41a4a43\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lghgp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:56Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:56 crc kubenswrapper[4492]: I1126 06:48:56.105243 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://78117c73db01f54f893d52844cab11a6257a1f6b6b582fb751065e1acc329620\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:56Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:56 crc kubenswrapper[4492]: I1126 06:48:56.117891 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:56 crc kubenswrapper[4492]: I1126 06:48:56.117930 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:56 crc kubenswrapper[4492]: I1126 06:48:56.117940 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:56 crc kubenswrapper[4492]: I1126 06:48:56.117955 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:56 crc kubenswrapper[4492]: I1126 06:48:56.117965 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:56Z","lastTransitionTime":"2025-11-26T06:48:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:48:56 crc kubenswrapper[4492]: I1126 06:48:56.139777 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4aa19f79274209a31db5cfe0a8ff6f71000fc4efb2d65dfab3f719d3a7f1ee9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:56Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:56 crc kubenswrapper[4492]: I1126 06:48:56.219960 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:56 crc kubenswrapper[4492]: I1126 06:48:56.220003 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:56 crc kubenswrapper[4492]: I1126 06:48:56.220016 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:56 crc kubenswrapper[4492]: I1126 06:48:56.220034 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:56 crc kubenswrapper[4492]: I1126 06:48:56.220049 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:56Z","lastTransitionTime":"2025-11-26T06:48:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:48:56 crc kubenswrapper[4492]: I1126 06:48:56.323012 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:56 crc kubenswrapper[4492]: I1126 06:48:56.323062 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:56 crc kubenswrapper[4492]: I1126 06:48:56.323073 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:56 crc kubenswrapper[4492]: I1126 06:48:56.323089 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:56 crc kubenswrapper[4492]: I1126 06:48:56.323100 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:56Z","lastTransitionTime":"2025-11-26T06:48:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:48:56 crc kubenswrapper[4492]: I1126 06:48:56.425933 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:56 crc kubenswrapper[4492]: I1126 06:48:56.425976 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:56 crc kubenswrapper[4492]: I1126 06:48:56.425988 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:56 crc kubenswrapper[4492]: I1126 06:48:56.426006 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:56 crc kubenswrapper[4492]: I1126 06:48:56.426018 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:56Z","lastTransitionTime":"2025-11-26T06:48:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:48:56 crc kubenswrapper[4492]: I1126 06:48:56.437577 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 06:48:56 crc kubenswrapper[4492]: I1126 06:48:56.437604 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 06:48:56 crc kubenswrapper[4492]: E1126 06:48:56.437690 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 06:48:56 crc kubenswrapper[4492]: E1126 06:48:56.437775 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 06:48:56 crc kubenswrapper[4492]: I1126 06:48:56.527716 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:56 crc kubenswrapper[4492]: I1126 06:48:56.527755 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:56 crc kubenswrapper[4492]: I1126 06:48:56.527769 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:56 crc kubenswrapper[4492]: I1126 06:48:56.527785 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:56 crc kubenswrapper[4492]: I1126 06:48:56.527797 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:56Z","lastTransitionTime":"2025-11-26T06:48:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:48:56 crc kubenswrapper[4492]: I1126 06:48:56.619863 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-nrzjd" event={"ID":"026c3325-a592-4828-8e4f-08bcb790014a","Type":"ContainerStarted","Data":"7c4955932b597b4b409c6c0bd2195c7918b56f1db3aca639a0d47656173b6176"} Nov 26 06:48:56 crc kubenswrapper[4492]: I1126 06:48:56.621630 4492 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-lghgp_9b104695-0850-4fb3-b2f8-f764435f8694/ovnkube-controller/0.log" Nov 26 06:48:56 crc kubenswrapper[4492]: I1126 06:48:56.624558 4492 generic.go:334] "Generic (PLEG): container finished" podID="9b104695-0850-4fb3-b2f8-f764435f8694" containerID="38e3e63cfede7c1fc9951627a0d2da11df52468d38ba6eed25404ef2f2587da6" exitCode=1 Nov 26 06:48:56 crc kubenswrapper[4492]: I1126 06:48:56.624598 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" event={"ID":"9b104695-0850-4fb3-b2f8-f764435f8694","Type":"ContainerDied","Data":"38e3e63cfede7c1fc9951627a0d2da11df52468d38ba6eed25404ef2f2587da6"} Nov 26 06:48:56 crc kubenswrapper[4492]: I1126 06:48:56.625081 4492 scope.go:117] "RemoveContainer" containerID="38e3e63cfede7c1fc9951627a0d2da11df52468d38ba6eed25404ef2f2587da6" Nov 26 06:48:56 crc kubenswrapper[4492]: I1126 06:48:56.630069 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:56 crc kubenswrapper[4492]: I1126 06:48:56.630098 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:56 crc kubenswrapper[4492]: I1126 06:48:56.630109 4492 kubelet_node_status.go:724] "Recording event 
message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:56 crc kubenswrapper[4492]: I1126 06:48:56.630124 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:56 crc kubenswrapper[4492]: I1126 06:48:56.630135 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:56Z","lastTransitionTime":"2025-11-26T06:48:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:48:56 crc kubenswrapper[4492]: I1126 06:48:56.633158 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-hjxcm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfdb68d9-168b-4d04-a6ee-b2deef54a9ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f3b93237fa8e75f6423c8f194440aebb4fffec26f63b19b00396ee567fb454f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cpmw5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:50Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-hjxcm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:56Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:56 crc kubenswrapper[4492]: I1126 06:48:56.647483 4492 status_manager.go:875] 
"Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"995e57c0-8e79-4857-8451-c7f7b51a05d3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70a3d5141782b7c4dcb2ef96a026e1fb4f4ef101c967f64a20ece097bbdf007f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1684e95c263df92c92efdd2240417c071f977b58d825abd2155277d7cce1fd57\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://15f4e01e3336948991062e895549de16371af485e1cc1f32591b5dc4fc62472b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef8e05b7e0643e9159acdd474c0f1ed9
7db182b0a7ed4f21b475ce6e4c051f52\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a6dd3695118a8c09585a7cfceb42ac5ae5898562c5f6442da6936f849a4e9f8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T06:48:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1126 06:48:41.573117 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1126 06:48:41.573321 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 06:48:41.575536 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3533228848/tls.crt::/tmp/serving-cert-3533228848/tls.key\\\\\\\"\\\\nI1126 06:48:41.958496 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 06:48:41.961105 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 06:48:41.961124 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 06:48:41.961145 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 06:48:41.961150 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 06:48:41.965068 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 06:48:41.965092 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:48:41.965097 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:48:41.965101 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 06:48:41.965103 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 06:48:41.965106 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 06:48:41.965108 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 06:48:41.965323 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 06:48:41.966098 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ca15652692a4dd6b01f7ec02b8ff37ac41ed740c4c47d451e0742aa80d0c028\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4421dd0243f7239dfa2ede847b9cb5230663e69a214dca3dfb1a4763ea76b233\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4421dd0243f7239dfa2ede847b9cb5230663e69a214dca3dfb1a4763ea76b233\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:24Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:56Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:56 crc kubenswrapper[4492]: I1126 06:48:56.659239 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:56Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:56 crc kubenswrapper[4492]: I1126 06:48:56.671198 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:56Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:56 crc kubenswrapper[4492]: I1126 06:48:56.681510 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2644f0895688786b5b70f08011457eed33cb0a7962ac6dde6b60dd3276497011\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://27d142c5328a6f659cd2cee0b6535403ccbfb07aee1ea29c928c9d80a847f4ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:56Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:56 crc kubenswrapper[4492]: I1126 06:48:56.693099 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:56Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:56 crc kubenswrapper[4492]: I1126 06:48:56.704294 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://78117c73db01f54f893d52844cab11a6257a1f6b6b582fb751065e1acc329620\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:56Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:56 crc kubenswrapper[4492]: I1126 06:48:56.716689 4492 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4aa19f79274209a31db5cfe0a8ff6f71000fc4efb2d65dfab3f719d3a7f1ee9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:56Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:56 crc kubenswrapper[4492]: I1126 06:48:56.732315 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:56 crc kubenswrapper[4492]: I1126 06:48:56.732563 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:56 crc kubenswrapper[4492]: I1126 06:48:56.732699 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:56 crc kubenswrapper[4492]: I1126 06:48:56.732775 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:56 crc kubenswrapper[4492]: I1126 06:48:56.732995 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:56Z","lastTransitionTime":"2025-11-26T06:48:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:48:56 crc kubenswrapper[4492]: I1126 06:48:56.738158 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b104695-0850-4fb3-b2f8-f764435f8694\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5de69ea04f427c1f151433ee53b119dfa7e3cd603f924ebca340c5050ff7d097\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://738cf2a56bd431ea3dcd29c8a6e66881e618cf6592af8ce1852be9776c41f803\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://dba16d525497282969e2ead66e1d4886793234d0f4d2ef143a8246329131b046\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://501133ebc0e93cbba7bea713d813e6f41caa28286ed59ae8d328b6f9050a1e71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0301a329531b99f93a3e91f672edc4ce17ae03cfd5562f5802362423f11670fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1519223368c78fc663c8f6674135a7c574425dccbd0151d6663836b4715e9f9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38e3e63cfede7c1fc9951627a0d2da11df52468d38ba6eed25404ef2f2587da6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\
"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dcc2ea28d7ab1193d0836b56704a623efefea918b62fde4a41c3d5466a531a39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4001ee998f435fc6249ef42a3a6a9a8cf4a5031eb8999158ba3ba6bfa41a4a43\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4001ee998f435fc6249ef42a3a6a9a8cf4a5031eb8999158ba3ba6bfa41a4a43\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lghgp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:56Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:56 crc kubenswrapper[4492]: I1126 06:48:56.754362 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-nrzjd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"026c3325-a592-4828-8e4f-08bcb790014a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c4955932b597b4b409c6c0bd2195c7918b56f1db3aca639a0d47656173b6176\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70dcd5b71e0ed41855711d244d6628b740a92d6ada3fa3114a76c280b83e7402\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70dcd5b71e0ed41855711d244d6628b740a92d6ada3fa3114a76c280b83e7402\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://46f1a12a78f284278cf74713bf7a46f4d62a7f40258ec65ae217b5fbb5729525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://46f1a12a78f284278cf74713bf7a46f4d62a7f40258ec65ae217b5fbb5729525\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://233ce2ea0f4758076db5c922254232e5cccdc2466e03c6044a0ba4077dd71e8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://233ce2ea0f4758076db5c922254232e5cccdc2466e03c6044a0ba4077dd71e8a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1f662c3ac64d38717f3c8eb17f7ab22b7f9af88661f97ddae2caeec71b793a68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f662c3ac64d38717f3c8eb17f7ab22b7f9af88661f97ddae2caeec71b793a68\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://39153e3f02feb4f9af860e81e6e426354d4113d26135aadb9bc708dcb2cb1770\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://39153e3f02feb4f9af860e81e6e426354d4113d26135aadb9bc708dcb2cb1770\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b34363c342a19d8eda1d5be4bea825a4e0f9a4281915b6ae5d0cc5b00ad7c926\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b34363c342a19d8eda1d5be4bea825a4e0f9a4281915b6ae5d0cc5b00ad7c926\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-nrzjd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:56Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:56 crc kubenswrapper[4492]: I1126 06:48:56.761571 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6lnwf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0151e6e0-df4e-4482-9309-f8cce9bc6ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f91d7ead0eaa9a8c8d4ec6372d35236fc33de1f8606616efadfee2ec6a71324\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cmsnp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6lnwf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:56Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:56 crc kubenswrapper[4492]: I1126 06:48:56.769530 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"04bf18ad-d2a1-4b30-a3fa-2b6247363c82\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fad93839d2a51dffea51b659a6dcbfe24701e00ebb88e18329f7aa4351e1b4f5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lwjt9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://695ce8a08afa726c47c6aa48ddd828cbc420a9740de6cf165351e5bd68174a89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lwjt9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6blv7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:56Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:56 crc kubenswrapper[4492]: I1126 06:48:56.778355 4492 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"779b4f9a-92b7-4dcc-938a-e4de5decd688\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e170e91f442c9f45c7adfc9a5f8435cb51135522d5ac61f29829834c1f797e0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a8187b933b520c7a9c1c7f798f841f3892c249f52eddd13c0c7585a8bc916f20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b87661ddeafdf124a87d6bc50755b340e32d88bbc35a005ae13aa66aa3b39ff4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a8bf4249c1551f054875ff3ef1
46502de6c99fd3afd10d78b41274196a35a6d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:24Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:56Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:56 crc kubenswrapper[4492]: I1126 06:48:56.788768 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-5bshd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a471ac3f-0ac0-4110-94bb-194c0de0af26\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a91fba8751c53be54b0060bfc75906ab11b521770ca44425d8910fa13027c9d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-
cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gt98z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-5bshd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:56Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:56 crc kubenswrapper[4492]: I1126 06:48:56.796410 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"04bf18ad-d2a1-4b30-a3fa-2b6247363c82\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fad93839d2a51dffea51b659a6dcbfe24701e00ebb88e18329f7aa4351e1b4f5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lwjt9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://695ce8a08afa726c47c6aa48ddd828cbc420a9740de6cf165351e5bd68174a89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lwjt9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6blv7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:56Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:56 crc kubenswrapper[4492]: I1126 06:48:56.811344 4492 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-nrzjd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"026c3325-a592-4828-8e4f-08bcb790014a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c4955932b597b4b409c6c0bd2195c7918b56f1db3aca639a0d47656173b6176\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70dcd5b71e0ed41855711d244d6628b740a92d6ada3fa3114a76c280b83e7402\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70dcd5b71e0ed41855711d244d6628b740a92d6ada3fa3114a76c280b83e7402\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://46f1a12a78f284278cf74713bf7a46f4d62a7f40258ec65ae217b5fbb5729525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2c
c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://46f1a12a78f284278cf74713bf7a46f4d62a7f40258ec65ae217b5fbb5729525\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://233ce2ea0f4758076db5c922254232e5cccdc2466e03c6044a0ba4077dd71e8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://233ce2ea0f4758076db5c922254232e5cccdc2466e03c6044a0ba4077dd71e8a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1f662c3ac64d38717f3c8eb17f7ab22b7f9af88661f97ddae2caeec71b793a68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f662c3ac64d38717f3c8eb17f7ab22b7f9af88661f97ddae2caeec71b793a68\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-re
lease\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://39153e3f02feb4f9af860e81e6e426354d4113d26135aadb9bc708dcb2cb1770\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://39153e3f02feb4f9af860e81e6e426354d4113d26135aadb9bc708dcb2cb1770\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b34363c342a19d8eda1d5be4bea825a4e0f9a4281915b6ae5d0cc5b00ad7c926\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b34363c342a19d8eda1d5be4bea825a4e0f9a4281915b6ae5d0cc5b00ad7c926\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-nrzjd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:56Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:56 crc kubenswrapper[4492]: I1126 06:48:56.821208 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6lnwf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0151e6e0-df4e-4482-9309-f8cce9bc6ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f91d7ead0eaa9a8c8d4ec6372d35236fc33de1f8606616efadfee2ec6a71324\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cmsnp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6lnwf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:56Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:56 crc kubenswrapper[4492]: I1126 06:48:56.836026 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:56 crc kubenswrapper[4492]: I1126 06:48:56.836064 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:56 crc kubenswrapper[4492]: I1126 06:48:56.836073 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:56 crc kubenswrapper[4492]: I1126 06:48:56.836090 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:56 crc kubenswrapper[4492]: I1126 06:48:56.836100 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:56Z","lastTransitionTime":"2025-11-26T06:48:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:48:56 crc kubenswrapper[4492]: I1126 06:48:56.862925 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-5bshd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a471ac3f-0ac0-4110-94bb-194c0de0af26\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a91fba8751c53be54b0060bfc75906ab11b521770ca44425d8910fa13027c9d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gt98z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\
\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-5bshd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:56Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:56 crc kubenswrapper[4492]: I1126 06:48:56.903701 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"779b4f9a-92b7-4dcc-938a-e4de5decd688\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e170e91f442c9f45c7adfc9a5f8435cb51135522d5ac61f29829834c1f797e0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a8187b933b520c7a9c1c7f798f841f3892c249f52eddd13c0c7585a8bc916f20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b87661ddeafdf124a87d6bc50755b340e32d88bbc35a005ae13aa66aa3b39ff4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/
crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a8bf4249c1551f054875ff3ef146502de6c99fd3afd10d78b41274196a35a6d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:24Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:56Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:56 crc kubenswrapper[4492]: I1126 06:48:56.938237 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:56 crc kubenswrapper[4492]: I1126 06:48:56.938275 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:56 crc kubenswrapper[4492]: I1126 06:48:56.938286 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:56 crc kubenswrapper[4492]: I1126 06:48:56.938304 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:56 crc kubenswrapper[4492]: I1126 06:48:56.938316 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:56Z","lastTransitionTime":"2025-11-26T06:48:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:48:56 crc kubenswrapper[4492]: I1126 06:48:56.949262 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:56Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:56 crc kubenswrapper[4492]: I1126 06:48:56.983410 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2644f0895688786b5b70f08011457eed33cb0a7962ac6dde6b60dd3276497011\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://27d142c5328a6f659cd2cee0b6535403ccbfb07aee1ea29c928c9d80a847f4ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:56Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:57 crc kubenswrapper[4492]: I1126 06:48:57.034501 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:57Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:57 crc kubenswrapper[4492]: I1126 06:48:57.039883 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:57 crc kubenswrapper[4492]: I1126 06:48:57.039916 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:57 crc kubenswrapper[4492]: I1126 06:48:57.039926 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:57 crc kubenswrapper[4492]: I1126 06:48:57.039944 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:57 crc kubenswrapper[4492]: I1126 06:48:57.039955 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:57Z","lastTransitionTime":"2025-11-26T06:48:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:48:57 crc kubenswrapper[4492]: I1126 06:48:57.070431 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-hjxcm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfdb68d9-168b-4d04-a6ee-b2deef54a9ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f3b93237fa8e75f6423c8f194440aebb4fffec26f63b19b00396ee567fb454f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cpmw5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:50Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-hjxcm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:57Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:57 crc kubenswrapper[4492]: I1126 06:48:57.103342 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"995e57c0-8e79-4857-8451-c7f7b51a05d3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70a3d5141782b7c4dcb2ef96a026e1fb4f4ef101c967f64a20ece097bbdf007f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1684e95c263df92c92efdd2240417c071f977b58d825abd2155277d7cce1fd57\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://15f4e01e3336948991062e895549de16371af485e1cc1f32591b5dc4fc62472b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef8e05b7e0643e9159acdd474c0f1ed97db182b0a7ed4f21b475ce6e4c051f52\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a6dd3695118a8c09585a7cfceb42ac5ae5898562c5f6442da6936f849a4e9f8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T06:48:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1126 06:48:41.573117 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1126 06:48:41.573321 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 06:48:41.575536 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3533228848/tls.crt::/tmp/serving-cert-3533228848/tls.key\\\\\\\"\\\\nI1126 06:48:41.958496 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 06:48:41.961105 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 06:48:41.961124 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 06:48:41.961145 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 06:48:41.961150 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 06:48:41.965068 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 06:48:41.965092 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:48:41.965097 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:48:41.965101 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 06:48:41.965103 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 06:48:41.965106 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 06:48:41.965108 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 06:48:41.965323 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 06:48:41.966098 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ca15652692a4dd6b01f7ec02b8ff37ac41ed740c4c47d451e0742aa80d0c028\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4421dd0243f7239dfa2ede847b9cb5230663e69a214dca3dfb1a4763ea76b233\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4421dd0243f7239dfa2ede847b9cb5230663e69a214dca3dfb1a4763ea76b233\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:24Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:57Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:57 crc kubenswrapper[4492]: I1126 06:48:57.142362 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:57 crc kubenswrapper[4492]: I1126 06:48:57.142396 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:57 crc kubenswrapper[4492]: I1126 06:48:57.142406 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:57 crc kubenswrapper[4492]: I1126 06:48:57.142423 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:57 crc kubenswrapper[4492]: I1126 06:48:57.142433 4492 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:57Z","lastTransitionTime":"2025-11-26T06:48:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:48:57 crc kubenswrapper[4492]: I1126 06:48:57.142844 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:57Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:57 crc kubenswrapper[4492]: I1126 06:48:57.183040 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4aa19f79274209a31db5cfe0a8ff6f71000fc4efb2d65dfab3f719d3a7f1ee9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:57Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:57 crc kubenswrapper[4492]: I1126 06:48:57.226883 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b104695-0850-4fb3-b2f8-f764435f8694\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5de69ea04f427c1f151433ee53b119dfa7e3cd603f924ebca340c5050ff7d097\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://738cf2a56bd431ea3dcd29c8a6e66881e618cf6592af8ce1852be9776c41f803\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dba16d525497282969e2ead66e1d4886793234d0f4d2ef143a8246329131b046\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://501133ebc0e93cbba7bea713d813e6f41caa28286ed59ae8d328b6f9050a1e71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0301a329531b99f93a3e91f672edc4ce17ae03cfd5562f5802362423f11670fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1519223368c78fc663c8f6674135a7c574425dccbd0151d6663836b4715e9f9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38e3e63cfede7c1fc9951627a0d2da11df52468d38ba6eed25404ef2f2587da6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://38e3e63cfede7c1fc9951627a0d2da11df52468d38ba6eed25404ef2f2587da6\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T06:48:56Z\\\",\\\"message\\\":\\\"ed to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:56Z is after 2025-08-24T17:21:41Z]\\\\nI1126 06:48:56.583706 5704 default_network_controller.go:776] Recording success event on pod openshift-network-operator/network-operator-58b4c7f79c-55gtf\\\\nI1126 06:48:56.583683 5704 services_controller.go:473] Services do not match for network=default, existing lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-operator-lifecycle-manager/catalog-operator-metrics_TCP_cluster\\\\\\\", UUID:\\\\\\\"78f6184b-c7cf-436d-8cbb-4b31f8af75e8\\\\\\\", Protocol:\\\\\\\"tcp\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-operator-lifecycle-manager/catalog-operator-metrics\\\\\\\"}, Opts:services.LBOpts{Reject:false, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, Templates:services.TemplateMap{}, Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}, built lbs: 
\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dcc2ea28d7ab1193d0836b56704a623efefea918b62fde4a41c3d5466a531a39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4001ee998f435fc6249ef42a3a6a9a8cf4a5031eb8999158ba3ba6bfa41a4a43\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099
482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4001ee998f435fc6249ef42a3a6a9a8cf4a5031eb8999158ba3ba6bfa41a4a43\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lghgp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:57Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:57 crc kubenswrapper[4492]: I1126 06:48:57.244731 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:57 crc kubenswrapper[4492]: I1126 06:48:57.244781 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:57 crc kubenswrapper[4492]: I1126 06:48:57.244792 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:57 crc kubenswrapper[4492]: I1126 06:48:57.244813 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:57 crc kubenswrapper[4492]: I1126 06:48:57.244826 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:57Z","lastTransitionTime":"2025-11-26T06:48:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:48:57 crc kubenswrapper[4492]: I1126 06:48:57.265780 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://78117c73db01f54f893d52844cab11a6257a1f6b6b582fb751065e1acc329620\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:57Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:57 crc kubenswrapper[4492]: I1126 06:48:57.346584 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:57 crc kubenswrapper[4492]: I1126 06:48:57.346625 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:57 crc kubenswrapper[4492]: I1126 06:48:57.346635 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:57 crc kubenswrapper[4492]: I1126 06:48:57.346651 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:57 crc kubenswrapper[4492]: I1126 06:48:57.346659 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:57Z","lastTransitionTime":"2025-11-26T06:48:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:48:57 crc kubenswrapper[4492]: I1126 06:48:57.437734 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 06:48:57 crc kubenswrapper[4492]: E1126 06:48:57.437898 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 06:48:57 crc kubenswrapper[4492]: I1126 06:48:57.448726 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:57 crc kubenswrapper[4492]: I1126 06:48:57.448768 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:57 crc kubenswrapper[4492]: I1126 06:48:57.448793 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:57 crc kubenswrapper[4492]: I1126 06:48:57.448824 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:57 crc kubenswrapper[4492]: I1126 06:48:57.448835 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:57Z","lastTransitionTime":"2025-11-26T06:48:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:48:57 crc kubenswrapper[4492]: I1126 06:48:57.552283 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:57 crc kubenswrapper[4492]: I1126 06:48:57.552329 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:57 crc kubenswrapper[4492]: I1126 06:48:57.552343 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:57 crc kubenswrapper[4492]: I1126 06:48:57.552365 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:57 crc kubenswrapper[4492]: I1126 06:48:57.552376 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:57Z","lastTransitionTime":"2025-11-26T06:48:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:48:57 crc kubenswrapper[4492]: I1126 06:48:57.631963 4492 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-lghgp_9b104695-0850-4fb3-b2f8-f764435f8694/ovnkube-controller/1.log" Nov 26 06:48:57 crc kubenswrapper[4492]: I1126 06:48:57.632724 4492 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-lghgp_9b104695-0850-4fb3-b2f8-f764435f8694/ovnkube-controller/0.log" Nov 26 06:48:57 crc kubenswrapper[4492]: I1126 06:48:57.635881 4492 generic.go:334] "Generic (PLEG): container finished" podID="9b104695-0850-4fb3-b2f8-f764435f8694" containerID="3e5f1a548541188ad7099cc52b4901b60d2cf3d21c19f41d3fa5f3a223a73d66" exitCode=1 Nov 26 06:48:57 crc kubenswrapper[4492]: I1126 06:48:57.635935 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" event={"ID":"9b104695-0850-4fb3-b2f8-f764435f8694","Type":"ContainerDied","Data":"3e5f1a548541188ad7099cc52b4901b60d2cf3d21c19f41d3fa5f3a223a73d66"} Nov 26 06:48:57 crc kubenswrapper[4492]: I1126 06:48:57.636011 4492 scope.go:117] "RemoveContainer" containerID="38e3e63cfede7c1fc9951627a0d2da11df52468d38ba6eed25404ef2f2587da6" Nov 26 06:48:57 crc kubenswrapper[4492]: I1126 06:48:57.637261 4492 scope.go:117] "RemoveContainer" containerID="3e5f1a548541188ad7099cc52b4901b60d2cf3d21c19f41d3fa5f3a223a73d66" Nov 26 06:48:57 crc kubenswrapper[4492]: E1126 06:48:57.637541 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-lghgp_openshift-ovn-kubernetes(9b104695-0850-4fb3-b2f8-f764435f8694)\"" pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" podUID="9b104695-0850-4fb3-b2f8-f764435f8694" Nov 26 06:48:57 crc kubenswrapper[4492]: I1126 06:48:57.656420 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:57 crc kubenswrapper[4492]: I1126 06:48:57.656457 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:57 crc kubenswrapper[4492]: I1126 06:48:57.656469 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:57 crc kubenswrapper[4492]: I1126 06:48:57.656491 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:57 crc kubenswrapper[4492]: I1126 06:48:57.656506 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:57Z","lastTransitionTime":"2025-11-26T06:48:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:48:57 crc kubenswrapper[4492]: I1126 06:48:57.662869 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b104695-0850-4fb3-b2f8-f764435f8694\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5de69ea04f427c1f151433ee53b119dfa7e3cd603f924ebca340c5050ff7d097\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://738cf2a56bd431ea3dcd29c8a6e66881e618cf6592af8ce1852be9776c41f803\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://dba16d525497282969e2ead66e1d4886793234d0f4d2ef143a8246329131b046\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://501133ebc0e93cbba7bea713d813e6f41caa28286ed59ae8d328b6f9050a1e71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0301a329531b99f93a3e91f672edc4ce17ae03cfd5562f5802362423f11670fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1519223368c78fc663c8f6674135a7c574425dccbd0151d6663836b4715e9f9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e5f1a548541188ad7099cc52b4901b60d2cf3d21c19f41d3fa5f3a223a73d66\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://38e3e63cfede7c1fc9951627a0d2da11df52468d38ba6eed25404ef2f2587da6\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T06:48:56Z\\\",\\\"message\\\":\\\"ed to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:56Z is after 2025-08-24T17:21:41Z]\\\\nI1126 06:48:56.583706 5704 default_network_controller.go:776] Recording success event on pod openshift-network-operator/network-operator-58b4c7f79c-55gtf\\\\nI1126 06:48:56.583683 5704 services_controller.go:473] Services do not match for network=default, existing lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-operator-lifecycle-manager/catalog-operator-metrics_TCP_cluster\\\\\\\", UUID:\\\\\\\"78f6184b-c7cf-436d-8cbb-4b31f8af75e8\\\\\\\", Protocol:\\\\\\\"tcp\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-operator-lifecycle-manager/catalog-operator-metrics\\\\\\\"}, Opts:services.LBOpts{Reject:false, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, Templates:services.TemplateMap{}, Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}, built lbs: 
\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:54Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e5f1a548541188ad7099cc52b4901b60d2cf3d21c19f41d3fa5f3a223a73d66\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T06:48:57Z\\\",\\\"message\\\":\\\"4490 5849 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1126 06:48:57.314640 5849 reflector.go:311] Stopping reflector *v1.EgressService (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI1126 06:48:57.314697 5849 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1126 06:48:57.314935 5849 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/factory.go:140\\\\nI1126 06:48:57.315204 5849 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1126 06:48:57.323801 5849 shared_informer.go:320] Caches are synced for node-tracker-controller\\\\nI1126 06:48:57.323821 5849 services_controller.go:204] Setting up event handlers for services for network=default\\\\nI1126 06:48:57.323859 5849 ovnkube.go:599] Stopped ovnkube\\\\nI1126 06:48:57.323888 5849 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1126 06:48:57.323985 5849 ovnkube.go:137] failed to run 
ov\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dcc2ea28d7ab1193d0836b56704a623efefea918b62fde4a41c3d5466a531a39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4001ee998f435fc6249ef42a3a6a9a8cf4a5031eb8999158ba3ba6bfa41a4a43\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d20
99482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4001ee998f435fc6249ef42a3a6a9a8cf4a5031eb8999158ba3ba6bfa41a4a43\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lghgp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:57Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:57 crc kubenswrapper[4492]: I1126 06:48:57.677400 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://78117c73db01f54f893d52844cab11a6257a1f6b6b582fb751065e1acc329620\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call 
webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:57Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:57 crc kubenswrapper[4492]: I1126 06:48:57.687980 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4aa19f79274209a31db5cfe0a8ff6f71000fc4efb2d65dfab3f719d3a7f1ee9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:57Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:57 crc kubenswrapper[4492]: I1126 06:48:57.700231 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-nrzjd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"026c3325-a592-4828-8e4f-08bcb790014a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c4955932b597b4b409c6c0bd2195c7918b56f1db3aca639a0d47656173b6176\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70dcd5b71e0ed41855711d244d6628b740a92d6ada3fa3114a76c280b83e7402\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70dcd5b71e0ed41855711d244d6628b740a92d6ada3fa3114a76c280b83e7402\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://46f1a12a78f284278cf74713bf7a46f4d62a7f40258ec65ae217b5fbb5729525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://46f1a12a78f284278cf74713bf7a46f4d62a7f40258ec65ae217b5fbb5729525\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://233ce2ea0f4758076db5c922254232e5cccdc2466e03c6044a0ba4077dd71e8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://233ce2ea0f4758076db5c922254232e5cccdc2466e03c6044a0ba4077dd71e8a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1f662c3ac64d38717f3c8eb17f7ab22b7f9af88661f97ddae2caeec71b793a68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f662c3ac64d38717f3c8eb17f7ab22b7f9af88661f97ddae2caeec71b793a68\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://39153e3f02feb4f9af860e81e6e426354d4113d26135aadb9bc708dcb2cb1770\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://39153e3f02feb4f9af860e81e6e426354d4113d26135aadb9bc708dcb2cb1770\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b34363c342a19d8eda1d5be4bea825a4e0f9a4281915b6ae5d0cc5b00ad7c926\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b34363c342a19d8eda1d5be4bea825a4e0f9a4281915b6ae5d0cc5b00ad7c926\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-nrzjd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:57Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:57 crc kubenswrapper[4492]: I1126 06:48:57.708607 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6lnwf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0151e6e0-df4e-4482-9309-f8cce9bc6ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f91d7ead0eaa9a8c8d4ec6372d35236fc33de1f8606616efadfee2ec6a71324\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cmsnp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6lnwf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:57Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:57 crc kubenswrapper[4492]: I1126 06:48:57.718267 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"04bf18ad-d2a1-4b30-a3fa-2b6247363c82\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fad93839d2a51dffea51b659a6dcbfe24701e00ebb88e18329f7aa4351e1b4f5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lwjt9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://695ce8a08afa726c47c6aa48ddd828cbc420a9740de6cf165351e5bd68174a89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lwjt9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6blv7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:57Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:57 crc kubenswrapper[4492]: I1126 06:48:57.730162 4492 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-5bshd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a471ac3f-0ac0-4110-94bb-194c0de0af26\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a91fba8751c53be54b0060bfc75906ab11b521770ca44425d8910fa13027c9d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gt98z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-5bshd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:57Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:57 crc kubenswrapper[4492]: I1126 06:48:57.741457 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"779b4f9a-92b7-4dcc-938a-e4de5decd688\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e170e91f442c9f45c7adfc9a5f8435cb51135522d5ac61f29829834c1f797e0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a8187b933b520c7a9c1c7f798f841f3892c249f52eddd13c0c7585a8bc916f20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b87661ddeafdf124a87d6bc50755b340e32d88bbc35a005ae13aa66aa3b39ff4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"k
ube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a8bf4249c1551f054875ff3ef146502de6c99fd3afd10d78b41274196a35a6d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:24Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:57Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:57 crc kubenswrapper[4492]: I1126 06:48:57.751350 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2644f0895688786b5b70f08011457eed33cb0a7962ac6dde6b60dd3276497011\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://27d142c5328a6f659cd2cee0b6535403ccbfb07aee1ea29c928c9d80a847f4ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:57Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:57 crc kubenswrapper[4492]: I1126 06:48:57.758677 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:57 crc kubenswrapper[4492]: I1126 06:48:57.758710 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:57 crc kubenswrapper[4492]: I1126 06:48:57.758719 4492 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 26 06:48:57 crc kubenswrapper[4492]: I1126 06:48:57.758744 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:57 crc kubenswrapper[4492]: I1126 06:48:57.758756 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:57Z","lastTransitionTime":"2025-11-26T06:48:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:48:57 crc kubenswrapper[4492]: I1126 06:48:57.763113 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:57Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:57 crc kubenswrapper[4492]: I1126 06:48:57.772030 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-hjxcm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfdb68d9-168b-4d04-a6ee-b2deef54a9ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f3b93237fa8e75f6423c8f194440aebb4fffec26f63b19b00396ee567fb454f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cpmw5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:50Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-hjxcm\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:57Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:57 crc kubenswrapper[4492]: I1126 06:48:57.785602 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"995e57c0-8e79-4857-8451-c7f7b51a05d3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70a3d5141782b7c4dcb2ef96a026e1fb4f4ef101c967f64a20ece097bbdf007f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1684e95c263df92c92efdd2240417c071f977b58d825abd2155277d7cce1fd57\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://15f4e01e3336948991062e895549de16371af485e1cc1f32591b5dc4fc62472b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\
\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef8e05b7e0643e9159acdd474c0f1ed97db182b0a7ed4f21b475ce6e4c051f52\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a6dd3695118a8c09585a7cfceb42ac5ae5898562c5f6442da6936f849a4e9f8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T06:48:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1126 06:48:41.573117 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1126 06:48:41.573321 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 06:48:41.575536 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3533228848/tls.crt::/tmp/serving-cert-3533228848/tls.key\\\\\\\"\\\\nI1126 06:48:41.958496 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 06:48:41.961105 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 06:48:41.961124 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 06:48:41.961145 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 06:48:41.961150 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 06:48:41.965068 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 06:48:41.965092 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:48:41.965097 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:48:41.965101 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 06:48:41.965103 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 06:48:41.965106 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 06:48:41.965108 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 06:48:41.965323 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 06:48:41.966098 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ca15652692a4dd6b01f7ec02b8ff37ac41ed740c4c47d451e0742aa80d0c028\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4421dd0243f7239dfa2ede847b9cb5230663e69a214dca3dfb1a4763ea76b233\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4421dd0243f7239dfa2ede847b9cb5230663e69a214dca3dfb1a4763ea76b233\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:24Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:57Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:57 crc kubenswrapper[4492]: I1126 06:48:57.794985 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:57Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:57 crc kubenswrapper[4492]: I1126 06:48:57.822373 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:57Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:57 crc kubenswrapper[4492]: I1126 06:48:57.861397 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:57 crc kubenswrapper[4492]: I1126 06:48:57.861430 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:57 crc kubenswrapper[4492]: I1126 06:48:57.861441 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:57 crc kubenswrapper[4492]: I1126 06:48:57.861454 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:57 crc kubenswrapper[4492]: I1126 06:48:57.861464 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:57Z","lastTransitionTime":"2025-11-26T06:48:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:48:57 crc kubenswrapper[4492]: I1126 06:48:57.963297 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:57 crc kubenswrapper[4492]: I1126 06:48:57.963339 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:57 crc kubenswrapper[4492]: I1126 06:48:57.963348 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:57 crc kubenswrapper[4492]: I1126 06:48:57.963363 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:57 crc kubenswrapper[4492]: I1126 06:48:57.963374 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:57Z","lastTransitionTime":"2025-11-26T06:48:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:48:58 crc kubenswrapper[4492]: I1126 06:48:58.065736 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:58 crc kubenswrapper[4492]: I1126 06:48:58.065779 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:58 crc kubenswrapper[4492]: I1126 06:48:58.065790 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:58 crc kubenswrapper[4492]: I1126 06:48:58.065807 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:58 crc kubenswrapper[4492]: I1126 06:48:58.065821 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:58Z","lastTransitionTime":"2025-11-26T06:48:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:48:58 crc kubenswrapper[4492]: I1126 06:48:58.132301 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 06:48:58 crc kubenswrapper[4492]: E1126 06:48:58.132374 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 06:49:14.132343248 +0000 UTC m=+50.016231545 (durationBeforeRetry 16s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:48:58 crc kubenswrapper[4492]: I1126 06:48:58.132466 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 06:48:58 crc kubenswrapper[4492]: I1126 06:48:58.132505 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 06:48:58 crc kubenswrapper[4492]: I1126 06:48:58.132540 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 06:48:58 crc kubenswrapper[4492]: I1126 06:48:58.132570 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 06:48:58 crc kubenswrapper[4492]: E1126 06:48:58.132681 4492 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 26 06:48:58 crc kubenswrapper[4492]: E1126 06:48:58.132715 4492 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 26 06:48:58 crc kubenswrapper[4492]: E1126 06:48:58.132726 4492 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 26 06:48:58 crc kubenswrapper[4492]: E1126 06:48:58.132749 4492 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 26 06:48:58 crc kubenswrapper[4492]: E1126 06:48:58.132761 4492 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 06:48:58 crc 
kubenswrapper[4492]: E1126 06:48:58.132772 4492 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 26 06:48:58 crc kubenswrapper[4492]: E1126 06:48:58.132731 4492 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 06:48:58 crc kubenswrapper[4492]: E1126 06:48:58.132809 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-26 06:49:14.132792882 +0000 UTC m=+50.016681181 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 06:48:58 crc kubenswrapper[4492]: E1126 06:48:58.132858 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-26 06:49:14.132848267 +0000 UTC m=+50.016736565 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 26 06:48:58 crc kubenswrapper[4492]: E1126 06:48:58.132872 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-26 06:49:14.13286539 +0000 UTC m=+50.016753687 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 06:48:58 crc kubenswrapper[4492]: E1126 06:48:58.133115 4492 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 26 06:48:58 crc kubenswrapper[4492]: E1126 06:48:58.133285 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-26 06:49:14.133262726 +0000 UTC m=+50.017151025 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 26 06:48:58 crc kubenswrapper[4492]: I1126 06:48:58.168116 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:58 crc kubenswrapper[4492]: I1126 06:48:58.168233 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:58 crc kubenswrapper[4492]: I1126 06:48:58.168296 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:58 crc kubenswrapper[4492]: I1126 06:48:58.168356 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:58 crc kubenswrapper[4492]: I1126 06:48:58.168407 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:58Z","lastTransitionTime":"2025-11-26T06:48:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:48:58 crc kubenswrapper[4492]: I1126 06:48:58.271276 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:58 crc kubenswrapper[4492]: I1126 06:48:58.271314 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:58 crc kubenswrapper[4492]: I1126 06:48:58.271323 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:58 crc kubenswrapper[4492]: I1126 06:48:58.271339 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:58 crc kubenswrapper[4492]: I1126 06:48:58.271349 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:58Z","lastTransitionTime":"2025-11-26T06:48:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:48:58 crc kubenswrapper[4492]: I1126 06:48:58.373424 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:58 crc kubenswrapper[4492]: I1126 06:48:58.373917 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:58 crc kubenswrapper[4492]: I1126 06:48:58.373985 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:58 crc kubenswrapper[4492]: I1126 06:48:58.374066 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:58 crc kubenswrapper[4492]: I1126 06:48:58.374122 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:58Z","lastTransitionTime":"2025-11-26T06:48:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:48:58 crc kubenswrapper[4492]: I1126 06:48:58.438578 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 06:48:58 crc kubenswrapper[4492]: I1126 06:48:58.438672 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 06:48:58 crc kubenswrapper[4492]: E1126 06:48:58.438863 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 06:48:58 crc kubenswrapper[4492]: E1126 06:48:58.438774 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 06:48:58 crc kubenswrapper[4492]: I1126 06:48:58.475930 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:58 crc kubenswrapper[4492]: I1126 06:48:58.476033 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:58 crc kubenswrapper[4492]: I1126 06:48:58.476120 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:58 crc kubenswrapper[4492]: I1126 06:48:58.476194 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:58 crc kubenswrapper[4492]: I1126 06:48:58.476262 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:58Z","lastTransitionTime":"2025-11-26T06:48:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:48:58 crc kubenswrapper[4492]: I1126 06:48:58.578825 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:58 crc kubenswrapper[4492]: I1126 06:48:58.578890 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:58 crc kubenswrapper[4492]: I1126 06:48:58.578901 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:58 crc kubenswrapper[4492]: I1126 06:48:58.578919 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:58 crc kubenswrapper[4492]: I1126 06:48:58.578931 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:58Z","lastTransitionTime":"2025-11-26T06:48:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:48:58 crc kubenswrapper[4492]: I1126 06:48:58.646699 4492 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-lghgp_9b104695-0850-4fb3-b2f8-f764435f8694/ovnkube-controller/1.log" Nov 26 06:48:58 crc kubenswrapper[4492]: I1126 06:48:58.649532 4492 scope.go:117] "RemoveContainer" containerID="3e5f1a548541188ad7099cc52b4901b60d2cf3d21c19f41d3fa5f3a223a73d66" Nov 26 06:48:58 crc kubenswrapper[4492]: E1126 06:48:58.649682 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-lghgp_openshift-ovn-kubernetes(9b104695-0850-4fb3-b2f8-f764435f8694)\"" pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" podUID="9b104695-0850-4fb3-b2f8-f764435f8694" Nov 26 06:48:58 crc kubenswrapper[4492]: I1126 06:48:58.660813 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"779b4f9a-92b7-4dcc-938a-e4de5decd688\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e170e91f442c9f45c7adfc9a5f8435cb51135522d5ac61f29829834c1f797e0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a8187b933b520c7a9c1c7f798f841f3892c249f52eddd13c0c7585a8bc916f20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":
\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b87661ddeafdf124a87d6bc50755b340e32d88bbc35a005ae13aa66aa3b39ff4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a8bf4249c1551f054875ff3ef146502de6c99fd3afd10d78b41274196a35a6d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:24Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:58Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:58 crc kubenswrapper[4492]: I1126 06:48:58.671745 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-5bshd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a471ac3f-0ac0-4110-94bb-194c0de0af26\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a91fba8751c53be54b0060bfc75906ab11b521770ca44425d8910fa13027c9d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gt98z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-5bshd\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:58Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:58 crc kubenswrapper[4492]: I1126 06:48:58.681158 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:48:58 crc kubenswrapper[4492]: I1126 06:48:58.681209 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:48:58 crc kubenswrapper[4492]: I1126 06:48:58.681220 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:48:58 crc kubenswrapper[4492]: I1126 06:48:58.681237 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:48:58 crc kubenswrapper[4492]: I1126 06:48:58.681248 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:58Z","lastTransitionTime":"2025-11-26T06:48:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:48:58 crc kubenswrapper[4492]: I1126 06:48:58.682535 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:58Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:58 crc kubenswrapper[4492]: I1126 06:48:58.690933 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:58Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:58 crc kubenswrapper[4492]: I1126 06:48:58.700127 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2644f0895688786b5b70f08011457eed33cb0a7962ac6dde6b60dd3276497011\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://27d142c5328a6f659cd2cee0b6535403ccbfb07aee1ea29c928c9d80a847f4ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:58Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:58 crc kubenswrapper[4492]: I1126 06:48:58.708234 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:58Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:58 crc kubenswrapper[4492]: I1126 06:48:58.714870 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-hjxcm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfdb68d9-168b-4d04-a6ee-b2deef54a9ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f3b93237fa8e75f6423c8f194440aebb4fffec26f63b19b00396ee567fb454f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cpmw5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:50Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-hjxcm\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:58Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:58 crc kubenswrapper[4492]: I1126 06:48:58.723762 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"995e57c0-8e79-4857-8451-c7f7b51a05d3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70a3d5141782b7c4dcb2ef96a026e1fb4f4ef101c967f64a20ece097bbdf007f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1684e95c263df92c92efdd2240417c071f977b58d825abd2155277d7cce1fd57\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://15f4e01e3336948991062e895549de16371af485e1cc1f32591b5dc4fc62472b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\
\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef8e05b7e0643e9159acdd474c0f1ed97db182b0a7ed4f21b475ce6e4c051f52\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a6dd3695118a8c09585a7cfceb42ac5ae5898562c5f6442da6936f849a4e9f8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T06:48:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1126 06:48:41.573117 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1126 06:48:41.573321 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 06:48:41.575536 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3533228848/tls.crt::/tmp/serving-cert-3533228848/tls.key\\\\\\\"\\\\nI1126 06:48:41.958496 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 06:48:41.961105 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 06:48:41.961124 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 06:48:41.961145 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 06:48:41.961150 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 06:48:41.965068 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 06:48:41.965092 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:48:41.965097 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:48:41.965101 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 06:48:41.965103 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 06:48:41.965106 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 06:48:41.965108 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 06:48:41.965323 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 06:48:41.966098 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ca15652692a4dd6b01f7ec02b8ff37ac41ed740c4c47d451e0742aa80d0c028\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4421dd0243f7239dfa2ede847b9cb5230663e69a214dca3dfb1a4763ea76b233\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4421dd0243f7239dfa2ede847b9cb5230663e69a214dca3dfb1a4763ea76b233\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:24Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:58Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:58 crc kubenswrapper[4492]: I1126 06:48:58.732346 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://78117c73db01f54f893d52844cab11a6257a1f6b6b582fb751065e1acc329620\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:58Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:58 crc kubenswrapper[4492]: I1126 06:48:58.740285 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4aa19f79274209a31db5cfe0a8ff6f71000fc4efb2d65dfab3f719d3a7f1ee9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:58Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:58 crc kubenswrapper[4492]: I1126 06:48:58.754040 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b104695-0850-4fb3-b2f8-f764435f8694\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5de69ea04f427c1f151433ee53b119dfa7e3cd603f924ebca340c5050ff7d097\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://738cf2a56bd431ea3dcd29c8a6e66881e618cf6592af8ce1852be9776c41f803\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dba16d525497282969e2ead66e1d4886793234d0f4d2ef143a8246329131b046\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://501133ebc0e93cbba7bea713d813e6f41caa28286ed59ae8d328b6f9050a1e71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0301a329531b99f93a3e91f672edc4ce17ae03cfd5562f5802362423f11670fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1519223368c78fc663c8f6674135a7c574425dccbd0151d6663836b4715e9f9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e5f1a548541188ad7099cc52b4901b60d2cf3d2
1c19f41d3fa5f3a223a73d66\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e5f1a548541188ad7099cc52b4901b60d2cf3d21c19f41d3fa5f3a223a73d66\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T06:48:57Z\\\",\\\"message\\\":\\\"4490 5849 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1126 06:48:57.314640 5849 reflector.go:311] Stopping reflector *v1.EgressService (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI1126 06:48:57.314697 5849 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1126 06:48:57.314935 5849 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/factory.go:140\\\\nI1126 06:48:57.315204 5849 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1126 06:48:57.323801 5849 shared_informer.go:320] Caches are synced for node-tracker-controller\\\\nI1126 06:48:57.323821 5849 services_controller.go:204] Setting up event handlers for services for network=default\\\\nI1126 06:48:57.323859 5849 ovnkube.go:599] Stopped ovnkube\\\\nI1126 06:48:57.323888 5849 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1126 06:48:57.323985 5849 ovnkube.go:137] failed to run ov\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:56Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-lghgp_openshift-ovn-kubernetes(9b104695-0850-4fb3-b2f8-f764435f8694)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dcc2ea28d7ab1193d0836b56704a623efefea918b62fde4a41c3d5466a531a39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4001ee998f435fc6249ef42a3a6a9a8cf4a5031eb8999158ba3ba6bfa41a4a43\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4001ee998f435fc6249ef42a3a6a9a8cf4a5031eb8999158ba3ba6bfa41a4a43\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lghgp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:58Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:58 crc kubenswrapper[4492]: I1126 06:48:58.762108 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6lnwf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0151e6e0-df4e-4482-9309-f8cce9bc6ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f91d7ead0eaa9a8c8d4ec6372d35236fc33de1f8606616efadfee2ec6a71324\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cmsnp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":
[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6lnwf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:58Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:58 crc kubenswrapper[4492]: I1126 06:48:58.770355 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"04bf18ad-d2a1-4b30-a3fa-2b6247363c82\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fad93839d2a51dffea51b659a6dcbfe24701e00ebb88e18329f7aa4351e1b4f5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lwjt9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://695ce8a08afa726c47c6aa48ddd828cbc420a9740de6cf165351e5bd68174a89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lwjt9\\\",\\\"readOnly\\\":true,\\\"recursive
ReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6blv7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:58Z is after 2025-08-24T17:21:41Z" Nov 26 06:48:58 crc kubenswrapper[4492]: I1126 06:48:58.780793 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-nrzjd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"026c3325-a592-4828-8e4f-08bcb790014a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c4955932b597b4b409c6c0bd2195c7918b56f1db3aca639a0d47656173b6176\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70dcd5b71e0ed41855711d244d6628b740a92d6ada3fa3114a76c280b83e7402\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70dcd5b71e0ed41855711d244d6628b740a92d6ada3fa3114a76c280b83e7402\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"reason\\\":\\\"Completed
\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://46f1a12a78f284278cf74713bf7a46f4d62a7f40258ec65ae217b5fbb5729525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://46f1a12a78f284278cf74713bf7a46f4d62a7f40258ec65ae217b5fbb5729525\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://233ce2ea0f4758076db5c922254232e5cccdc2466e03c6044a0ba4077dd71e8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://233ce2ea0f4758076db5c922254232e5cccdc2466e03c6044a0ba4077dd71e8a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1f662c3ac64d38717f3c8eb17f7ab22b7f9af88661f97ddae2caeec71b793a68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\
\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f662c3ac64d38717f3c8eb17f7ab22b7f9af88661f97ddae2caeec71b793a68\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://39153e3f02feb4f9af860e81e6e426354d4113d26135aadb9bc708dcb2cb1770\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://39153e3f02feb4f9af860e81e6e426354d4113d26135aadb9bc708dcb2cb1770\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b34363c342a19d8eda1d5be4bea825a4e0f9a4281915b6ae5d0cc5b00ad7c926\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b34363c342a19d8eda1d5be4bea825a4e0f9a4281915b6ae5d0cc5b00ad7c926\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP
\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-nrzjd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:48:58Z is after 2025-08-24T17:21:41Z"
Nov 26 06:48:58 crc kubenswrapper[4492]: I1126 06:48:58.783515 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:48:58 crc kubenswrapper[4492]: I1126 06:48:58.783541 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:48:58 crc kubenswrapper[4492]: I1126 06:48:58.783553 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:48:58 crc kubenswrapper[4492]: I1126 06:48:58.783567 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:48:58 crc kubenswrapper[4492]: I1126 06:48:58.783577 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:58Z","lastTransitionTime":"2025-11-26T06:48:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:48:58 crc kubenswrapper[4492]: I1126 06:48:58.885719 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:48:58 crc kubenswrapper[4492]: I1126 06:48:58.885746 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:48:58 crc kubenswrapper[4492]: I1126 06:48:58.885758 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:48:58 crc kubenswrapper[4492]: I1126 06:48:58.885771 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:48:58 crc kubenswrapper[4492]: I1126 06:48:58.885781 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:58Z","lastTransitionTime":"2025-11-26T06:48:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:48:58 crc kubenswrapper[4492]: I1126 06:48:58.988201 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:48:58 crc kubenswrapper[4492]: I1126 06:48:58.988261 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:48:58 crc kubenswrapper[4492]: I1126 06:48:58.988274 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:48:58 crc kubenswrapper[4492]: I1126 06:48:58.988295 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:48:58 crc kubenswrapper[4492]: I1126 06:48:58.988307 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:58Z","lastTransitionTime":"2025-11-26T06:48:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:48:59 crc kubenswrapper[4492]: I1126 06:48:59.089771 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:48:59 crc kubenswrapper[4492]: I1126 06:48:59.089803 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:48:59 crc kubenswrapper[4492]: I1126 06:48:59.089815 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:48:59 crc kubenswrapper[4492]: I1126 06:48:59.089826 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:48:59 crc kubenswrapper[4492]: I1126 06:48:59.089835 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:59Z","lastTransitionTime":"2025-11-26T06:48:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:48:59 crc kubenswrapper[4492]: I1126 06:48:59.192120 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:48:59 crc kubenswrapper[4492]: I1126 06:48:59.192156 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:48:59 crc kubenswrapper[4492]: I1126 06:48:59.192168 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:48:59 crc kubenswrapper[4492]: I1126 06:48:59.192199 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:48:59 crc kubenswrapper[4492]: I1126 06:48:59.192208 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:59Z","lastTransitionTime":"2025-11-26T06:48:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:48:59 crc kubenswrapper[4492]: I1126 06:48:59.299453 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:48:59 crc kubenswrapper[4492]: I1126 06:48:59.299484 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:48:59 crc kubenswrapper[4492]: I1126 06:48:59.299494 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:48:59 crc kubenswrapper[4492]: I1126 06:48:59.299510 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:48:59 crc kubenswrapper[4492]: I1126 06:48:59.299521 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:59Z","lastTransitionTime":"2025-11-26T06:48:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:48:59 crc kubenswrapper[4492]: I1126 06:48:59.401757 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:48:59 crc kubenswrapper[4492]: I1126 06:48:59.401896 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:48:59 crc kubenswrapper[4492]: I1126 06:48:59.401911 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:48:59 crc kubenswrapper[4492]: I1126 06:48:59.401933 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:48:59 crc kubenswrapper[4492]: I1126 06:48:59.401945 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:59Z","lastTransitionTime":"2025-11-26T06:48:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:48:59 crc kubenswrapper[4492]: I1126 06:48:59.438190 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 26 06:48:59 crc kubenswrapper[4492]: E1126 06:48:59.438315 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 26 06:48:59 crc kubenswrapper[4492]: I1126 06:48:59.503771 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:48:59 crc kubenswrapper[4492]: I1126 06:48:59.503797 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:48:59 crc kubenswrapper[4492]: I1126 06:48:59.503806 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:48:59 crc kubenswrapper[4492]: I1126 06:48:59.503818 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:48:59 crc kubenswrapper[4492]: I1126 06:48:59.503828 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:59Z","lastTransitionTime":"2025-11-26T06:48:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:48:59 crc kubenswrapper[4492]: I1126 06:48:59.605491 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:48:59 crc kubenswrapper[4492]: I1126 06:48:59.605524 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:48:59 crc kubenswrapper[4492]: I1126 06:48:59.605533 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:48:59 crc kubenswrapper[4492]: I1126 06:48:59.605546 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:48:59 crc kubenswrapper[4492]: I1126 06:48:59.605556 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:59Z","lastTransitionTime":"2025-11-26T06:48:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:48:59 crc kubenswrapper[4492]: I1126 06:48:59.708195 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:48:59 crc kubenswrapper[4492]: I1126 06:48:59.708228 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:48:59 crc kubenswrapper[4492]: I1126 06:48:59.708238 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:48:59 crc kubenswrapper[4492]: I1126 06:48:59.708250 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:48:59 crc kubenswrapper[4492]: I1126 06:48:59.708258 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:59Z","lastTransitionTime":"2025-11-26T06:48:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:48:59 crc kubenswrapper[4492]: I1126 06:48:59.810241 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:48:59 crc kubenswrapper[4492]: I1126 06:48:59.810269 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:48:59 crc kubenswrapper[4492]: I1126 06:48:59.810278 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:48:59 crc kubenswrapper[4492]: I1126 06:48:59.810290 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:48:59 crc kubenswrapper[4492]: I1126 06:48:59.810298 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:59Z","lastTransitionTime":"2025-11-26T06:48:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:48:59 crc kubenswrapper[4492]: I1126 06:48:59.911857 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:48:59 crc kubenswrapper[4492]: I1126 06:48:59.912002 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:48:59 crc kubenswrapper[4492]: I1126 06:48:59.912076 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:48:59 crc kubenswrapper[4492]: I1126 06:48:59.912150 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:48:59 crc kubenswrapper[4492]: I1126 06:48:59.912231 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:48:59Z","lastTransitionTime":"2025-11-26T06:48:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:00 crc kubenswrapper[4492]: I1126 06:49:00.014360 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:00 crc kubenswrapper[4492]: I1126 06:49:00.014393 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:00 crc kubenswrapper[4492]: I1126 06:49:00.014403 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:00 crc kubenswrapper[4492]: I1126 06:49:00.014413 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:00 crc kubenswrapper[4492]: I1126 06:49:00.014420 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:00Z","lastTransitionTime":"2025-11-26T06:49:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:00 crc kubenswrapper[4492]: I1126 06:49:00.116314 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:00 crc kubenswrapper[4492]: I1126 06:49:00.116335 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:00 crc kubenswrapper[4492]: I1126 06:49:00.116344 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:00 crc kubenswrapper[4492]: I1126 06:49:00.116353 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:00 crc kubenswrapper[4492]: I1126 06:49:00.116360 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:00Z","lastTransitionTime":"2025-11-26T06:49:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:00 crc kubenswrapper[4492]: I1126 06:49:00.217852 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:00 crc kubenswrapper[4492]: I1126 06:49:00.217936 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:00 crc kubenswrapper[4492]: I1126 06:49:00.217988 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:00 crc kubenswrapper[4492]: I1126 06:49:00.218035 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:00 crc kubenswrapper[4492]: I1126 06:49:00.218091 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:00Z","lastTransitionTime":"2025-11-26T06:49:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:00 crc kubenswrapper[4492]: I1126 06:49:00.320626 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:00 crc kubenswrapper[4492]: I1126 06:49:00.320651 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:00 crc kubenswrapper[4492]: I1126 06:49:00.320660 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:00 crc kubenswrapper[4492]: I1126 06:49:00.320669 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:00 crc kubenswrapper[4492]: I1126 06:49:00.320677 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:00Z","lastTransitionTime":"2025-11-26T06:49:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:00 crc kubenswrapper[4492]: I1126 06:49:00.423226 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:00 crc kubenswrapper[4492]: I1126 06:49:00.423276 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:00 crc kubenswrapper[4492]: I1126 06:49:00.423287 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:00 crc kubenswrapper[4492]: I1126 06:49:00.423304 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:00 crc kubenswrapper[4492]: I1126 06:49:00.423322 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:00Z","lastTransitionTime":"2025-11-26T06:49:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:00 crc kubenswrapper[4492]: I1126 06:49:00.438379 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 26 06:49:00 crc kubenswrapper[4492]: E1126 06:49:00.438487 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 26 06:49:00 crc kubenswrapper[4492]: I1126 06:49:00.438749 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 26 06:49:00 crc kubenswrapper[4492]: E1126 06:49:00.438849 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 26 06:49:00 crc kubenswrapper[4492]: I1126 06:49:00.525270 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:00 crc kubenswrapper[4492]: I1126 06:49:00.525321 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:00 crc kubenswrapper[4492]: I1126 06:49:00.525331 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:00 crc kubenswrapper[4492]: I1126 06:49:00.525346 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:00 crc kubenswrapper[4492]: I1126 06:49:00.525359 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:00Z","lastTransitionTime":"2025-11-26T06:49:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:00 crc kubenswrapper[4492]: I1126 06:49:00.627224 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:00 crc kubenswrapper[4492]: I1126 06:49:00.627250 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:00 crc kubenswrapper[4492]: I1126 06:49:00.627258 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:00 crc kubenswrapper[4492]: I1126 06:49:00.627270 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:00 crc kubenswrapper[4492]: I1126 06:49:00.627278 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:00Z","lastTransitionTime":"2025-11-26T06:49:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:00 crc kubenswrapper[4492]: I1126 06:49:00.729519 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:00 crc kubenswrapper[4492]: I1126 06:49:00.729572 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:00 crc kubenswrapper[4492]: I1126 06:49:00.729580 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:00 crc kubenswrapper[4492]: I1126 06:49:00.729597 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:00 crc kubenswrapper[4492]: I1126 06:49:00.729606 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:00Z","lastTransitionTime":"2025-11-26T06:49:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:00 crc kubenswrapper[4492]: I1126 06:49:00.831334 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:00 crc kubenswrapper[4492]: I1126 06:49:00.831363 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:00 crc kubenswrapper[4492]: I1126 06:49:00.831373 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:00 crc kubenswrapper[4492]: I1126 06:49:00.831385 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:00 crc kubenswrapper[4492]: I1126 06:49:00.831394 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:00Z","lastTransitionTime":"2025-11-26T06:49:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:00 crc kubenswrapper[4492]: I1126 06:49:00.933042 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:00 crc kubenswrapper[4492]: I1126 06:49:00.933086 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:00 crc kubenswrapper[4492]: I1126 06:49:00.933099 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:00 crc kubenswrapper[4492]: I1126 06:49:00.933112 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:00 crc kubenswrapper[4492]: I1126 06:49:00.933121 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:00Z","lastTransitionTime":"2025-11-26T06:49:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:01 crc kubenswrapper[4492]: I1126 06:49:01.035570 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:01 crc kubenswrapper[4492]: I1126 06:49:01.035600 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:01 crc kubenswrapper[4492]: I1126 06:49:01.035608 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:01 crc kubenswrapper[4492]: I1126 06:49:01.035618 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:01 crc kubenswrapper[4492]: I1126 06:49:01.035627 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:01Z","lastTransitionTime":"2025-11-26T06:49:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:01 crc kubenswrapper[4492]: I1126 06:49:01.137779 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:01 crc kubenswrapper[4492]: I1126 06:49:01.137851 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:01 crc kubenswrapper[4492]: I1126 06:49:01.137865 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:01 crc kubenswrapper[4492]: I1126 06:49:01.137906 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:01 crc kubenswrapper[4492]: I1126 06:49:01.137930 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:01Z","lastTransitionTime":"2025-11-26T06:49:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:01 crc kubenswrapper[4492]: I1126 06:49:01.239880 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:01 crc kubenswrapper[4492]: I1126 06:49:01.239915 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:01 crc kubenswrapper[4492]: I1126 06:49:01.239925 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:01 crc kubenswrapper[4492]: I1126 06:49:01.239937 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:01 crc kubenswrapper[4492]: I1126 06:49:01.239947 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:01Z","lastTransitionTime":"2025-11-26T06:49:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:01 crc kubenswrapper[4492]: I1126 06:49:01.341631 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:01 crc kubenswrapper[4492]: I1126 06:49:01.341657 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:01 crc kubenswrapper[4492]: I1126 06:49:01.341665 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:01 crc kubenswrapper[4492]: I1126 06:49:01.341678 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:01 crc kubenswrapper[4492]: I1126 06:49:01.341688 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:01Z","lastTransitionTime":"2025-11-26T06:49:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:01 crc kubenswrapper[4492]: I1126 06:49:01.414006 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2gwwb"]
Nov 26 06:49:01 crc kubenswrapper[4492]: I1126 06:49:01.414614 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2gwwb"
Nov 26 06:49:01 crc kubenswrapper[4492]: I1126 06:49:01.416458 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd"
Nov 26 06:49:01 crc kubenswrapper[4492]: I1126 06:49:01.417208 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert"
Nov 26 06:49:01 crc kubenswrapper[4492]: I1126 06:49:01.428676 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-nrzjd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"026c3325-a592-4828-8e4f-08bcb790014a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c4955932b597b4b409c6c0bd2195c7918b56f1db3aca639a0d47656173b6176\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70dcd5b71e0ed41855711d244d6628b740a92d6ada3fa3114a76c280b83e7402\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70dcd5b71e0ed41855711d244d6628b740a92d6ada3fa3114a76c280b83e7402\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://46f1a12a78f284278cf74713bf7a46f4d62a7f40258ec65ae217b5fbb5729525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://46f1a12a78f284278cf74713bf7a46f4d62a7f40258ec65ae217b5fbb5729525\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://233ce2ea0f4758076db5c922254232e5cccdc2466e03c6044a0ba4077dd71e8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://233ce2ea0f4758076db5c922254232e5cccdc2466e03c6044a0ba4077dd71e8a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1f662c3ac64d38717f3c8eb17f7ab22b7f9af88661f97ddae2caeec71b793a68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f662c3ac64d38717f3c8eb17f7ab22b7f9af88661f97ddae2caeec71b793a68\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://39153e3f02feb4f9af860e81e6e426354d4113d26135aadb9bc708dcb2cb1770\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://39153e3f02feb4f9af860e81e6e426354d4113d26135aadb9bc708dcb2cb1770\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b34363c342a19d8eda1d5be4bea825a4e0f9a4281915b6ae5d0cc5b00ad7c926\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b34363c342a19d8eda1d5be4bea825a4e0f9a4281915b6ae5d0cc5b00ad7c926\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-nrzjd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:01Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:01 crc kubenswrapper[4492]: I1126 06:49:01.437505 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 06:49:01 crc kubenswrapper[4492]: E1126 06:49:01.437592 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 06:49:01 crc kubenswrapper[4492]: I1126 06:49:01.440194 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6lnwf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0151e6e0-df4e-4482-9309-f8cce9bc6ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f91d7ead0eaa9a8c8d4ec6372d35236fc33de1f8606616efadfee2ec6a71324\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cmsnp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6lnwf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:01Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:01 crc kubenswrapper[4492]: I1126 06:49:01.443188 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:01 crc kubenswrapper[4492]: I1126 06:49:01.443213 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 06:49:01 crc kubenswrapper[4492]: I1126 06:49:01.443220 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:01 crc kubenswrapper[4492]: I1126 06:49:01.443230 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:01 crc kubenswrapper[4492]: I1126 06:49:01.443239 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:01Z","lastTransitionTime":"2025-11-26T06:49:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:01 crc kubenswrapper[4492]: I1126 06:49:01.449878 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"04bf18ad-d2a1-4b30-a3fa-2b6247363c82\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fad93839d2a51dffea51b659a6dcbfe24701e00ebb88e18329f7aa4351e1b4f5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lwjt9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://695ce8a08afa726c47c6aa48ddd828cbc420a9740de6cf165351e5bd68174a89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running
\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lwjt9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6blv7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:01Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:01 crc kubenswrapper[4492]: I1126 06:49:01.457087 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c7zdg\" (UniqueName: \"kubernetes.io/projected/2d23da2c-14b7-4671-b87e-7506855ca163-kube-api-access-c7zdg\") pod \"ovnkube-control-plane-749d76644c-2gwwb\" (UID: \"2d23da2c-14b7-4671-b87e-7506855ca163\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2gwwb" Nov 26 06:49:01 crc kubenswrapper[4492]: I1126 06:49:01.457122 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/2d23da2c-14b7-4671-b87e-7506855ca163-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-2gwwb\" (UID: \"2d23da2c-14b7-4671-b87e-7506855ca163\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2gwwb" Nov 26 06:49:01 crc kubenswrapper[4492]: I1126 06:49:01.457146 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/2d23da2c-14b7-4671-b87e-7506855ca163-env-overrides\") pod \"ovnkube-control-plane-749d76644c-2gwwb\" (UID: \"2d23da2c-14b7-4671-b87e-7506855ca163\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2gwwb" Nov 26 06:49:01 crc kubenswrapper[4492]: I1126 06:49:01.457212 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/2d23da2c-14b7-4671-b87e-7506855ca163-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-2gwwb\" (UID: \"2d23da2c-14b7-4671-b87e-7506855ca163\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2gwwb" Nov 26 06:49:01 crc kubenswrapper[4492]: I1126 06:49:01.459836 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"779b4f9a-92b7-4dcc-938a-e4de5decd688\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e170e91f442c9f45c7adfc9a5f8435cb51135522d5ac61f29829834c1f797e0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a8187b933b520c7a9c1c7f798f841f3892c249f52eddd13c0c7585a8bc916f20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b87661ddeafdf124a87d6bc50755b340e32d88bbc35a005ae13aa66aa3b39ff4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a8bf4249c1551f054875ff3ef146502de6c99fd3afd10d78b41274196a35a6d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:24Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:01Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:01 crc kubenswrapper[4492]: I1126 06:49:01.468533 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-5bshd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a471ac3f-0ac0-4110-94bb-194c0de0af26\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a91fba8751c53be54b0060bfc75906ab11b521770ca44425d8910fa13027c9d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run
/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gt98z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-5bshd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:01Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:01 crc kubenswrapper[4492]: I1126 06:49:01.478048 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"995e57c0-8e79-4857-8451-c7f7b51a05d3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70a3d5141782b7c4dcb2ef96a026e1fb4f4ef101c967f64a20ece097bbdf007f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1684e95c263df92c92efdd2240417c071f977b58d825abd2155277d7cce1fd57\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://15f4e01e3336948991062e895549de16371af485e1cc1f32591b5dc4fc62472b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef8e05b7e0643e9159acdd474c0f1ed97db182b0a7ed4f21b475ce6e4c051f52\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a6dd3695118a8c09585a7cfceb42ac5ae5898562c5f6442da6936f849a4e9f8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T06:48:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1126 06:48:41.573117 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1126 06:48:41.573321 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 06:48:41.575536 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3533228848/tls.crt::/tmp/serving-cert-3533228848/tls.key\\\\\\\"\\\\nI1126 06:48:41.958496 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 06:48:41.961105 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 06:48:41.961124 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 06:48:41.961145 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 06:48:41.961150 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 06:48:41.965068 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 06:48:41.965092 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:48:41.965097 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:48:41.965101 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 06:48:41.965103 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 06:48:41.965106 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 06:48:41.965108 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 06:48:41.965323 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 06:48:41.966098 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ca15652692a4dd6b01f7ec02b8ff37ac41ed740c4c47d451e0742aa80d0c028\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4421dd0243f7239dfa2ede847b9cb5230663e69a214dca3dfb1a4763ea76b233\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4421dd0243f7239dfa2ede847b9cb5230663e69a214dca3dfb1a4763ea76b233\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:24Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:01Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:01 crc kubenswrapper[4492]: I1126 06:49:01.486938 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:01Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:01 crc kubenswrapper[4492]: I1126 06:49:01.496872 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:01Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:01 crc kubenswrapper[4492]: I1126 06:49:01.507036 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2644f0895688786b5b70f08011457eed33cb0a7962ac6dde6b60dd3276497011\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://27d142c5328a6f659cd2cee0b6535403ccbfb07aee1ea29c928c9d80a847f4ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:01Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:01 crc kubenswrapper[4492]: I1126 06:49:01.517142 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:01Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:01 crc kubenswrapper[4492]: I1126 06:49:01.525319 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-hjxcm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfdb68d9-168b-4d04-a6ee-b2deef54a9ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f3b93237fa8e75f6423c8f194440aebb4fffec26f63b19b00396ee567fb454f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cpmw5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:50Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-hjxcm\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:01Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:01 crc kubenswrapper[4492]: I1126 06:49:01.533624 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2gwwb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d23da2c-14b7-4671-b87e-7506855ca163\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:01Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:01Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:01Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c7zdg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c7zdg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:49:01Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2gwwb\": Internal error occurred: 
failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:01Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:01 crc kubenswrapper[4492]: I1126 06:49:01.543253 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://78117c73db01f54f893d52844cab11a6257a1f6b6b582fb751065e1acc329620\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:01Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:01 crc kubenswrapper[4492]: I1126 06:49:01.544948 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:01 crc kubenswrapper[4492]: I1126 06:49:01.544993 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:01 crc kubenswrapper[4492]: I1126 06:49:01.545004 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:01 crc kubenswrapper[4492]: I1126 06:49:01.545017 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:01 crc kubenswrapper[4492]: I1126 06:49:01.545027 4492 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:01Z","lastTransitionTime":"2025-11-26T06:49:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:01 crc kubenswrapper[4492]: I1126 06:49:01.552608 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4aa19f79274209a31db5cfe0a8ff6f71000fc4efb2d65dfab3f719d3a7f1ee9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:01Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:01 crc kubenswrapper[4492]: I1126 06:49:01.558080 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c7zdg\" (UniqueName: \"kubernetes.io/projected/2d23da2c-14b7-4671-b87e-7506855ca163-kube-api-access-c7zdg\") pod \"ovnkube-control-plane-749d76644c-2gwwb\" (UID: \"2d23da2c-14b7-4671-b87e-7506855ca163\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2gwwb" Nov 26 06:49:01 crc kubenswrapper[4492]: I1126 06:49:01.558118 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/2d23da2c-14b7-4671-b87e-7506855ca163-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-2gwwb\" (UID: \"2d23da2c-14b7-4671-b87e-7506855ca163\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2gwwb" Nov 26 
06:49:01 crc kubenswrapper[4492]: I1126 06:49:01.558141 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/2d23da2c-14b7-4671-b87e-7506855ca163-env-overrides\") pod \"ovnkube-control-plane-749d76644c-2gwwb\" (UID: \"2d23da2c-14b7-4671-b87e-7506855ca163\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2gwwb" Nov 26 06:49:01 crc kubenswrapper[4492]: I1126 06:49:01.558206 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/2d23da2c-14b7-4671-b87e-7506855ca163-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-2gwwb\" (UID: \"2d23da2c-14b7-4671-b87e-7506855ca163\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2gwwb" Nov 26 06:49:01 crc kubenswrapper[4492]: I1126 06:49:01.558800 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/2d23da2c-14b7-4671-b87e-7506855ca163-env-overrides\") pod \"ovnkube-control-plane-749d76644c-2gwwb\" (UID: \"2d23da2c-14b7-4671-b87e-7506855ca163\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2gwwb" Nov 26 06:49:01 crc kubenswrapper[4492]: I1126 06:49:01.559123 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/2d23da2c-14b7-4671-b87e-7506855ca163-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-2gwwb\" (UID: \"2d23da2c-14b7-4671-b87e-7506855ca163\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2gwwb" Nov 26 06:49:01 crc kubenswrapper[4492]: I1126 06:49:01.562970 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/2d23da2c-14b7-4671-b87e-7506855ca163-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-2gwwb\" (UID: \"2d23da2c-14b7-4671-b87e-7506855ca163\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2gwwb" Nov 26 06:49:01 crc kubenswrapper[4492]: I1126 06:49:01.565938 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b104695-0850-4fb3-b2f8-f764435f8694\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5de69ea04f427c1f151433ee53b119dfa7e3cd603f924ebca340c5050ff7d097\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://738cf2a56bd431ea3dcd29c8a6e66881e618cf6592af8ce1852be9776c41f803\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dba16d525497282969e2ead66e1d4886793234d0f4d2ef143a8246329131b046\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://501133ebc0e93cbba7bea713d813e6f41caa28286ed59ae8d328b6f9050a1e71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0301a329531b99f93a3e91f672edc4ce17ae03cfd5562f5802362423f11670fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1519223368c78fc663c8f6674135a7c574425dccbd0151d6663836b4715e9f9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e5f1a548541188ad7099cc52b4901b60d2cf3d2
1c19f41d3fa5f3a223a73d66\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e5f1a548541188ad7099cc52b4901b60d2cf3d21c19f41d3fa5f3a223a73d66\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T06:48:57Z\\\",\\\"message\\\":\\\"4490 5849 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1126 06:48:57.314640 5849 reflector.go:311] Stopping reflector *v1.EgressService (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI1126 06:48:57.314697 5849 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1126 06:48:57.314935 5849 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/factory.go:140\\\\nI1126 06:48:57.315204 5849 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1126 06:48:57.323801 5849 shared_informer.go:320] Caches are synced for node-tracker-controller\\\\nI1126 06:48:57.323821 5849 services_controller.go:204] Setting up event handlers for services for network=default\\\\nI1126 06:48:57.323859 5849 ovnkube.go:599] Stopped ovnkube\\\\nI1126 06:48:57.323888 5849 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1126 06:48:57.323985 5849 ovnkube.go:137] failed to run ov\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:56Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-lghgp_openshift-ovn-kubernetes(9b104695-0850-4fb3-b2f8-f764435f8694)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dcc2ea28d7ab1193d0836b56704a623efefea918b62fde4a41c3d5466a531a39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4001ee998f435fc6249ef42a3a6a9a8cf4a5031eb8999158ba3ba6bfa41a4a43\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4001ee998f435fc6249ef42a3a6a9a8cf4a5031eb8999158ba3ba6bfa41a4a43\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lghgp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:01Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:01 crc kubenswrapper[4492]: I1126 06:49:01.569944 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c7zdg\" (UniqueName: \"kubernetes.io/projected/2d23da2c-14b7-4671-b87e-7506855ca163-kube-api-access-c7zdg\") pod \"ovnkube-control-plane-749d76644c-2gwwb\" (UID: \"2d23da2c-14b7-4671-b87e-7506855ca163\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2gwwb" Nov 26 06:49:01 crc kubenswrapper[4492]: I1126 06:49:01.647583 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:01 crc kubenswrapper[4492]: I1126 06:49:01.647614 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:01 crc kubenswrapper[4492]: I1126 06:49:01.647624 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:01 crc kubenswrapper[4492]: I1126 06:49:01.647642 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:01 crc kubenswrapper[4492]: I1126 06:49:01.647654 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:01Z","lastTransitionTime":"2025-11-26T06:49:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:01 crc kubenswrapper[4492]: I1126 06:49:01.723819 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2gwwb" Nov 26 06:49:01 crc kubenswrapper[4492]: W1126 06:49:01.734914 4492 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2d23da2c_14b7_4671_b87e_7506855ca163.slice/crio-b8d3ab3628447187438c838e4472d9437a8bd3d32bb92d8bb7ee2c0e121056a0 WatchSource:0}: Error finding container b8d3ab3628447187438c838e4472d9437a8bd3d32bb92d8bb7ee2c0e121056a0: Status 404 returned error can't find the container with id b8d3ab3628447187438c838e4472d9437a8bd3d32bb92d8bb7ee2c0e121056a0 Nov 26 06:49:01 crc kubenswrapper[4492]: I1126 06:49:01.750470 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:01 crc kubenswrapper[4492]: I1126 06:49:01.750510 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:01 crc kubenswrapper[4492]: I1126 06:49:01.750520 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:01 crc kubenswrapper[4492]: I1126 06:49:01.750533 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:01 crc kubenswrapper[4492]: I1126 06:49:01.750543 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:01Z","lastTransitionTime":"2025-11-26T06:49:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:01 crc kubenswrapper[4492]: I1126 06:49:01.852745 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:01 crc kubenswrapper[4492]: I1126 06:49:01.852777 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:01 crc kubenswrapper[4492]: I1126 06:49:01.852794 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:01 crc kubenswrapper[4492]: I1126 06:49:01.852808 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:01 crc kubenswrapper[4492]: I1126 06:49:01.852821 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:01Z","lastTransitionTime":"2025-11-26T06:49:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:49:01 crc kubenswrapper[4492]: I1126 06:49:01.955625 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:01 crc kubenswrapper[4492]: I1126 06:49:01.955670 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:01 crc kubenswrapper[4492]: I1126 06:49:01.955681 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:01 crc kubenswrapper[4492]: I1126 06:49:01.955696 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:01 crc kubenswrapper[4492]: I1126 06:49:01.955706 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:01Z","lastTransitionTime":"2025-11-26T06:49:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:02 crc kubenswrapper[4492]: I1126 06:49:02.061126 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:02 crc kubenswrapper[4492]: I1126 06:49:02.061166 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:02 crc kubenswrapper[4492]: I1126 06:49:02.061195 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:02 crc kubenswrapper[4492]: I1126 06:49:02.061211 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:02 crc kubenswrapper[4492]: I1126 06:49:02.061221 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:02Z","lastTransitionTime":"2025-11-26T06:49:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:02 crc kubenswrapper[4492]: I1126 06:49:02.123573 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/network-metrics-daemon-s4gtb"] Nov 26 06:49:02 crc kubenswrapper[4492]: I1126 06:49:02.124131 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-s4gtb" Nov 26 06:49:02 crc kubenswrapper[4492]: E1126 06:49:02.124220 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-s4gtb" podUID="1cc59fbe-82e1-406b-95b1-a26b6b8ef467" Nov 26 06:49:02 crc kubenswrapper[4492]: I1126 06:49:02.134421 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-5bshd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a471ac3f-0ac0-4110-94bb-194c0de0af26\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a91fba8751c53be54b0060bfc75906ab11b521770ca44425d8910fa13027c9d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gt98z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}
],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-5bshd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:02Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:02 crc kubenswrapper[4492]: I1126 06:49:02.143370 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"779b4f9a-92b7-4dcc-938a-e4de5decd688\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e170e91f442c9f45c7adfc9a5f8435cb51135522d5ac61f29829834c1f797e0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a8187b933b520c7a9c1c7f798f841f3892c249f52eddd13c0c7585a8bc916f20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b87661ddeafdf124a87d6bc50755b340e32d88bbc35a005ae13aa66aa3b39ff4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay
.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a8bf4249c1551f054875ff3ef146502de6c99fd3afd10d78b41274196a35a6d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:24Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:02Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:02 crc kubenswrapper[4492]: I1126 06:49:02.152241 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2644f0895688786b5b70f08011457eed33cb0a7962ac6dde6b60dd3276497011\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://27d142c5328a6f659cd2cee0b6535403ccbfb07aee1ea29c928c9d80a847f4ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:02Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:02 crc kubenswrapper[4492]: I1126 06:49:02.162248 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:02Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:02 crc kubenswrapper[4492]: I1126 06:49:02.163250 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:02 crc kubenswrapper[4492]: I1126 06:49:02.163274 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:02 crc kubenswrapper[4492]: I1126 06:49:02.163284 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:02 crc kubenswrapper[4492]: I1126 06:49:02.163301 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:02 crc kubenswrapper[4492]: I1126 06:49:02.163327 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:02Z","lastTransitionTime":"2025-11-26T06:49:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:49:02 crc kubenswrapper[4492]: I1126 06:49:02.173027 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-hjxcm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfdb68d9-168b-4d04-a6ee-b2deef54a9ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f3b93237fa8e75f6423c8f194440aebb4fffec26f63b19b00396ee567fb454f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cpmw5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:50Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-hjxcm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:02Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:02 crc kubenswrapper[4492]: I1126 06:49:02.187870 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2gwwb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d23da2c-14b7-4671-b87e-7506855ca163\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:01Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:01Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:01Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c7zdg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c7zdg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:49:01Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2gwwb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:02Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:02 crc kubenswrapper[4492]: I1126 06:49:02.201597 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch 
status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"995e57c0-8e79-4857-8451-c7f7b51a05d3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70a3d5141782b7c4dcb2ef96a026e1fb4f4ef101c967f64a20ece097bbdf007f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1684e95c263df92c92efdd2240417c071f977b58d825abd2155277d7cce1fd57\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://15f4e01e3336948991062e895549de16371af485e1cc1f32591b5dc4fc62472b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef8e05b7e0643e9159acdd474c0f1ed97db182b0a7ed4f21b475ce6e4c051f52\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserve
r-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a6dd3695118a8c09585a7cfceb42ac5ae5898562c5f6442da6936f849a4e9f8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T06:48:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1126 06:48:41.573117 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1126 06:48:41.573321 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 06:48:41.575536 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3533228848/tls.crt::/tmp/serving-cert-3533228848/tls.key\\\\\\\"\\\\nI1126 06:48:41.958496 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 06:48:41.961105 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 06:48:41.961124 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 06:48:41.961145 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 06:48:41.961150 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 06:48:41.965068 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 06:48:41.965092 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:48:41.965097 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:48:41.965101 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 06:48:41.965103 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 06:48:41.965106 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 06:48:41.965108 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 06:48:41.965323 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 06:48:41.966098 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ca15652692a4dd6b01f7ec02b8ff37ac41ed740c4c47d451e0742aa80d0c028\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4421dd0243f7239dfa2ede847b9cb5230663e69a214dca3dfb1a4763ea76b233\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4421dd0243f7239dfa2ede847b9cb5230663e69a214dca3dfb1a4763ea76b233\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:24Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:02Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:02 crc kubenswrapper[4492]: I1126 06:49:02.211947 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:02Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:02 crc kubenswrapper[4492]: I1126 06:49:02.221450 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:02Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:02 crc kubenswrapper[4492]: I1126 06:49:02.243101 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b104695-0850-4fb3-b2f8-f764435f8694\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5de69ea04f427c1f151433ee53b119dfa7e3cd603f924ebca340c5050ff7d097\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://738cf2a56bd431ea3dcd29c8a6e66881e618cf6592af8ce1852be9776c41f803\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dba16d525497282969e2ead66e1d4886793234d0f4d2ef143a8246329131b046\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://501133ebc0e93cbba7bea713d813e6f41caa28286ed59ae8d328b6f9050a1e71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0301a329531b99f93a3e91f672edc4ce17ae03cfd5562f5802362423f11670fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1519223368c78fc663c8f6674135a7c574425dccbd0151d6663836b4715e9f9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e5f1a548541188ad7099cc52b4901b60d2cf3d2
1c19f41d3fa5f3a223a73d66\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e5f1a548541188ad7099cc52b4901b60d2cf3d21c19f41d3fa5f3a223a73d66\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T06:48:57Z\\\",\\\"message\\\":\\\"4490 5849 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1126 06:48:57.314640 5849 reflector.go:311] Stopping reflector *v1.EgressService (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI1126 06:48:57.314697 5849 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1126 06:48:57.314935 5849 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/factory.go:140\\\\nI1126 06:48:57.315204 5849 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1126 06:48:57.323801 5849 shared_informer.go:320] Caches are synced for node-tracker-controller\\\\nI1126 06:48:57.323821 5849 services_controller.go:204] Setting up event handlers for services for network=default\\\\nI1126 06:48:57.323859 5849 ovnkube.go:599] Stopped ovnkube\\\\nI1126 06:48:57.323888 5849 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1126 06:48:57.323985 5849 ovnkube.go:137] failed to run ov\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:56Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-lghgp_openshift-ovn-kubernetes(9b104695-0850-4fb3-b2f8-f764435f8694)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dcc2ea28d7ab1193d0836b56704a623efefea918b62fde4a41c3d5466a531a39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4001ee998f435fc6249ef42a3a6a9a8cf4a5031eb8999158ba3ba6bfa41a4a43\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4001ee998f435fc6249ef42a3a6a9a8cf4a5031eb8999158ba3ba6bfa41a4a43\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lghgp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:02Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:02 crc kubenswrapper[4492]: I1126 06:49:02.252573 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-s4gtb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1cc59fbe-82e1-406b-95b1-a26b6b8ef467\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk29d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk29d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:49:02Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-s4gtb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:02Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:02 crc kubenswrapper[4492]: I1126 06:49:02.262549 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/1cc59fbe-82e1-406b-95b1-a26b6b8ef467-metrics-certs\") pod \"network-metrics-daemon-s4gtb\" (UID: \"1cc59fbe-82e1-406b-95b1-a26b6b8ef467\") " pod="openshift-multus/network-metrics-daemon-s4gtb" Nov 26 06:49:02 crc kubenswrapper[4492]: I1126 06:49:02.262610 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hk29d\" (UniqueName: \"kubernetes.io/projected/1cc59fbe-82e1-406b-95b1-a26b6b8ef467-kube-api-access-hk29d\") pod \"network-metrics-daemon-s4gtb\" (UID: \"1cc59fbe-82e1-406b-95b1-a26b6b8ef467\") " pod="openshift-multus/network-metrics-daemon-s4gtb" Nov 26 06:49:02 crc kubenswrapper[4492]: I1126 06:49:02.265450 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://78117c73db01f54f893d52844cab11a6257a1f6b6b582fb751065e1acc329620\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:02Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:02 crc kubenswrapper[4492]: I1126 06:49:02.266903 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:02 crc kubenswrapper[4492]: I1126 06:49:02.266940 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:02 crc kubenswrapper[4492]: I1126 06:49:02.266953 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:02 crc kubenswrapper[4492]: I1126 06:49:02.266974 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:02 crc kubenswrapper[4492]: I1126 06:49:02.266991 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:02Z","lastTransitionTime":"2025-11-26T06:49:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:49:02 crc kubenswrapper[4492]: I1126 06:49:02.274247 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4aa19f79274209a31db5cfe0a8ff6f71000fc4efb2d65dfab3f719d3a7f1ee9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:02Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:02 crc kubenswrapper[4492]: I1126 06:49:02.284408 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-nrzjd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"026c3325-a592-4828-8e4f-08bcb790014a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c4955932b597b4b409c6c0bd2195c7918b56f1db3aca639a0d47656173b6176\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70dcd5b71e0ed41855711d244d6628b740a92d6ada3fa3114a76c280b83e7402\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70dcd5b71e0ed41855711d244d6628b740a92d6ada3fa3114a76c280b83e7402\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://46f1a12a78f284278cf74713bf7a46f4d62a7f40258ec65ae217b5fbb5729525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://46f1a12a78f284278cf74713bf7a46f4d62a7f40258ec65ae217b5fbb5729525\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://233ce2ea0f4758076db5c922254232e5cccdc2466e03c6044a0ba4077dd71e8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://233ce2ea0f4758076db5c922254232e5cccdc2466e03c6044a0ba4077dd71e8a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1f662c3ac64d38717f3c8eb17f7ab22b7f9af88661f97ddae2caeec71b793a68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f662c3ac64d38717f3c8eb17f7ab22b7f9af88661f97ddae2caeec71b793a68\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://39153e3f02feb4f9af860e81e6e426354d4113d26135aadb9bc708dcb2cb1770\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://39153e3f02feb4f9af860e81e6e426354d4113d26135aadb9bc708dcb2cb1770\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b34363c342a19d8eda1d5be4bea825a4e0f9a4281915b6ae5d0cc5b00ad7c926\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b34363c342a19d8eda1d5be4bea825a4e0f9a4281915b6ae5d0cc5b00ad7c926\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-nrzjd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:02Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:02 crc kubenswrapper[4492]: I1126 06:49:02.293030 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6lnwf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0151e6e0-df4e-4482-9309-f8cce9bc6ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f91d7ead0eaa9a8c8d4ec6372d35236fc33de1f8606616efadfee2ec6a71324\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cmsnp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6lnwf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:02Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:02 crc kubenswrapper[4492]: I1126 06:49:02.301907 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"04bf18ad-d2a1-4b30-a3fa-2b6247363c82\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fad93839d2a51dffea51b659a6dcbfe24701e00ebb88e18329f7aa4351e1b4f5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lwjt9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://695ce8a08afa726c47c6aa48ddd828cbc420a9740de6cf165351e5bd68174a89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lwjt9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6blv7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:02Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:02 crc kubenswrapper[4492]: I1126 06:49:02.364023 4492 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/1cc59fbe-82e1-406b-95b1-a26b6b8ef467-metrics-certs\") pod \"network-metrics-daemon-s4gtb\" (UID: \"1cc59fbe-82e1-406b-95b1-a26b6b8ef467\") " pod="openshift-multus/network-metrics-daemon-s4gtb" Nov 26 06:49:02 crc kubenswrapper[4492]: I1126 06:49:02.364088 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hk29d\" (UniqueName: \"kubernetes.io/projected/1cc59fbe-82e1-406b-95b1-a26b6b8ef467-kube-api-access-hk29d\") pod \"network-metrics-daemon-s4gtb\" (UID: \"1cc59fbe-82e1-406b-95b1-a26b6b8ef467\") " pod="openshift-multus/network-metrics-daemon-s4gtb" Nov 26 06:49:02 crc kubenswrapper[4492]: E1126 06:49:02.364237 4492 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 26 06:49:02 crc kubenswrapper[4492]: E1126 06:49:02.364323 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/1cc59fbe-82e1-406b-95b1-a26b6b8ef467-metrics-certs podName:1cc59fbe-82e1-406b-95b1-a26b6b8ef467 nodeName:}" failed. No retries permitted until 2025-11-26 06:49:02.864298801 +0000 UTC m=+38.748187109 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/1cc59fbe-82e1-406b-95b1-a26b6b8ef467-metrics-certs") pod "network-metrics-daemon-s4gtb" (UID: "1cc59fbe-82e1-406b-95b1-a26b6b8ef467") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 26 06:49:02 crc kubenswrapper[4492]: I1126 06:49:02.369581 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:02 crc kubenswrapper[4492]: I1126 06:49:02.369624 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:02 crc kubenswrapper[4492]: I1126 06:49:02.369635 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:02 crc kubenswrapper[4492]: I1126 06:49:02.369654 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:02 crc kubenswrapper[4492]: I1126 06:49:02.369666 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:02Z","lastTransitionTime":"2025-11-26T06:49:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:02 crc kubenswrapper[4492]: I1126 06:49:02.379757 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hk29d\" (UniqueName: \"kubernetes.io/projected/1cc59fbe-82e1-406b-95b1-a26b6b8ef467-kube-api-access-hk29d\") pod \"network-metrics-daemon-s4gtb\" (UID: \"1cc59fbe-82e1-406b-95b1-a26b6b8ef467\") " pod="openshift-multus/network-metrics-daemon-s4gtb" Nov 26 06:49:02 crc kubenswrapper[4492]: I1126 06:49:02.437852 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 06:49:02 crc kubenswrapper[4492]: I1126 06:49:02.437891 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 06:49:02 crc kubenswrapper[4492]: E1126 06:49:02.437983 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 06:49:02 crc kubenswrapper[4492]: E1126 06:49:02.438658 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 06:49:02 crc kubenswrapper[4492]: I1126 06:49:02.472296 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:02 crc kubenswrapper[4492]: I1126 06:49:02.472329 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:02 crc kubenswrapper[4492]: I1126 06:49:02.472341 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:02 crc kubenswrapper[4492]: I1126 06:49:02.472356 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:02 crc kubenswrapper[4492]: I1126 06:49:02.472366 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:02Z","lastTransitionTime":"2025-11-26T06:49:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:02 crc kubenswrapper[4492]: I1126 06:49:02.574450 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:02 crc kubenswrapper[4492]: I1126 06:49:02.574842 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:02 crc kubenswrapper[4492]: I1126 06:49:02.574854 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:02 crc kubenswrapper[4492]: I1126 06:49:02.574868 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:02 crc kubenswrapper[4492]: I1126 06:49:02.574899 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:02Z","lastTransitionTime":"2025-11-26T06:49:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:49:02 crc kubenswrapper[4492]: I1126 06:49:02.660846 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2gwwb" event={"ID":"2d23da2c-14b7-4671-b87e-7506855ca163","Type":"ContainerStarted","Data":"23c2c8bf0201054d839f80e0d6ee1423ef1140d1c59512cd787edbad1b611b2f"} Nov 26 06:49:02 crc kubenswrapper[4492]: I1126 06:49:02.660921 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2gwwb" event={"ID":"2d23da2c-14b7-4671-b87e-7506855ca163","Type":"ContainerStarted","Data":"56a756e2798f3a758f7cc404b3c1e543389f88510f1f1bcef6bd603086b5ac96"} Nov 26 06:49:02 crc kubenswrapper[4492]: I1126 06:49:02.660957 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2gwwb" event={"ID":"2d23da2c-14b7-4671-b87e-7506855ca163","Type":"ContainerStarted","Data":"b8d3ab3628447187438c838e4472d9437a8bd3d32bb92d8bb7ee2c0e121056a0"} Nov 26 06:49:02 crc kubenswrapper[4492]: I1126 06:49:02.674847 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"779b4f9a-92b7-4dcc-938a-e4de5decd688\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e170e91f442c9f45c7adfc9a5f8435cb51135522d5ac61f29829834c1f797e0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a8187b933b520c7a9c1c7f798f841f3892c249f52eddd13c0c7585a8bc916f20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26
T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b87661ddeafdf124a87d6bc50755b340e32d88bbc35a005ae13aa66aa3b39ff4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a8bf4249c1551f054875ff3ef146502de6c99fd3afd10d78b41274196a35a6d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:24Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:02Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:02 crc kubenswrapper[4492]: I1126 06:49:02.676808 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:02 crc kubenswrapper[4492]: I1126 06:49:02.676859 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:02 crc kubenswrapper[4492]: I1126 06:49:02.676870 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:02 crc kubenswrapper[4492]: I1126 06:49:02.676883 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:02 crc kubenswrapper[4492]: I1126 06:49:02.676894 4492 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:02Z","lastTransitionTime":"2025-11-26T06:49:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:02 crc kubenswrapper[4492]: I1126 06:49:02.686450 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-5bshd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a471ac3f-0ac0-4110-94bb-194c0de0af26\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a91fba8751c53be54b0060bfc75906ab11b521770ca44425d8910fa13027c9d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube
rnetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gt98z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-5bshd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:02Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:02 crc kubenswrapper[4492]: I1126 06:49:02.696129 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2gwwb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d23da2c-14b7-4671-b87e-7506855ca163\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://56a756e2798f3a758f7cc404b3c1e543389f88510f1f1bcef6bd603086b5ac96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c7zdg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://23c2c8bf0201054d839f80e0d6ee1423ef1140d1c59512cd787edbad1b611b2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26
T06:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c7zdg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:49:01Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2gwwb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:02Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:02 crc kubenswrapper[4492]: I1126 06:49:02.707217 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"995e57c0-8e79-4857-8451-c7f7b51a05d3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70a3d5141782b7c4dcb2ef96a026e1fb4f4ef101c967f64a20ece097bbdf007f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1684e95c263df92c92efdd2240417c071f977b58d825abd2155277d7cce1fd57\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\
"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://15f4e01e3336948991062e895549de16371af485e1cc1f32591b5dc4fc62472b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef8e05b7e0643e9159acdd474c0f1ed97db182b0a7ed4f21b475ce6e4c051f52\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a6dd3695118a8c09585a7cfceb42ac5ae5898562c5f6442da6936f849a4e9f8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T06:48:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1126 06:48:41.573117 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1126 06:48:41.573321 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 06:48:41.575536 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3533228848/tls.crt::/tmp/serving-cert-3533228848/tls.key\\\\\\\"\\\\nI1126 06:48:41.958496 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 06:48:41.961105 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 06:48:41.961124 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 06:48:41.961145 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 06:48:41.961150 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 06:48:41.965068 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 06:48:41.965092 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:48:41.965097 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:48:41.965101 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 06:48:41.965103 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 06:48:41.965106 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 06:48:41.965108 1 secure_serving.go:69] Use of insecure 
cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 06:48:41.965323 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 06:48:41.966098 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ca15652692a4dd6b01f7ec02b8ff37ac41ed740c4c47d451e0742aa80d0c028\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4421dd0243f7239dfa2ede847b9cb5230663e69a214dca3dfb1a4763ea76b233\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4421dd0243f7239dfa2ede847b9cb5230663e69a214dca3dfb1a4763ea76b233\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:24Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:02Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:02 crc kubenswrapper[4492]: I1126 06:49:02.716239 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:02Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:02 crc kubenswrapper[4492]: I1126 06:49:02.725528 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:02Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:02 crc kubenswrapper[4492]: I1126 06:49:02.735328 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2644f0895688786b5b70f08011457eed33cb0a7962ac6dde6b60dd3276497011\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://27d142c5328a6f659cd2cee0b6535403ccbfb07aee1ea29c928c9d80a847f4ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imag
eID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:02Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:02 crc kubenswrapper[4492]: I1126 06:49:02.745435 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:02Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:02 crc kubenswrapper[4492]: I1126 06:49:02.752762 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-hjxcm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfdb68d9-168b-4d04-a6ee-b2deef54a9ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f3b93237fa8e75f6423c8f194440aebb4fffec26f63b19b00396ee567fb454f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cpmw5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:50Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-hjxcm\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:02Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:02 crc kubenswrapper[4492]: I1126 06:49:02.762292 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://78117c73db01f54f893d52844cab11a6257a1f6b6b582fb751065e1acc329620\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:02Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:02 crc kubenswrapper[4492]: I1126 06:49:02.771737 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4aa19f79274209a31db5cfe0a8ff6f71000fc4efb2d65dfab3f719d3a7f1ee9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:02Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:02 crc kubenswrapper[4492]: I1126 06:49:02.779158 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:02 crc kubenswrapper[4492]: I1126 06:49:02.779212 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:02 crc kubenswrapper[4492]: I1126 06:49:02.779224 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:02 crc kubenswrapper[4492]: I1126 06:49:02.779240 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:02 crc kubenswrapper[4492]: I1126 06:49:02.779250 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:02Z","lastTransitionTime":"2025-11-26T06:49:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:49:02 crc kubenswrapper[4492]: I1126 06:49:02.784640 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b104695-0850-4fb3-b2f8-f764435f8694\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5de69ea04f427c1f151433ee53b119dfa7e3cd603f924ebca340c5050ff7d097\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://738cf2a56bd431ea3dcd29c8a6e66881e618cf6592af8ce1852be9776c41f803\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://dba16d525497282969e2ead66e1d4886793234d0f4d2ef143a8246329131b046\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://501133ebc0e93cbba7bea713d813e6f41caa28286ed59ae8d328b6f9050a1e71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0301a329531b99f93a3e91f672edc4ce17ae03cfd5562f5802362423f11670fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1519223368c78fc663c8f6674135a7c574425dccbd0151d6663836b4715e9f9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e5f1a548541188ad7099cc52b4901b60d2cf3d21c19f41d3fa5f3a223a73d66\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e5f1a548541188ad7099cc52b4901b60d2cf3d21c19f41d3fa5f3a223a73d66\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T06:48:57Z\\\",\\\"message\\\":\\\"4490 5849 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1126 06:48:57.314640 5849 reflector.go:311] Stopping reflector *v1.EgressService (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI1126 06:48:57.314697 5849 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1126 06:48:57.314935 5849 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/factory.go:140\\\\nI1126 06:48:57.315204 5849 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1126 06:48:57.323801 5849 shared_informer.go:320] Caches are synced for node-tracker-controller\\\\nI1126 06:48:57.323821 5849 services_controller.go:204] Setting up event handlers for services for network=default\\\\nI1126 06:48:57.323859 5849 ovnkube.go:599] Stopped ovnkube\\\\nI1126 06:48:57.323888 5849 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1126 06:48:57.323985 5849 ovnkube.go:137] failed to run ov\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:56Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-lghgp_openshift-ovn-kubernetes(9b104695-0850-4fb3-b2f8-f764435f8694)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dcc2ea28d7ab1193d0836b56704a623efefea918b62fde4a41c3d5466a531a39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4001ee998f435fc6249ef42a3a6a9a8cf4a5031eb8999158ba3ba6bfa41a4a43\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4001ee998f435fc6249ef42a3a6a9a8cf4a5031eb8999158ba3ba6bfa41a4a43\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lghgp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:02Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:02 crc kubenswrapper[4492]: I1126 06:49:02.791987 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-s4gtb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1cc59fbe-82e1-406b-95b1-a26b6b8ef467\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk29d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk29d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:49:02Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-s4gtb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:02Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:02 crc kubenswrapper[4492]: I1126 06:49:02.801566 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-nrzjd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"026c3325-a592-4828-8e4f-08bcb790014a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c4955932b597b4b409c6c0bd2195c7918b56f1db3aca639a0d47656173b6176\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70dcd5b71e0ed41855711d244d6628b740a92d6ada3fa3114a76c280b83e7402\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70dcd5b71e0ed41855711d244d6628b740a92d6ada3fa3114a76c280b83e7402\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://46f1a12a78f284278cf74713bf7a46f4d62a7f40258ec65ae217b5fbb5729525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://46f1a12a78f284278cf74713bf7a46f4d62a7f40258ec65ae217b5fbb5729525\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://233ce2ea0f4758076db5c922254232e5cccdc2466e03c6044a0ba4077dd71e8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://233ce2ea0f4758076db5c922254232e5cccdc2466e03c6044a0ba4077dd71e8a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1f662c3ac64d38717f3c8eb17f7ab22b7f9af88661f97ddae2caeec71b793a68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f662c3ac64d38717f3c8eb17f7ab22b7f9af88661f97ddae2caeec71b793a68\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://39153e3f02feb4f9af860e81e6e426354d4113d26135aadb9bc708dcb2cb1770\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://39153e3f02feb4f9af860e81e6e426354d4113d26135aadb9bc708dcb2cb1770\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b34363c342a19d8eda1d5be4bea825a4e0f9a4281915b6ae5d0cc5b00ad7c926\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b34363c342a19d8eda1d5be4bea825a4e0f9a4281915b6ae5d0cc5b00ad7c926\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-nrzjd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:02Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:02 crc kubenswrapper[4492]: I1126 06:49:02.809349 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6lnwf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0151e6e0-df4e-4482-9309-f8cce9bc6ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f91d7ead0eaa9a8c8d4ec6372d35236fc33de1f8606616efadfee2ec6a71324\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cmsnp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6lnwf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:02Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:02 crc kubenswrapper[4492]: I1126 06:49:02.816379 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"04bf18ad-d2a1-4b30-a3fa-2b6247363c82\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fad93839d2a51dffea51b659a6dcbfe24701e00ebb88e18329f7aa4351e1b4f5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lwjt9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://695ce8a08afa726c47c6aa48ddd828cbc420a9740de6cf165351e5bd68174a89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lwjt9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6blv7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:02Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:02 crc kubenswrapper[4492]: I1126 06:49:02.869200 4492 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/1cc59fbe-82e1-406b-95b1-a26b6b8ef467-metrics-certs\") pod \"network-metrics-daemon-s4gtb\" (UID: \"1cc59fbe-82e1-406b-95b1-a26b6b8ef467\") " pod="openshift-multus/network-metrics-daemon-s4gtb" Nov 26 06:49:02 crc kubenswrapper[4492]: E1126 06:49:02.869395 4492 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 26 06:49:02 crc kubenswrapper[4492]: E1126 06:49:02.869474 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/1cc59fbe-82e1-406b-95b1-a26b6b8ef467-metrics-certs podName:1cc59fbe-82e1-406b-95b1-a26b6b8ef467 nodeName:}" failed. No retries permitted until 2025-11-26 06:49:03.869458539 +0000 UTC m=+39.753346836 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/1cc59fbe-82e1-406b-95b1-a26b6b8ef467-metrics-certs") pod "network-metrics-daemon-s4gtb" (UID: "1cc59fbe-82e1-406b-95b1-a26b6b8ef467") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 26 06:49:02 crc kubenswrapper[4492]: I1126 06:49:02.881612 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:02 crc kubenswrapper[4492]: I1126 06:49:02.881648 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:02 crc kubenswrapper[4492]: I1126 06:49:02.881660 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:02 crc kubenswrapper[4492]: I1126 06:49:02.881679 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:02 crc kubenswrapper[4492]: I1126 06:49:02.881692 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:02Z","lastTransitionTime":"2025-11-26T06:49:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:02 crc kubenswrapper[4492]: I1126 06:49:02.983524 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:02 crc kubenswrapper[4492]: I1126 06:49:02.983552 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:02 crc kubenswrapper[4492]: I1126 06:49:02.983561 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:02 crc kubenswrapper[4492]: I1126 06:49:02.983576 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:02 crc kubenswrapper[4492]: I1126 06:49:02.983587 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:02Z","lastTransitionTime":"2025-11-26T06:49:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 26 06:49:03 crc kubenswrapper[4492]: I1126 06:49:03.085261 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:03 crc kubenswrapper[4492]: I1126 06:49:03.085306 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:03 crc kubenswrapper[4492]: I1126 06:49:03.085318 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:03 crc kubenswrapper[4492]: I1126 06:49:03.085346 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:03 crc kubenswrapper[4492]: I1126 06:49:03.085362 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:03Z","lastTransitionTime":"2025-11-26T06:49:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:03 crc kubenswrapper[4492]: I1126 06:49:03.187839 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:03 crc kubenswrapper[4492]: I1126 06:49:03.187877 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:03 crc kubenswrapper[4492]: I1126 06:49:03.187887 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:03 crc kubenswrapper[4492]: I1126 06:49:03.187905 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:03 crc kubenswrapper[4492]: I1126 06:49:03.187920 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:03Z","lastTransitionTime":"2025-11-26T06:49:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:03 crc kubenswrapper[4492]: I1126 06:49:03.291868 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:03 crc kubenswrapper[4492]: I1126 06:49:03.291921 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:03 crc kubenswrapper[4492]: I1126 06:49:03.291938 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:03 crc kubenswrapper[4492]: I1126 06:49:03.291953 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:03 crc kubenswrapper[4492]: I1126 06:49:03.291970 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:03Z","lastTransitionTime":"2025-11-26T06:49:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:03 crc kubenswrapper[4492]: I1126 06:49:03.393704 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:03 crc kubenswrapper[4492]: I1126 06:49:03.393739 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:03 crc kubenswrapper[4492]: I1126 06:49:03.393767 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:03 crc kubenswrapper[4492]: I1126 06:49:03.393781 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:03 crc kubenswrapper[4492]: I1126 06:49:03.393789 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:03Z","lastTransitionTime":"2025-11-26T06:49:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:03 crc kubenswrapper[4492]: I1126 06:49:03.437479 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 26 06:49:03 crc kubenswrapper[4492]: E1126 06:49:03.437622 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 26 06:49:03 crc kubenswrapper[4492]: I1126 06:49:03.495628 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:03 crc kubenswrapper[4492]: I1126 06:49:03.495980 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:03 crc kubenswrapper[4492]: I1126 06:49:03.496063 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:03 crc kubenswrapper[4492]: I1126 06:49:03.496136 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:03 crc kubenswrapper[4492]: I1126 06:49:03.496232 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:03Z","lastTransitionTime":"2025-11-26T06:49:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:03 crc kubenswrapper[4492]: I1126 06:49:03.598570 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:03 crc kubenswrapper[4492]: I1126 06:49:03.598697 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:03 crc kubenswrapper[4492]: I1126 06:49:03.598754 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:03 crc kubenswrapper[4492]: I1126 06:49:03.598803 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:03 crc kubenswrapper[4492]: I1126 06:49:03.598857 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:03Z","lastTransitionTime":"2025-11-26T06:49:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:03 crc kubenswrapper[4492]: I1126 06:49:03.701244 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:03 crc kubenswrapper[4492]: I1126 06:49:03.701278 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:03 crc kubenswrapper[4492]: I1126 06:49:03.701288 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:03 crc kubenswrapper[4492]: I1126 06:49:03.701300 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:03 crc kubenswrapper[4492]: I1126 06:49:03.701310 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:03Z","lastTransitionTime":"2025-11-26T06:49:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:03 crc kubenswrapper[4492]: I1126 06:49:03.803885 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:03 crc kubenswrapper[4492]: I1126 06:49:03.803920 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:03 crc kubenswrapper[4492]: I1126 06:49:03.803930 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:03 crc kubenswrapper[4492]: I1126 06:49:03.803946 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:03 crc kubenswrapper[4492]: I1126 06:49:03.803957 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:03Z","lastTransitionTime":"2025-11-26T06:49:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:03 crc kubenswrapper[4492]: I1126 06:49:03.879253 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/1cc59fbe-82e1-406b-95b1-a26b6b8ef467-metrics-certs\") pod \"network-metrics-daemon-s4gtb\" (UID: \"1cc59fbe-82e1-406b-95b1-a26b6b8ef467\") " pod="openshift-multus/network-metrics-daemon-s4gtb"
Nov 26 06:49:03 crc kubenswrapper[4492]: E1126 06:49:03.879374 4492 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 26 06:49:03 crc kubenswrapper[4492]: E1126 06:49:03.879437 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/1cc59fbe-82e1-406b-95b1-a26b6b8ef467-metrics-certs podName:1cc59fbe-82e1-406b-95b1-a26b6b8ef467 nodeName:}" failed. No retries permitted until 2025-11-26 06:49:05.879420521 +0000 UTC m=+41.763308819 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/1cc59fbe-82e1-406b-95b1-a26b6b8ef467-metrics-certs") pod "network-metrics-daemon-s4gtb" (UID: "1cc59fbe-82e1-406b-95b1-a26b6b8ef467") : object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 26 06:49:03 crc kubenswrapper[4492]: I1126 06:49:03.906378 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:03 crc kubenswrapper[4492]: I1126 06:49:03.906432 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:03 crc kubenswrapper[4492]: I1126 06:49:03.906445 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:03 crc kubenswrapper[4492]: I1126 06:49:03.906457 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:03 crc kubenswrapper[4492]: I1126 06:49:03.906468 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:03Z","lastTransitionTime":"2025-11-26T06:49:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:04 crc kubenswrapper[4492]: I1126 06:49:04.008447 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:04 crc kubenswrapper[4492]: I1126 06:49:04.008470 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:04 crc kubenswrapper[4492]: I1126 06:49:04.008480 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:04 crc kubenswrapper[4492]: I1126 06:49:04.008496 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:04 crc kubenswrapper[4492]: I1126 06:49:04.008507 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:04Z","lastTransitionTime":"2025-11-26T06:49:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:04 crc kubenswrapper[4492]: I1126 06:49:04.110736 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:04 crc kubenswrapper[4492]: I1126 06:49:04.110770 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:04 crc kubenswrapper[4492]: I1126 06:49:04.110778 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:04 crc kubenswrapper[4492]: I1126 06:49:04.110798 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:04 crc kubenswrapper[4492]: I1126 06:49:04.110808 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:04Z","lastTransitionTime":"2025-11-26T06:49:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:04 crc kubenswrapper[4492]: I1126 06:49:04.213139 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:04 crc kubenswrapper[4492]: I1126 06:49:04.213217 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:04 crc kubenswrapper[4492]: I1126 06:49:04.213228 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:04 crc kubenswrapper[4492]: I1126 06:49:04.213242 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:04 crc kubenswrapper[4492]: I1126 06:49:04.213252 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:04Z","lastTransitionTime":"2025-11-26T06:49:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:04 crc kubenswrapper[4492]: I1126 06:49:04.315672 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:04 crc kubenswrapper[4492]: I1126 06:49:04.315699 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:04 crc kubenswrapper[4492]: I1126 06:49:04.315709 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:04 crc kubenswrapper[4492]: I1126 06:49:04.315722 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:04 crc kubenswrapper[4492]: I1126 06:49:04.315731 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:04Z","lastTransitionTime":"2025-11-26T06:49:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:04 crc kubenswrapper[4492]: I1126 06:49:04.417000 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:04 crc kubenswrapper[4492]: I1126 06:49:04.417024 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:04 crc kubenswrapper[4492]: I1126 06:49:04.417032 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:04 crc kubenswrapper[4492]: I1126 06:49:04.417041 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:04 crc kubenswrapper[4492]: I1126 06:49:04.417078 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:04Z","lastTransitionTime":"2025-11-26T06:49:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:04 crc kubenswrapper[4492]: I1126 06:49:04.437590 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 26 06:49:04 crc kubenswrapper[4492]: I1126 06:49:04.437694 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 26 06:49:04 crc kubenswrapper[4492]: I1126 06:49:04.437628 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-s4gtb"
Nov 26 06:49:04 crc kubenswrapper[4492]: E1126 06:49:04.437898 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 06:49:04 crc kubenswrapper[4492]: E1126 06:49:04.438021 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 06:49:04 crc kubenswrapper[4492]: E1126 06:49:04.438123 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-s4gtb" podUID="1cc59fbe-82e1-406b-95b1-a26b6b8ef467" Nov 26 06:49:04 crc kubenswrapper[4492]: I1126 06:49:04.447241 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2gwwb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d23da2c-14b7-4671-b87e-7506855ca163\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://56a756e2798f3a758f7cc404b3c1e543389f88510f1f1bcef6bd603086b5ac96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c7zdg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://23c2c8bf0201054d839f80e0d6ee1423ef1140d1c59512cd787edbad1b611b2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d77325
7453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c7zdg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:49:01Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2gwwb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:04Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:04 crc kubenswrapper[4492]: I1126 06:49:04.457246 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"995e57c0-8e79-4857-8451-c7f7b51a05d3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70a3d5141782b7c4dcb2ef96a026e1fb4f4ef101c967f64a20ece097bbdf007f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1684e95c263df92c92efdd2240417c071f977b58d825abd2155277d7cce1fd57\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-ku
be-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://15f4e01e3336948991062e895549de16371af485e1cc1f32591b5dc4fc62472b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef8e05b7e0643e9159acdd474c0f1ed97db182b0a7ed4f21b475ce6e4c051f52\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a6dd3695118a8c09585a7cfceb42ac5ae5898562c5f6442da6936f849a4e9f8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T06:48:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1126 06:48:41.573117 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1126 06:48:41.573321 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 06:48:41.575536 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3533228848/tls.crt::/tmp/serving-cert-3533228848/tls.key\\\\\\\"\\\\nI1126 06:48:41.958496 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 06:48:41.961105 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 06:48:41.961124 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 06:48:41.961145 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 06:48:41.961150 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 06:48:41.965068 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 06:48:41.965092 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:48:41.965097 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:48:41.965101 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 06:48:41.965103 1 secure_serving.go:69] Use of insecure cipher 
'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 06:48:41.965106 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 06:48:41.965108 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 06:48:41.965323 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 06:48:41.966098 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ca15652692a4dd6b01f7ec02b8ff37ac41ed740c4c47d451e0742aa80d0c028\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4421dd0243f7239dfa2ede847b9cb5230663e69a214dca3dfb1a4763ea76b233\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4421dd0243f7239dfa2ede847b9cb5230663e69a214dca3dfb1a4763ea76b233\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:24Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:04Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:04 crc kubenswrapper[4492]: I1126 06:49:04.466210 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:04Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:04 crc kubenswrapper[4492]: I1126 06:49:04.474193 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:04Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:04 crc kubenswrapper[4492]: I1126 06:49:04.485694 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2644f0895688786b5b70f08011457eed33cb0a7962ac6dde6b60dd3276497011\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://27d142c5328a6f659cd2cee0b6535403ccbfb07aee1ea29c928c9d80a847f4ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imag
eID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:04Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:04 crc kubenswrapper[4492]: I1126 06:49:04.494599 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:04Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:04 crc kubenswrapper[4492]: I1126 06:49:04.503444 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-hjxcm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfdb68d9-168b-4d04-a6ee-b2deef54a9ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f3b93237fa8e75f6423c8f194440aebb4fffec26f63b19b00396ee567fb454f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cpmw5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:50Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-hjxcm\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:04Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:04 crc kubenswrapper[4492]: I1126 06:49:04.513210 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://78117c73db01f54f893d52844cab11a6257a1f6b6b582fb751065e1acc329620\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:04Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:04 crc kubenswrapper[4492]: I1126 06:49:04.519308 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:04 crc kubenswrapper[4492]: I1126 06:49:04.519343 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:04 crc kubenswrapper[4492]: I1126 06:49:04.519353 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:04 crc kubenswrapper[4492]: I1126 06:49:04.519386 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:04 crc kubenswrapper[4492]: I1126 06:49:04.519394 4492 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:04Z","lastTransitionTime":"2025-11-26T06:49:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:04 crc kubenswrapper[4492]: I1126 06:49:04.524162 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4aa19f79274209a31db5cfe0a8ff6f71000fc4efb2d65dfab3f719d3a7f1ee9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:04Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:04 crc kubenswrapper[4492]: I1126 06:49:04.541374 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b104695-0850-4fb3-b2f8-f764435f8694\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5de69ea04f427c1f151433ee53b119dfa7e3cd603f924ebca340c5050ff7d097\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://738cf2a56bd431ea3dcd29c8a6e66881e618cf6592af8ce1852be9776c41f803\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dba16d525497282969e2ead66e1d4886793234d0f4d2ef143a8246329131b046\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://501133ebc0e93cbba7bea713d813e6f41caa28286ed59ae8d328b6f9050a1e71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0301a329531b99f93a3e91f672edc4ce17ae03cfd5562f5802362423f11670fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1519223368c78fc663c8f6674135a7c574425dccbd0151d6663836b4715e9f9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3e5f1a548541188ad7099cc52b4901b60d2cf3d21c19f41d3fa5f3a223a73d66\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e5f1a548541188ad7099cc52b4901b60d2cf3d21c19f41d3fa5f3a223a73d66\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T06:48:57Z\\\",\\\"message\\\":\\\"4490 5849 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1126 06:48:57.314640 5849 reflector.go:311] Stopping reflector *v1.EgressService (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI1126 06:48:57.314697 5849 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1126 06:48:57.314935 5849 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/factory.go:140\\\\nI1126 06:48:57.315204 5849 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1126 06:48:57.323801 5849 shared_informer.go:320] Caches are synced for node-tracker-controller\\\\nI1126 06:48:57.323821 5849 services_controller.go:204] Setting up event handlers for services for network=default\\\\nI1126 06:48:57.323859 5849 ovnkube.go:599] Stopped ovnkube\\\\nI1126 06:48:57.323888 5849 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1126 06:48:57.323985 5849 ovnkube.go:137] failed to run ov\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:56Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-lghgp_openshift-ovn-kubernetes(9b104695-0850-4fb3-b2f8-f764435f8694)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dcc2ea28d7ab1193d0836b56704a623efefea918b62fde4a41c3d5466a531a39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4001ee998f435fc6249ef42a3a6a9a8cf4a5031eb8999158ba3ba6bfa41a4a43\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4001ee998f435fc6249ef42a3a6a9a8cf4a5031eb8999158ba3ba6bfa41a4a43\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lghgp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:04Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:04 crc kubenswrapper[4492]: I1126 06:49:04.548230 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-s4gtb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1cc59fbe-82e1-406b-95b1-a26b6b8ef467\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk29d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk29d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:49:02Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-s4gtb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:04Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:04 crc kubenswrapper[4492]: I1126 06:49:04.560079 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-nrzjd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"026c3325-a592-4828-8e4f-08bcb790014a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c4955932b597b4b409c6c0bd2195c7918b56f1db3aca639a0d47656173b6176\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70dcd5b71e0ed41855711d244d6628b740a92d6ada3fa3114a76c280b83e7402\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70dcd5b71e0ed41855711d244d6628b740a92d6ada3fa3114a76c280b83e7402\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://46f1a12a78f284278cf74713bf7a46f4d62a7f40258ec65ae217b5fbb5729525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://46f1a12a78f284278cf74713bf7a46f4d62a7f40258ec65ae217b5fbb5729525\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://233ce2ea0f4758076db5c922254232e5cccdc2466e03c6044a0ba4077dd71e8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://233ce2ea0f4758076db5c922254232e5cccdc2466e03c6044a0ba4077dd71e8a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1f662c3ac64d38717f3c8eb17f7ab22b7f9af88661f97ddae2caeec71b793a68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f662c3ac64d38717f3c8eb17f7ab22b7f9af88661f97ddae2caeec71b793a68\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://39153e3f02feb4f9af860e81e6e426354d4113d26135aadb9bc708dcb2cb1770\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://39153e3f02feb4f9af860e81e6e426354d4113d26135aadb9bc708dcb2cb1770\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b34363c342a19d8eda1d5be4bea825a4e0f9a4281915b6ae5d0cc5b00ad7c926\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b34363c342a19d8eda1d5be4bea825a4e0f9a4281915b6ae5d0cc5b00ad7c926\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-nrzjd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:04Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:04 crc kubenswrapper[4492]: I1126 06:49:04.567860 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6lnwf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0151e6e0-df4e-4482-9309-f8cce9bc6ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f91d7ead0eaa9a8c8d4ec6372d35236fc33de1f8606616efadfee2ec6a71324\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cmsnp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6lnwf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:04Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:04 crc kubenswrapper[4492]: I1126 06:49:04.576378 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"04bf18ad-d2a1-4b30-a3fa-2b6247363c82\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fad93839d2a51dffea51b659a6dcbfe24701e00ebb88e18329f7aa4351e1b4f5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lwjt9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://695ce8a08afa726c47c6aa48ddd828cbc420a9740de6cf165351e5bd68174a89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lwjt9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6blv7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:04Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:04 crc kubenswrapper[4492]: I1126 06:49:04.584965 4492 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"779b4f9a-92b7-4dcc-938a-e4de5decd688\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e170e91f442c9f45c7adfc9a5f8435cb51135522d5ac61f29829834c1f797e0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a8187b933b520c7a9c1c7f798f841f3892c249f52eddd13c0c7585a8bc916f20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b87661ddeafdf124a87d6bc50755b340e32d88bbc35a005ae13aa66aa3b39ff4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a8bf4249c1551f054875ff3ef1
46502de6c99fd3afd10d78b41274196a35a6d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:24Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:04Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:04 crc kubenswrapper[4492]: I1126 06:49:04.593465 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-5bshd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a471ac3f-0ac0-4110-94bb-194c0de0af26\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a91fba8751c53be54b0060bfc75906ab11b521770ca44425d8910fa13027c9d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-
cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gt98z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-5bshd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:04Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:04 crc kubenswrapper[4492]: I1126 06:49:04.621805 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:04 crc kubenswrapper[4492]: I1126 06:49:04.621844 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:04 crc kubenswrapper[4492]: I1126 06:49:04.621857 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:04 crc kubenswrapper[4492]: I1126 06:49:04.621875 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:04 crc kubenswrapper[4492]: I1126 06:49:04.621887 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:04Z","lastTransitionTime":"2025-11-26T06:49:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 26 06:49:04 crc kubenswrapper[4492]: I1126 06:49:04.724319 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:04 crc kubenswrapper[4492]: I1126 06:49:04.724346 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:04 crc kubenswrapper[4492]: I1126 06:49:04.724354 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:04 crc kubenswrapper[4492]: I1126 06:49:04.724367 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:04 crc kubenswrapper[4492]: I1126 06:49:04.724377 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:04Z","lastTransitionTime":"2025-11-26T06:49:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:04 crc kubenswrapper[4492]: I1126 06:49:04.826696 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:04 crc kubenswrapper[4492]: I1126 06:49:04.826724 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:04 crc kubenswrapper[4492]: I1126 06:49:04.826735 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:04 crc kubenswrapper[4492]: I1126 06:49:04.826747 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:04 crc kubenswrapper[4492]: I1126 06:49:04.826758 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:04Z","lastTransitionTime":"2025-11-26T06:49:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:04 crc kubenswrapper[4492]: I1126 06:49:04.928656 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:04 crc kubenswrapper[4492]: I1126 06:49:04.928689 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:04 crc kubenswrapper[4492]: I1126 06:49:04.928698 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:04 crc kubenswrapper[4492]: I1126 06:49:04.928708 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:04 crc kubenswrapper[4492]: I1126 06:49:04.928719 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:04Z","lastTransitionTime":"2025-11-26T06:49:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:05 crc kubenswrapper[4492]: I1126 06:49:05.030242 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:05 crc kubenswrapper[4492]: I1126 06:49:05.030276 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:05 crc kubenswrapper[4492]: I1126 06:49:05.030285 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:05 crc kubenswrapper[4492]: I1126 06:49:05.030320 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:05 crc kubenswrapper[4492]: I1126 06:49:05.030356 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:05Z","lastTransitionTime":"2025-11-26T06:49:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:05 crc kubenswrapper[4492]: I1126 06:49:05.132531 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:05 crc kubenswrapper[4492]: I1126 06:49:05.132670 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:05 crc kubenswrapper[4492]: I1126 06:49:05.132729 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:05 crc kubenswrapper[4492]: I1126 06:49:05.132786 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:05 crc kubenswrapper[4492]: I1126 06:49:05.132840 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:05Z","lastTransitionTime":"2025-11-26T06:49:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:05 crc kubenswrapper[4492]: I1126 06:49:05.198009 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:05 crc kubenswrapper[4492]: I1126 06:49:05.198046 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:05 crc kubenswrapper[4492]: I1126 06:49:05.198065 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:05 crc kubenswrapper[4492]: I1126 06:49:05.198079 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:05 crc kubenswrapper[4492]: I1126 06:49:05.198092 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:05Z","lastTransitionTime":"2025-11-26T06:49:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:05 crc kubenswrapper[4492]: E1126 06:49:05.207906 4492 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148056Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608856Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:05Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:05Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:05Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:05Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:05Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:05Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:05Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:05Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"5a30a4c6-2314-4103-8c18-44e795d62516\\\",\\\"systemUUID\\\":\\\"836cf739-0185-4d24-bd92-dec4516ccf4f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:05Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:05 crc kubenswrapper[4492]: I1126 06:49:05.211391 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:05 crc kubenswrapper[4492]: I1126 06:49:05.211414 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 06:49:05 crc kubenswrapper[4492]: I1126 06:49:05.211422 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:05 crc kubenswrapper[4492]: I1126 06:49:05.211433 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:05 crc kubenswrapper[4492]: I1126 06:49:05.211441 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:05Z","lastTransitionTime":"2025-11-26T06:49:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:05 crc kubenswrapper[4492]: E1126 06:49:05.220813 4492 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148056Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608856Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:05Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:05Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:05Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:05Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:05Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:05Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:05Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:05Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"5a30a4c6-2314-4103-8c18-44e795d62516\\\",\\\"systemUUID\\\":\\\"836cf739-0185-4d24-bd92-dec4516ccf4f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:05Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:05 crc kubenswrapper[4492]: I1126 06:49:05.223569 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:05 crc kubenswrapper[4492]: I1126 06:49:05.223598 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 06:49:05 crc kubenswrapper[4492]: I1126 06:49:05.223610 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:05 crc kubenswrapper[4492]: I1126 06:49:05.223620 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:05 crc kubenswrapper[4492]: I1126 06:49:05.223628 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:05Z","lastTransitionTime":"2025-11-26T06:49:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:05 crc kubenswrapper[4492]: E1126 06:49:05.232091 4492 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148056Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608856Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:05Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:05Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:05Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:05Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:05Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:05Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:05Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:05Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"5a30a4c6-2314-4103-8c18-44e795d62516\\\",\\\"systemUUID\\\":\\\"836cf739-0185-4d24-bd92-dec4516ccf4f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:05Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:05 crc kubenswrapper[4492]: I1126 06:49:05.234574 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:05 crc kubenswrapper[4492]: I1126 06:49:05.234627 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 06:49:05 crc kubenswrapper[4492]: I1126 06:49:05.234639 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:05 crc kubenswrapper[4492]: I1126 06:49:05.234650 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:05 crc kubenswrapper[4492]: I1126 06:49:05.234658 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:05Z","lastTransitionTime":"2025-11-26T06:49:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:05 crc kubenswrapper[4492]: E1126 06:49:05.243238 4492 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148056Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608856Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:05Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:05Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:05Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:05Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:05Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:05Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:05Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:05Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"5a30a4c6-2314-4103-8c18-44e795d62516\\\",\\\"systemUUID\\\":\\\"836cf739-0185-4d24-bd92-dec4516ccf4f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:05Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:05 crc kubenswrapper[4492]: I1126 06:49:05.247254 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:05 crc kubenswrapper[4492]: I1126 06:49:05.247370 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 06:49:05 crc kubenswrapper[4492]: I1126 06:49:05.247441 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:05 crc kubenswrapper[4492]: I1126 06:49:05.247502 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:05 crc kubenswrapper[4492]: I1126 06:49:05.247560 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:05Z","lastTransitionTime":"2025-11-26T06:49:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:05 crc kubenswrapper[4492]: E1126 06:49:05.260321 4492 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148056Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608856Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:05Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:05Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:05Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:05Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:05Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:05Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:05Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:05Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"5a30a4c6-2314-4103-8c18-44e795d62516\\\",\\\"systemUUID\\\":\\\"836cf739-0185-4d24-bd92-dec4516ccf4f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:05Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:05 crc kubenswrapper[4492]: E1126 06:49:05.260551 4492 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 26 06:49:05 crc kubenswrapper[4492]: I1126 06:49:05.261768 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
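The retry loop above fails four consecutive times on the same webhook call and then gives up. The root cause is the expired serving certificate on 127.0.0.1:9743: its NotAfter of 2025-08-24T17:21:41Z is months before the node's clock of 2025-11-26T06:49:05Z. Below is a minimal Go diagnostic sketch, assuming only the address taken from the log line: it dials the webhook port, skips chain verification so the handshake still succeeds, and compares the presented certificate's validity window against the local clock, which is the same comparison crypto/x509 makes when it reports "certificate has expired or is not yet valid".

package main

import (
	"crypto/tls"
	"fmt"
	"time"
)

func main() {
	// Address taken from the failing Post in the log; everything else is illustrative.
	conn, err := tls.Dial("tcp", "127.0.0.1:9743", &tls.Config{
		InsecureSkipVerify: true, // inspect the certificate without trusting it
	})
	if err != nil {
		fmt.Println("dial failed:", err)
		return
	}
	defer conn.Close()

	cert := conn.ConnectionState().PeerCertificates[0]
	now := time.Now()
	fmt.Printf("NotBefore: %s\nNotAfter:  %s\nNow:       %s\n", cert.NotBefore, cert.NotAfter, now)
	if now.After(cert.NotAfter) {
		// The condition behind "x509: certificate has expired or is not yet valid".
		fmt.Println("certificate has expired")
	}
}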
event="NodeHasSufficientMemory" Nov 26 06:49:05 crc kubenswrapper[4492]: I1126 06:49:05.261795 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:05 crc kubenswrapper[4492]: I1126 06:49:05.261803 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:05 crc kubenswrapper[4492]: I1126 06:49:05.261814 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:05 crc kubenswrapper[4492]: I1126 06:49:05.261822 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:05Z","lastTransitionTime":"2025-11-26T06:49:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:05 crc kubenswrapper[4492]: I1126 06:49:05.364154 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:05 crc kubenswrapper[4492]: I1126 06:49:05.364223 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:05 crc kubenswrapper[4492]: I1126 06:49:05.364236 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:05 crc kubenswrapper[4492]: I1126 06:49:05.364252 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:05 crc kubenswrapper[4492]: I1126 06:49:05.364263 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:05Z","lastTransitionTime":"2025-11-26T06:49:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:05 crc kubenswrapper[4492]: I1126 06:49:05.437882 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 06:49:05 crc kubenswrapper[4492]: E1126 06:49:05.437988 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 06:49:05 crc kubenswrapper[4492]: I1126 06:49:05.466295 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:05 crc kubenswrapper[4492]: I1126 06:49:05.466334 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:05 crc kubenswrapper[4492]: I1126 06:49:05.466346 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:05 crc kubenswrapper[4492]: I1126 06:49:05.466365 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:05 crc kubenswrapper[4492]: I1126 06:49:05.466377 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:05Z","lastTransitionTime":"2025-11-26T06:49:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:05 crc kubenswrapper[4492]: I1126 06:49:05.567987 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:05 crc kubenswrapper[4492]: I1126 06:49:05.568021 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:05 crc kubenswrapper[4492]: I1126 06:49:05.568033 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:05 crc kubenswrapper[4492]: I1126 06:49:05.568047 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:05 crc kubenswrapper[4492]: I1126 06:49:05.568067 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:05Z","lastTransitionTime":"2025-11-26T06:49:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 26 06:49:05 crc kubenswrapper[4492]: I1126 06:49:05.670465 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:05 crc kubenswrapper[4492]: I1126 06:49:05.670737 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:05 crc kubenswrapper[4492]: I1126 06:49:05.670812 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:05 crc kubenswrapper[4492]: I1126 06:49:05.670892 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:05 crc kubenswrapper[4492]: I1126 06:49:05.670945 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:05Z","lastTransitionTime":"2025-11-26T06:49:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:05 crc kubenswrapper[4492]: I1126 06:49:05.773569 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:05 crc kubenswrapper[4492]: I1126 06:49:05.773608 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:05 crc kubenswrapper[4492]: I1126 06:49:05.773619 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:05 crc kubenswrapper[4492]: I1126 06:49:05.773632 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:05 crc kubenswrapper[4492]: I1126 06:49:05.773644 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:05Z","lastTransitionTime":"2025-11-26T06:49:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:05 crc kubenswrapper[4492]: I1126 06:49:05.876281 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:05 crc kubenswrapper[4492]: I1126 06:49:05.876726 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:05 crc kubenswrapper[4492]: I1126 06:49:05.876786 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:05 crc kubenswrapper[4492]: I1126 06:49:05.876873 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:05 crc kubenswrapper[4492]: I1126 06:49:05.876940 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:05Z","lastTransitionTime":"2025-11-26T06:49:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:05 crc kubenswrapper[4492]: I1126 06:49:05.897594 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/1cc59fbe-82e1-406b-95b1-a26b6b8ef467-metrics-certs\") pod \"network-metrics-daemon-s4gtb\" (UID: \"1cc59fbe-82e1-406b-95b1-a26b6b8ef467\") " pod="openshift-multus/network-metrics-daemon-s4gtb"
Nov 26 06:49:05 crc kubenswrapper[4492]: E1126 06:49:05.897866 4492 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 26 06:49:05 crc kubenswrapper[4492]: E1126 06:49:05.898031 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/1cc59fbe-82e1-406b-95b1-a26b6b8ef467-metrics-certs podName:1cc59fbe-82e1-406b-95b1-a26b6b8ef467 nodeName:}" failed. No retries permitted until 2025-11-26 06:49:09.898001752 +0000 UTC m=+45.781890060 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/1cc59fbe-82e1-406b-95b1-a26b6b8ef467-metrics-certs") pod "network-metrics-daemon-s4gtb" (UID: "1cc59fbe-82e1-406b-95b1-a26b6b8ef467") : object "openshift-multus"/"metrics-daemon-secret" not registered
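The MountVolume failure above is parked rather than retried immediately: "No retries permitted until 06:49:09 ... (durationBeforeRetry 4s)". A 4s delay is consistent with a doubling backoff that started at 0.5s (0.5s, 1s, 2s, 4s on the fourth consecutive failure). A sketch of that pattern follows; the initial duration, factor, and cap here are assumptions for illustration, not the kubelet's actual constants:

package main

import (
	"fmt"
	"time"
)

func main() {
	backoff := 500 * time.Millisecond // assumed initial duration
	maxBackoff := 2 * time.Minute     // assumed upper bound
	for attempt := 1; attempt <= 5; attempt++ {
		fmt.Printf("attempt %d failed, durationBeforeRetry %s\n", attempt, backoff)
		backoff *= 2 // double on every consecutive failure
		if backoff > maxBackoff {
			backoff = maxBackoff
		}
	}
}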
Has your network provider started?"} Nov 26 06:49:06 crc kubenswrapper[4492]: I1126 06:49:06.081956 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:06 crc kubenswrapper[4492]: I1126 06:49:06.082005 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:06 crc kubenswrapper[4492]: I1126 06:49:06.082022 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:06 crc kubenswrapper[4492]: I1126 06:49:06.082046 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:06 crc kubenswrapper[4492]: I1126 06:49:06.082086 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:06Z","lastTransitionTime":"2025-11-26T06:49:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:06 crc kubenswrapper[4492]: I1126 06:49:06.183987 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:06 crc kubenswrapper[4492]: I1126 06:49:06.184111 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:06 crc kubenswrapper[4492]: I1126 06:49:06.184197 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:06 crc kubenswrapper[4492]: I1126 06:49:06.184282 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:06 crc kubenswrapper[4492]: I1126 06:49:06.184355 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:06Z","lastTransitionTime":"2025-11-26T06:49:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:06 crc kubenswrapper[4492]: I1126 06:49:06.286212 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:06 crc kubenswrapper[4492]: I1126 06:49:06.286239 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:06 crc kubenswrapper[4492]: I1126 06:49:06.286249 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:06 crc kubenswrapper[4492]: I1126 06:49:06.286259 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:06 crc kubenswrapper[4492]: I1126 06:49:06.286266 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:06Z","lastTransitionTime":"2025-11-26T06:49:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:49:06 crc kubenswrapper[4492]: I1126 06:49:06.388751 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:06 crc kubenswrapper[4492]: I1126 06:49:06.388866 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:06 crc kubenswrapper[4492]: I1126 06:49:06.388941 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:06 crc kubenswrapper[4492]: I1126 06:49:06.389010 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:06 crc kubenswrapper[4492]: I1126 06:49:06.389084 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:06Z","lastTransitionTime":"2025-11-26T06:49:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:06 crc kubenswrapper[4492]: I1126 06:49:06.438516 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 06:49:06 crc kubenswrapper[4492]: I1126 06:49:06.438555 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 06:49:06 crc kubenswrapper[4492]: I1126 06:49:06.438555 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-s4gtb" Nov 26 06:49:06 crc kubenswrapper[4492]: E1126 06:49:06.438650 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 06:49:06 crc kubenswrapper[4492]: E1126 06:49:06.438772 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 06:49:06 crc kubenswrapper[4492]: E1126 06:49:06.438857 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-s4gtb" podUID="1cc59fbe-82e1-406b-95b1-a26b6b8ef467" Nov 26 06:49:06 crc kubenswrapper[4492]: I1126 06:49:06.491359 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:06 crc kubenswrapper[4492]: I1126 06:49:06.491391 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:06 crc kubenswrapper[4492]: I1126 06:49:06.491404 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:06 crc kubenswrapper[4492]: I1126 06:49:06.491418 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:06 crc kubenswrapper[4492]: I1126 06:49:06.491428 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:06Z","lastTransitionTime":"2025-11-26T06:49:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:06 crc kubenswrapper[4492]: I1126 06:49:06.593757 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:06 crc kubenswrapper[4492]: I1126 06:49:06.593789 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:06 crc kubenswrapper[4492]: I1126 06:49:06.593799 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:06 crc kubenswrapper[4492]: I1126 06:49:06.593813 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:06 crc kubenswrapper[4492]: I1126 06:49:06.593828 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:06Z","lastTransitionTime":"2025-11-26T06:49:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:49:06 crc kubenswrapper[4492]: I1126 06:49:06.695954 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:06 crc kubenswrapper[4492]: I1126 06:49:06.695987 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:06 crc kubenswrapper[4492]: I1126 06:49:06.695995 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:06 crc kubenswrapper[4492]: I1126 06:49:06.696006 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:06 crc kubenswrapper[4492]: I1126 06:49:06.696017 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:06Z","lastTransitionTime":"2025-11-26T06:49:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:06 crc kubenswrapper[4492]: I1126 06:49:06.798343 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:06 crc kubenswrapper[4492]: I1126 06:49:06.798378 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:06 crc kubenswrapper[4492]: I1126 06:49:06.798387 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:06 crc kubenswrapper[4492]: I1126 06:49:06.798402 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:06 crc kubenswrapper[4492]: I1126 06:49:06.798414 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:06Z","lastTransitionTime":"2025-11-26T06:49:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:06 crc kubenswrapper[4492]: I1126 06:49:06.900307 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:06 crc kubenswrapper[4492]: I1126 06:49:06.900348 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:06 crc kubenswrapper[4492]: I1126 06:49:06.900358 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:06 crc kubenswrapper[4492]: I1126 06:49:06.900374 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:06 crc kubenswrapper[4492]: I1126 06:49:06.900388 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:06Z","lastTransitionTime":"2025-11-26T06:49:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:49:07 crc kubenswrapper[4492]: I1126 06:49:07.002082 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:07 crc kubenswrapper[4492]: I1126 06:49:07.002112 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:07 crc kubenswrapper[4492]: I1126 06:49:07.002121 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:07 crc kubenswrapper[4492]: I1126 06:49:07.002130 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:07 crc kubenswrapper[4492]: I1126 06:49:07.002141 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:07Z","lastTransitionTime":"2025-11-26T06:49:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:07 crc kubenswrapper[4492]: I1126 06:49:07.103978 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:07 crc kubenswrapper[4492]: I1126 06:49:07.103999 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:07 crc kubenswrapper[4492]: I1126 06:49:07.104008 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:07 crc kubenswrapper[4492]: I1126 06:49:07.104020 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:07 crc kubenswrapper[4492]: I1126 06:49:07.104028 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:07Z","lastTransitionTime":"2025-11-26T06:49:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:07 crc kubenswrapper[4492]: I1126 06:49:07.205979 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:07 crc kubenswrapper[4492]: I1126 06:49:07.206008 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:07 crc kubenswrapper[4492]: I1126 06:49:07.206017 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:07 crc kubenswrapper[4492]: I1126 06:49:07.206028 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:07 crc kubenswrapper[4492]: I1126 06:49:07.206038 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:07Z","lastTransitionTime":"2025-11-26T06:49:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:49:07 crc kubenswrapper[4492]: I1126 06:49:07.307781 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:07 crc kubenswrapper[4492]: I1126 06:49:07.307801 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:07 crc kubenswrapper[4492]: I1126 06:49:07.307808 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:07 crc kubenswrapper[4492]: I1126 06:49:07.307816 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:07 crc kubenswrapper[4492]: I1126 06:49:07.307823 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:07Z","lastTransitionTime":"2025-11-26T06:49:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:07 crc kubenswrapper[4492]: I1126 06:49:07.409510 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:07 crc kubenswrapper[4492]: I1126 06:49:07.409531 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:07 crc kubenswrapper[4492]: I1126 06:49:07.409539 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:07 crc kubenswrapper[4492]: I1126 06:49:07.409549 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:07 crc kubenswrapper[4492]: I1126 06:49:07.409556 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:07Z","lastTransitionTime":"2025-11-26T06:49:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:07 crc kubenswrapper[4492]: I1126 06:49:07.438037 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 06:49:07 crc kubenswrapper[4492]: E1126 06:49:07.438227 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 06:49:07 crc kubenswrapper[4492]: I1126 06:49:07.511633 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:07 crc kubenswrapper[4492]: I1126 06:49:07.511666 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:07 crc kubenswrapper[4492]: I1126 06:49:07.511674 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:07 crc kubenswrapper[4492]: I1126 06:49:07.511684 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:07 crc kubenswrapper[4492]: I1126 06:49:07.511692 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:07Z","lastTransitionTime":"2025-11-26T06:49:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:07 crc kubenswrapper[4492]: I1126 06:49:07.613850 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:07 crc kubenswrapper[4492]: I1126 06:49:07.613887 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:07 crc kubenswrapper[4492]: I1126 06:49:07.613896 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:07 crc kubenswrapper[4492]: I1126 06:49:07.613908 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:07 crc kubenswrapper[4492]: I1126 06:49:07.613916 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:07Z","lastTransitionTime":"2025-11-26T06:49:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:49:07 crc kubenswrapper[4492]: I1126 06:49:07.715768 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:07 crc kubenswrapper[4492]: I1126 06:49:07.715791 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:07 crc kubenswrapper[4492]: I1126 06:49:07.715801 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:07 crc kubenswrapper[4492]: I1126 06:49:07.715812 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:07 crc kubenswrapper[4492]: I1126 06:49:07.715819 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:07Z","lastTransitionTime":"2025-11-26T06:49:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:07 crc kubenswrapper[4492]: I1126 06:49:07.817278 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:07 crc kubenswrapper[4492]: I1126 06:49:07.817302 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:07 crc kubenswrapper[4492]: I1126 06:49:07.817312 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:07 crc kubenswrapper[4492]: I1126 06:49:07.817321 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:07 crc kubenswrapper[4492]: I1126 06:49:07.817328 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:07Z","lastTransitionTime":"2025-11-26T06:49:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:07 crc kubenswrapper[4492]: I1126 06:49:07.918957 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:07 crc kubenswrapper[4492]: I1126 06:49:07.918980 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:07 crc kubenswrapper[4492]: I1126 06:49:07.918987 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:07 crc kubenswrapper[4492]: I1126 06:49:07.918998 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:07 crc kubenswrapper[4492]: I1126 06:49:07.919006 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:07Z","lastTransitionTime":"2025-11-26T06:49:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:49:08 crc kubenswrapper[4492]: I1126 06:49:08.021272 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:08 crc kubenswrapper[4492]: I1126 06:49:08.021309 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:08 crc kubenswrapper[4492]: I1126 06:49:08.021320 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:08 crc kubenswrapper[4492]: I1126 06:49:08.021333 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:08 crc kubenswrapper[4492]: I1126 06:49:08.021344 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:08Z","lastTransitionTime":"2025-11-26T06:49:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:08 crc kubenswrapper[4492]: I1126 06:49:08.122881 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:08 crc kubenswrapper[4492]: I1126 06:49:08.122927 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:08 crc kubenswrapper[4492]: I1126 06:49:08.122939 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:08 crc kubenswrapper[4492]: I1126 06:49:08.122956 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:08 crc kubenswrapper[4492]: I1126 06:49:08.122968 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:08Z","lastTransitionTime":"2025-11-26T06:49:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:08 crc kubenswrapper[4492]: I1126 06:49:08.225003 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:08 crc kubenswrapper[4492]: I1126 06:49:08.225038 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:08 crc kubenswrapper[4492]: I1126 06:49:08.225049 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:08 crc kubenswrapper[4492]: I1126 06:49:08.225074 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:08 crc kubenswrapper[4492]: I1126 06:49:08.225088 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:08Z","lastTransitionTime":"2025-11-26T06:49:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:49:08 crc kubenswrapper[4492]: I1126 06:49:08.327259 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:08 crc kubenswrapper[4492]: I1126 06:49:08.327295 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:08 crc kubenswrapper[4492]: I1126 06:49:08.327306 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:08 crc kubenswrapper[4492]: I1126 06:49:08.327316 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:08 crc kubenswrapper[4492]: I1126 06:49:08.327326 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:08Z","lastTransitionTime":"2025-11-26T06:49:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:08 crc kubenswrapper[4492]: I1126 06:49:08.429220 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:08 crc kubenswrapper[4492]: I1126 06:49:08.429252 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:08 crc kubenswrapper[4492]: I1126 06:49:08.429262 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:08 crc kubenswrapper[4492]: I1126 06:49:08.429273 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:08 crc kubenswrapper[4492]: I1126 06:49:08.429282 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:08Z","lastTransitionTime":"2025-11-26T06:49:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:08 crc kubenswrapper[4492]: I1126 06:49:08.437701 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 06:49:08 crc kubenswrapper[4492]: I1126 06:49:08.437718 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-s4gtb" Nov 26 06:49:08 crc kubenswrapper[4492]: I1126 06:49:08.437701 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 06:49:08 crc kubenswrapper[4492]: E1126 06:49:08.437833 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 06:49:08 crc kubenswrapper[4492]: E1126 06:49:08.437889 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 06:49:08 crc kubenswrapper[4492]: E1126 06:49:08.437951 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-s4gtb" podUID="1cc59fbe-82e1-406b-95b1-a26b6b8ef467" Nov 26 06:49:08 crc kubenswrapper[4492]: I1126 06:49:08.531531 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:08 crc kubenswrapper[4492]: I1126 06:49:08.531554 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:08 crc kubenswrapper[4492]: I1126 06:49:08.531563 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:08 crc kubenswrapper[4492]: I1126 06:49:08.531573 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:08 crc kubenswrapper[4492]: I1126 06:49:08.531581 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:08Z","lastTransitionTime":"2025-11-26T06:49:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:08 crc kubenswrapper[4492]: I1126 06:49:08.633873 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:08 crc kubenswrapper[4492]: I1126 06:49:08.633912 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:08 crc kubenswrapper[4492]: I1126 06:49:08.633922 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:08 crc kubenswrapper[4492]: I1126 06:49:08.633935 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:08 crc kubenswrapper[4492]: I1126 06:49:08.633948 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:08Z","lastTransitionTime":"2025-11-26T06:49:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:49:08 crc kubenswrapper[4492]: I1126 06:49:08.735870 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:08 crc kubenswrapper[4492]: I1126 06:49:08.735893 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:08 crc kubenswrapper[4492]: I1126 06:49:08.735903 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:08 crc kubenswrapper[4492]: I1126 06:49:08.735915 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:08 crc kubenswrapper[4492]: I1126 06:49:08.735923 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:08Z","lastTransitionTime":"2025-11-26T06:49:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:08 crc kubenswrapper[4492]: I1126 06:49:08.838380 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:08 crc kubenswrapper[4492]: I1126 06:49:08.838424 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:08 crc kubenswrapper[4492]: I1126 06:49:08.838440 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:08 crc kubenswrapper[4492]: I1126 06:49:08.838458 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:08 crc kubenswrapper[4492]: I1126 06:49:08.838469 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:08Z","lastTransitionTime":"2025-11-26T06:49:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:08 crc kubenswrapper[4492]: I1126 06:49:08.940504 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:08 crc kubenswrapper[4492]: I1126 06:49:08.940547 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:08 crc kubenswrapper[4492]: I1126 06:49:08.940556 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:08 crc kubenswrapper[4492]: I1126 06:49:08.940573 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:08 crc kubenswrapper[4492]: I1126 06:49:08.940584 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:08Z","lastTransitionTime":"2025-11-26T06:49:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:49:09 crc kubenswrapper[4492]: I1126 06:49:09.042409 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:09 crc kubenswrapper[4492]: I1126 06:49:09.042443 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:09 crc kubenswrapper[4492]: I1126 06:49:09.042453 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:09 crc kubenswrapper[4492]: I1126 06:49:09.042467 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:09 crc kubenswrapper[4492]: I1126 06:49:09.042477 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:09Z","lastTransitionTime":"2025-11-26T06:49:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:09 crc kubenswrapper[4492]: I1126 06:49:09.144467 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:09 crc kubenswrapper[4492]: I1126 06:49:09.144497 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:09 crc kubenswrapper[4492]: I1126 06:49:09.144507 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:09 crc kubenswrapper[4492]: I1126 06:49:09.144526 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:09 crc kubenswrapper[4492]: I1126 06:49:09.144536 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:09Z","lastTransitionTime":"2025-11-26T06:49:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:09 crc kubenswrapper[4492]: I1126 06:49:09.246425 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:09 crc kubenswrapper[4492]: I1126 06:49:09.246465 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:09 crc kubenswrapper[4492]: I1126 06:49:09.246477 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:09 crc kubenswrapper[4492]: I1126 06:49:09.246494 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:09 crc kubenswrapper[4492]: I1126 06:49:09.246503 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:09Z","lastTransitionTime":"2025-11-26T06:49:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:49:09 crc kubenswrapper[4492]: I1126 06:49:09.348241 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:09 crc kubenswrapper[4492]: I1126 06:49:09.348284 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:09 crc kubenswrapper[4492]: I1126 06:49:09.348297 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:09 crc kubenswrapper[4492]: I1126 06:49:09.348310 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:09 crc kubenswrapper[4492]: I1126 06:49:09.348324 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:09Z","lastTransitionTime":"2025-11-26T06:49:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:09 crc kubenswrapper[4492]: I1126 06:49:09.438352 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 06:49:09 crc kubenswrapper[4492]: E1126 06:49:09.438454 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 06:49:09 crc kubenswrapper[4492]: I1126 06:49:09.439154 4492 scope.go:117] "RemoveContainer" containerID="3e5f1a548541188ad7099cc52b4901b60d2cf3d21c19f41d3fa5f3a223a73d66" Nov 26 06:49:09 crc kubenswrapper[4492]: I1126 06:49:09.450698 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:09 crc kubenswrapper[4492]: I1126 06:49:09.450724 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:09 crc kubenswrapper[4492]: I1126 06:49:09.450732 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:09 crc kubenswrapper[4492]: I1126 06:49:09.450743 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:09 crc kubenswrapper[4492]: I1126 06:49:09.450753 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:09Z","lastTransitionTime":"2025-11-26T06:49:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:49:09 crc kubenswrapper[4492]: I1126 06:49:09.553408 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:09 crc kubenswrapper[4492]: I1126 06:49:09.553538 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:09 crc kubenswrapper[4492]: I1126 06:49:09.553548 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:09 crc kubenswrapper[4492]: I1126 06:49:09.553562 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:09 crc kubenswrapper[4492]: I1126 06:49:09.553571 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:09Z","lastTransitionTime":"2025-11-26T06:49:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:09 crc kubenswrapper[4492]: I1126 06:49:09.655910 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:09 crc kubenswrapper[4492]: I1126 06:49:09.655972 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:09 crc kubenswrapper[4492]: I1126 06:49:09.655981 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:09 crc kubenswrapper[4492]: I1126 06:49:09.655999 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:09 crc kubenswrapper[4492]: I1126 06:49:09.656025 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:09Z","lastTransitionTime":"2025-11-26T06:49:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:49:09 crc kubenswrapper[4492]: I1126 06:49:09.690234 4492 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-lghgp_9b104695-0850-4fb3-b2f8-f764435f8694/ovnkube-controller/1.log" Nov 26 06:49:09 crc kubenswrapper[4492]: I1126 06:49:09.693036 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" event={"ID":"9b104695-0850-4fb3-b2f8-f764435f8694","Type":"ContainerStarted","Data":"c02ed7c2507748e0a1171eb6e43daac324db91b4329f8ca3c2aa7cabac7205b4"} Nov 26 06:49:09 crc kubenswrapper[4492]: I1126 06:49:09.693214 4492 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 26 06:49:09 crc kubenswrapper[4492]: I1126 06:49:09.707012 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"779b4f9a-92b7-4dcc-938a-e4de5decd688\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e170e91f442c9f45c7adfc9a5f8435cb51135522d5ac61f29829834c1f797e0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a8187b933b520c7a9c1c7f798f841f3892c249f52eddd13c0c7585a8bc916f20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b87661ddeafdf124a87d6bc50755b340e32d88bbc35a005ae13aa66aa3b3
9ff4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a8bf4249c1551f054875ff3ef146502de6c99fd3afd10d78b41274196a35a6d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:24Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:09Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:09 crc kubenswrapper[4492]: I1126 06:49:09.722236 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-5bshd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a471ac3f-0ac0-4110-94bb-194c0de0af26\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a91fba8751c53be54b0060bfc75906ab11b521770ca44425d8910fa13027c9d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gt98z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-5bshd\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:09Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:09 crc kubenswrapper[4492]: I1126 06:49:09.735384 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"995e57c0-8e79-4857-8451-c7f7b51a05d3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70a3d5141782b7c4dcb2ef96a026e1fb4f4ef101c967f64a20ece097bbdf007f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1684e95c263df92c92efdd2240417c071f977b58d825abd2155277d7cce1fd57\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://15f4e01e3336948991062e895549de16371af485e1cc1f32591b5dc4fc62472b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\
\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef8e05b7e0643e9159acdd474c0f1ed97db182b0a7ed4f21b475ce6e4c051f52\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a6dd3695118a8c09585a7cfceb42ac5ae5898562c5f6442da6936f849a4e9f8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T06:48:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1126 06:48:41.573117 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1126 06:48:41.573321 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 06:48:41.575536 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3533228848/tls.crt::/tmp/serving-cert-3533228848/tls.key\\\\\\\"\\\\nI1126 06:48:41.958496 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 06:48:41.961105 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 06:48:41.961124 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 06:48:41.961145 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 06:48:41.961150 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 06:48:41.965068 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 06:48:41.965092 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:48:41.965097 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:48:41.965101 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 06:48:41.965103 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 06:48:41.965106 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 06:48:41.965108 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 06:48:41.965323 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 06:48:41.966098 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ca15652692a4dd6b01f7ec02b8ff37ac41ed740c4c47d451e0742aa80d0c028\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4421dd0243f7239dfa2ede847b9cb5230663e69a214dca3dfb1a4763ea76b233\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4421dd0243f7239dfa2ede847b9cb5230663e69a214dca3dfb1a4763ea76b233\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:24Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:09Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:09 crc kubenswrapper[4492]: I1126 06:49:09.751570 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:09Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:09 crc kubenswrapper[4492]: I1126 06:49:09.758286 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:09 crc kubenswrapper[4492]: I1126 06:49:09.758407 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:09 crc kubenswrapper[4492]: I1126 06:49:09.758486 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:09 crc kubenswrapper[4492]: I1126 06:49:09.758553 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:09 crc kubenswrapper[4492]: I1126 06:49:09.758612 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:09Z","lastTransitionTime":"2025-11-26T06:49:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:49:09 crc kubenswrapper[4492]: I1126 06:49:09.768561 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:09Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:09 crc kubenswrapper[4492]: I1126 06:49:09.779467 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2644f0895688786b5b70f08011457eed33cb0a7962ac6dde6b60dd3276497011\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://27d142c5328a6f659cd2cee0b6535403ccbfb07aee1ea29c928c9d80a847f4ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:09Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:09 crc kubenswrapper[4492]: I1126 06:49:09.788159 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:09Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:09 crc kubenswrapper[4492]: I1126 06:49:09.796479 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-hjxcm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfdb68d9-168b-4d04-a6ee-b2deef54a9ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f3b93237fa8e75f6423c8f194440aebb4fffec26f63b19b00396ee567fb454f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cpmw5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:50Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-hjxcm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:09Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:09 crc kubenswrapper[4492]: I1126 06:49:09.804945 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2gwwb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d23da2c-14b7-4671-b87e-7506855ca163\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://56a756e2798f3a758f7cc404b3c1e543389f88510f1f1bcef6bd603086b5ac96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c7zdg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://23c2c8bf0201054d839f80e0d6ee1423ef1140d1c59512cd787edbad1b611b2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c7zdg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:49:01Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2gwwb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:09Z is after 2025-08-24T17:21:41Z" Nov 26 
06:49:09 crc kubenswrapper[4492]: I1126 06:49:09.814983 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://78117c73db01f54f893d52844cab11a6257a1f6b6b582fb751065e1acc329620\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:09Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:09 crc kubenswrapper[4492]: I1126 06:49:09.824218 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4aa19f79274209a31db5cfe0a8ff6f71000fc4efb2d65dfab3f719d3a7f1ee9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:09Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:09 crc kubenswrapper[4492]: I1126 06:49:09.836921 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b104695-0850-4fb3-b2f8-f764435f8694\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5de69ea04f427c1f151433ee53b119dfa7e3cd603f924ebca340c5050ff7d097\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://738cf2a56bd431ea3dcd29c8a6e66881e618cf6592af8ce1852be9776c41f803\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dba16d525497282969e2ead66e1d4886793234d0f4d2ef143a8246329131b046\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://501133ebc0e93cbba7bea713d813e6f41caa28286ed59ae8d328b6f9050a1e71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0301a329531b99f93a3e91f672edc4ce17ae03cfd5562f5802362423f11670fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1519223368c78fc663c8f6674135a7c574425dccbd0151d6663836b4715e9f9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c02ed7c2507748e0a1171eb6e43daac324db91b4
329f8ca3c2aa7cabac7205b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e5f1a548541188ad7099cc52b4901b60d2cf3d21c19f41d3fa5f3a223a73d66\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T06:48:57Z\\\",\\\"message\\\":\\\"4490 5849 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1126 06:48:57.314640 5849 reflector.go:311] Stopping reflector *v1.EgressService (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI1126 06:48:57.314697 5849 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1126 06:48:57.314935 5849 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/factory.go:140\\\\nI1126 06:48:57.315204 5849 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1126 06:48:57.323801 5849 shared_informer.go:320] Caches are synced for node-tracker-controller\\\\nI1126 06:48:57.323821 5849 services_controller.go:204] Setting up event handlers for services for network=default\\\\nI1126 06:48:57.323859 5849 ovnkube.go:599] Stopped ovnkube\\\\nI1126 06:48:57.323888 5849 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1126 06:48:57.323985 5849 ovnkube.go:137] failed to run 
ov\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:56Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:49:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dcc2ea28d7ab1193d0836b56704a623efefea918b62fde4a41c3d5466a531a39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"con
tainerID\\\":\\\"cri-o://4001ee998f435fc6249ef42a3a6a9a8cf4a5031eb8999158ba3ba6bfa41a4a43\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4001ee998f435fc6249ef42a3a6a9a8cf4a5031eb8999158ba3ba6bfa41a4a43\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lghgp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:09Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:09 crc kubenswrapper[4492]: I1126 06:49:09.845233 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-s4gtb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1cc59fbe-82e1-406b-95b1-a26b6b8ef467\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk29d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk29d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:49:02Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-s4gtb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:09Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:09 crc kubenswrapper[4492]: I1126 06:49:09.855457 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-nrzjd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"026c3325-a592-4828-8e4f-08bcb790014a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c4955932b597b4b409c6c0bd2195c7918b56f1db3aca639a0d47656173b6176\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70dcd5b71e0ed41855711d244d6628b740a92d6ada3fa3114a76c280b83e7402\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70dcd5b71e0ed41855711d244d6628b740a92d6ada3fa3114a76c280b83e7402\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://46f1a12a78f284278cf74713bf7a46f4d62a7f40258ec65ae217b5fbb5729525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://46f1a12a78f284278cf74713bf7a46f4d62a7f40258ec65ae217b5fbb5729525\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://233ce2ea0f4758076db5c922254232e5cccdc2466e03c6044a0ba4077dd71e8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://233ce2ea0f4758076db5c922254232e5cccdc2466e03c6044a0ba4077dd71e8a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1f662c3ac64d38717f3c8eb17f7ab22b7f9af88661f97ddae2caeec71b793a68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f662c3ac64d38717f3c8eb17f7ab22b7f9af88661f97ddae2caeec71b793a68\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://39153e3f02feb4f9af860e81e6e426354d4113d26135aadb9bc708dcb2cb1770\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://39153e3f02feb4f9af860e81e6e426354d4113d26135aadb9bc708dcb2cb1770\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b34363c342a19d8eda1d5be4bea825a4e0f9a4281915b6ae5d0cc5b00ad7c926\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b34363c342a19d8eda1d5be4bea825a4e0f9a4281915b6ae5d0cc5b00ad7c926\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-nrzjd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:09Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:09 crc kubenswrapper[4492]: I1126 06:49:09.860604 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:09 crc kubenswrapper[4492]: I1126 06:49:09.860644 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:09 crc 
kubenswrapper[4492]: I1126 06:49:09.860654 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:09 crc kubenswrapper[4492]: I1126 06:49:09.860669 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:09 crc kubenswrapper[4492]: I1126 06:49:09.860679 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:09Z","lastTransitionTime":"2025-11-26T06:49:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:09 crc kubenswrapper[4492]: I1126 06:49:09.864428 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6lnwf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0151e6e0-df4e-4482-9309-f8cce9bc6ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f91d7ead0eaa9a8c8d4ec6372d35236fc33de1f8606616efadfee2ec6a71324\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cmsnp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6lnwf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:09Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:09 crc kubenswrapper[4492]: I1126 06:49:09.873195 4492 
status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"04bf18ad-d2a1-4b30-a3fa-2b6247363c82\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fad93839d2a51dffea51b659a6dcbfe24701e00ebb88e18329f7aa4351e1b4f5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lwjt9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://695ce8a08afa726c47c6aa48ddd828cbc420a9740de6cf165351e5bd68174a89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lwjt9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6blv7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-26T06:49:09Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:09 crc kubenswrapper[4492]: I1126 06:49:09.933184 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/1cc59fbe-82e1-406b-95b1-a26b6b8ef467-metrics-certs\") pod \"network-metrics-daemon-s4gtb\" (UID: \"1cc59fbe-82e1-406b-95b1-a26b6b8ef467\") " pod="openshift-multus/network-metrics-daemon-s4gtb" Nov 26 06:49:09 crc kubenswrapper[4492]: E1126 06:49:09.933368 4492 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 26 06:49:09 crc kubenswrapper[4492]: E1126 06:49:09.933454 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/1cc59fbe-82e1-406b-95b1-a26b6b8ef467-metrics-certs podName:1cc59fbe-82e1-406b-95b1-a26b6b8ef467 nodeName:}" failed. No retries permitted until 2025-11-26 06:49:17.933431175 +0000 UTC m=+53.817319473 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/1cc59fbe-82e1-406b-95b1-a26b6b8ef467-metrics-certs") pod "network-metrics-daemon-s4gtb" (UID: "1cc59fbe-82e1-406b-95b1-a26b6b8ef467") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 26 06:49:09 crc kubenswrapper[4492]: I1126 06:49:09.963645 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:09 crc kubenswrapper[4492]: I1126 06:49:09.963683 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:09 crc kubenswrapper[4492]: I1126 06:49:09.963693 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:09 crc kubenswrapper[4492]: I1126 06:49:09.963733 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:09 crc kubenswrapper[4492]: I1126 06:49:09.963746 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:09Z","lastTransitionTime":"2025-11-26T06:49:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:49:10 crc kubenswrapper[4492]: I1126 06:49:10.065391 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:10 crc kubenswrapper[4492]: I1126 06:49:10.065451 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:10 crc kubenswrapper[4492]: I1126 06:49:10.065463 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:10 crc kubenswrapper[4492]: I1126 06:49:10.065480 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:10 crc kubenswrapper[4492]: I1126 06:49:10.065490 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:10Z","lastTransitionTime":"2025-11-26T06:49:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:10 crc kubenswrapper[4492]: I1126 06:49:10.167904 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:10 crc kubenswrapper[4492]: I1126 06:49:10.167938 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:10 crc kubenswrapper[4492]: I1126 06:49:10.167950 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:10 crc kubenswrapper[4492]: I1126 06:49:10.167966 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:10 crc kubenswrapper[4492]: I1126 06:49:10.167979 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:10Z","lastTransitionTime":"2025-11-26T06:49:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:10 crc kubenswrapper[4492]: I1126 06:49:10.270546 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:10 crc kubenswrapper[4492]: I1126 06:49:10.270613 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:10 crc kubenswrapper[4492]: I1126 06:49:10.270627 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:10 crc kubenswrapper[4492]: I1126 06:49:10.270649 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:10 crc kubenswrapper[4492]: I1126 06:49:10.270682 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:10Z","lastTransitionTime":"2025-11-26T06:49:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:49:10 crc kubenswrapper[4492]: I1126 06:49:10.373019 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:10 crc kubenswrapper[4492]: I1126 06:49:10.373087 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:10 crc kubenswrapper[4492]: I1126 06:49:10.373099 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:10 crc kubenswrapper[4492]: I1126 06:49:10.373120 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:10 crc kubenswrapper[4492]: I1126 06:49:10.373134 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:10Z","lastTransitionTime":"2025-11-26T06:49:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:10 crc kubenswrapper[4492]: I1126 06:49:10.437803 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 06:49:10 crc kubenswrapper[4492]: I1126 06:49:10.437866 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 06:49:10 crc kubenswrapper[4492]: I1126 06:49:10.437907 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-s4gtb" Nov 26 06:49:10 crc kubenswrapper[4492]: E1126 06:49:10.437983 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 06:49:10 crc kubenswrapper[4492]: E1126 06:49:10.438148 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-s4gtb" podUID="1cc59fbe-82e1-406b-95b1-a26b6b8ef467" Nov 26 06:49:10 crc kubenswrapper[4492]: E1126 06:49:10.438324 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 06:49:10 crc kubenswrapper[4492]: I1126 06:49:10.474730 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:10 crc kubenswrapper[4492]: I1126 06:49:10.474760 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:10 crc kubenswrapper[4492]: I1126 06:49:10.474770 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:10 crc kubenswrapper[4492]: I1126 06:49:10.474781 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:10 crc kubenswrapper[4492]: I1126 06:49:10.474792 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:10Z","lastTransitionTime":"2025-11-26T06:49:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:10 crc kubenswrapper[4492]: I1126 06:49:10.576742 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:10 crc kubenswrapper[4492]: I1126 06:49:10.576777 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:10 crc kubenswrapper[4492]: I1126 06:49:10.576792 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:10 crc kubenswrapper[4492]: I1126 06:49:10.576808 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:10 crc kubenswrapper[4492]: I1126 06:49:10.576820 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:10Z","lastTransitionTime":"2025-11-26T06:49:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:49:10 crc kubenswrapper[4492]: I1126 06:49:10.678801 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:10 crc kubenswrapper[4492]: I1126 06:49:10.678833 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:10 crc kubenswrapper[4492]: I1126 06:49:10.678842 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:10 crc kubenswrapper[4492]: I1126 06:49:10.678854 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:10 crc kubenswrapper[4492]: I1126 06:49:10.678863 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:10Z","lastTransitionTime":"2025-11-26T06:49:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:10 crc kubenswrapper[4492]: I1126 06:49:10.699775 4492 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-lghgp_9b104695-0850-4fb3-b2f8-f764435f8694/ovnkube-controller/2.log" Nov 26 06:49:10 crc kubenswrapper[4492]: I1126 06:49:10.700470 4492 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-lghgp_9b104695-0850-4fb3-b2f8-f764435f8694/ovnkube-controller/1.log" Nov 26 06:49:10 crc kubenswrapper[4492]: I1126 06:49:10.703023 4492 generic.go:334] "Generic (PLEG): container finished" podID="9b104695-0850-4fb3-b2f8-f764435f8694" containerID="c02ed7c2507748e0a1171eb6e43daac324db91b4329f8ca3c2aa7cabac7205b4" exitCode=1 Nov 26 06:49:10 crc kubenswrapper[4492]: I1126 06:49:10.703079 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" event={"ID":"9b104695-0850-4fb3-b2f8-f764435f8694","Type":"ContainerDied","Data":"c02ed7c2507748e0a1171eb6e43daac324db91b4329f8ca3c2aa7cabac7205b4"} Nov 26 06:49:10 crc kubenswrapper[4492]: I1126 06:49:10.703130 4492 scope.go:117] "RemoveContainer" containerID="3e5f1a548541188ad7099cc52b4901b60d2cf3d21c19f41d3fa5f3a223a73d66" Nov 26 06:49:10 crc kubenswrapper[4492]: I1126 06:49:10.703748 4492 scope.go:117] "RemoveContainer" containerID="c02ed7c2507748e0a1171eb6e43daac324db91b4329f8ca3c2aa7cabac7205b4" Nov 26 06:49:10 crc kubenswrapper[4492]: E1126 06:49:10.703886 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-lghgp_openshift-ovn-kubernetes(9b104695-0850-4fb3-b2f8-f764435f8694)\"" pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" podUID="9b104695-0850-4fb3-b2f8-f764435f8694" Nov 26 06:49:10 crc kubenswrapper[4492]: I1126 06:49:10.715014 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"779b4f9a-92b7-4dcc-938a-e4de5decd688\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e170e91f442c9f45c7adfc9a5f8435cb51135522d5ac61f29829834c1f797e0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a8187b933b520c7a9c1c7f798f841f3892c249f52eddd13c0c7585a8bc916f20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b87661ddeafdf124a87d6bc50755b340e32d88bbc35a005ae13aa66aa3b39ff4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a8bf4249c1551f054875ff3ef146502de6c99fd3afd10d78b41274196a35a6d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:24Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:10Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:10 crc kubenswrapper[4492]: I1126 06:49:10.726157 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-5bshd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a471ac3f-0ac0-4110-94bb-194c0de0af26\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a91fba8751c53be54b0060bfc75906ab11b521770ca44425d8910fa13027c9d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run
/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gt98z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-5bshd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:10Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:10 crc kubenswrapper[4492]: I1126 06:49:10.735725 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"995e57c0-8e79-4857-8451-c7f7b51a05d3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70a3d5141782b7c4dcb2ef96a026e1fb4f4ef101c967f64a20ece097bbdf007f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1684e95c263df92c92efdd2240417c071f977b58d825abd2155277d7cce1fd57\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://15f4e01e3336948991062e895549de16371af485e1cc1f32591b5dc4fc62472b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef8e05b7e0643e9159acdd474c0f1ed97db182b0a7ed4f21b475ce6e4c051f52\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a6dd3695118a8c09585a7cfceb42ac5ae5898562c5f6442da6936f849a4e9f8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T06:48:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1126 06:48:41.573117 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1126 06:48:41.573321 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 06:48:41.575536 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3533228848/tls.crt::/tmp/serving-cert-3533228848/tls.key\\\\\\\"\\\\nI1126 06:48:41.958496 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 06:48:41.961105 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 06:48:41.961124 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 06:48:41.961145 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 06:48:41.961150 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 06:48:41.965068 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 06:48:41.965092 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:48:41.965097 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:48:41.965101 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 06:48:41.965103 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 06:48:41.965106 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 06:48:41.965108 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 06:48:41.965323 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 06:48:41.966098 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ca15652692a4dd6b01f7ec02b8ff37ac41ed740c4c47d451e0742aa80d0c028\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4421dd0243f7239dfa2ede847b9cb5230663e69a214dca3dfb1a4763ea76b233\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4421dd0243f7239dfa2ede847b9cb5230663e69a214dca3dfb1a4763ea76b233\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:24Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:10Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:10 crc kubenswrapper[4492]: I1126 06:49:10.749387 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:10Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:10 crc kubenswrapper[4492]: I1126 06:49:10.759386 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:10Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:10 crc kubenswrapper[4492]: I1126 06:49:10.768578 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2644f0895688786b5b70f08011457eed33cb0a7962ac6dde6b60dd3276497011\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://27d142c5328a6f659cd2cee0b6535403ccbfb07aee1ea29c928c9d80a847f4ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:10Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:10 crc kubenswrapper[4492]: I1126 06:49:10.777538 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:10Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:10 crc kubenswrapper[4492]: I1126 06:49:10.780398 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:10 crc kubenswrapper[4492]: I1126 06:49:10.780421 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:10 crc kubenswrapper[4492]: I1126 06:49:10.780431 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:10 crc kubenswrapper[4492]: I1126 06:49:10.780445 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:10 crc kubenswrapper[4492]: I1126 06:49:10.780456 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:10Z","lastTransitionTime":"2025-11-26T06:49:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:49:10 crc kubenswrapper[4492]: I1126 06:49:10.784445 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-hjxcm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfdb68d9-168b-4d04-a6ee-b2deef54a9ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f3b93237fa8e75f6423c8f194440aebb4fffec26f63b19b00396ee567fb454f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cpmw5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:50Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-hjxcm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:10Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:10 crc kubenswrapper[4492]: I1126 06:49:10.794448 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2gwwb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d23da2c-14b7-4671-b87e-7506855ca163\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://56a756e2798f3a758f7cc404b3c1e543389f88510f1f1bcef6bd603086b5ac96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c7zdg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://23c2c8bf0201054d839f80e0d6ee1423ef1140d1c59512cd787edbad1b611b2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c7zdg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:49:01Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2gwwb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:10Z is after 2025-08-24T17:21:41Z" Nov 26 
06:49:10 crc kubenswrapper[4492]: I1126 06:49:10.803586 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://78117c73db01f54f893d52844cab11a6257a1f6b6b582fb751065e1acc329620\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:10Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:10 crc kubenswrapper[4492]: I1126 06:49:10.812193 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4aa19f79274209a31db5cfe0a8ff6f71000fc4efb2d65dfab3f719d3a7f1ee9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:10Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:10 crc kubenswrapper[4492]: I1126 06:49:10.824674 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b104695-0850-4fb3-b2f8-f764435f8694\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5de69ea04f427c1f151433ee53b119dfa7e3cd603f924ebca340c5050ff7d097\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://738cf2a56bd431ea3dcd29c8a6e66881e618cf6592af8ce1852be9776c41f803\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dba16d525497282969e2ead66e1d4886793234d0f4d2ef143a8246329131b046\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://501133ebc0e93cbba7bea713d813e6f41caa28286ed59ae8d328b6f9050a1e71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0301a329531b99f93a3e91f672edc4ce17ae03cfd5562f5802362423f11670fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1519223368c78fc663c8f6674135a7c574425dccbd0151d6663836b4715e9f9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c02ed7c2507748e0a1171eb6e43daac324db91b4
329f8ca3c2aa7cabac7205b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e5f1a548541188ad7099cc52b4901b60d2cf3d21c19f41d3fa5f3a223a73d66\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T06:48:57Z\\\",\\\"message\\\":\\\"4490 5849 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1126 06:48:57.314640 5849 reflector.go:311] Stopping reflector *v1.EgressService (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI1126 06:48:57.314697 5849 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1126 06:48:57.314935 5849 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/factory.go:140\\\\nI1126 06:48:57.315204 5849 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1126 06:48:57.323801 5849 shared_informer.go:320] Caches are synced for node-tracker-controller\\\\nI1126 06:48:57.323821 5849 services_controller.go:204] Setting up event handlers for services for network=default\\\\nI1126 06:48:57.323859 5849 ovnkube.go:599] Stopped ovnkube\\\\nI1126 06:48:57.323888 5849 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1126 06:48:57.323985 5849 ovnkube.go:137] failed to run ov\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:56Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c02ed7c2507748e0a1171eb6e43daac324db91b4329f8ca3c2aa7cabac7205b4\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T06:49:10Z\\\",\\\"message\\\":\\\"ator/metrics LB template configs for network=default: []services.lbConfig(nil)\\\\nF1126 06:49:10.114199 6061 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:10Z is after 2025-08-24T17:21:41Z]\\\\nI1126 06:49:10.114196 6061 services_controller.go:451] Built service openshift-authentication-operator/metrics cluster-wide LB for network=default: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-authentication-operator/metrics_TCP_cluster\\\\\\\", 
UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-authentication-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:true, Empty\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:49:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dcc2ea28d7ab1193d0836b56704a623efefea918b62fde4a41c3d5466a531a39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"ho
stIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4001ee998f435fc6249ef42a3a6a9a8cf4a5031eb8999158ba3ba6bfa41a4a43\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4001ee998f435fc6249ef42a3a6a9a8cf4a5031eb8999158ba3ba6bfa41a4a43\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lghgp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:10Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:10 crc kubenswrapper[4492]: I1126 06:49:10.832067 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-s4gtb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1cc59fbe-82e1-406b-95b1-a26b6b8ef467\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk29d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk29d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:49:02Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-s4gtb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:10Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:10 crc kubenswrapper[4492]: I1126 06:49:10.842399 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-nrzjd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"026c3325-a592-4828-8e4f-08bcb790014a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c4955932b597b4b409c6c0bd2195c7918b56f1db3aca639a0d47656173b6176\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70dcd5b71e0ed41855711d244d6628b740a92d6ada3fa3114a76c280b83e7402\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70dcd5b71e0ed41855711d244d6628b740a92d6ada3fa3114a76c280b83e7402\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://46f1a12a78f284278cf74713bf7a46f4d62a7f40258ec65ae217b5fbb5729525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://46f1a12a78f284278cf74713bf7a46f4d62a7f40258ec65ae217b5fbb5729525\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://233ce2ea0f4758076db5c922254232e5cccdc2466e03c6044a0ba4077dd71e8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://233ce2ea0f4758076db5c922254232e5cccdc2466e03c6044a0ba4077dd71e8a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1f662c3ac64d38717f3c8eb17f7ab22b7f9af88661f97ddae2caeec71b793a68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f662c3ac64d38717f3c8eb17f7ab22b7f9af88661f97ddae2caeec71b793a68\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://39153e3f02feb4f9af860e81e6e426354d4113d26135aadb9bc708dcb2cb1770\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://39153e3f02feb4f9af860e81e6e426354d4113d26135aadb9bc708dcb2cb1770\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b34363c342a19d8eda1d5be4bea825a4e0f9a4281915b6ae5d0cc5b00ad7c926\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b34363c342a19d8eda1d5be4bea825a4e0f9a4281915b6ae5d0cc5b00ad7c926\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-nrzjd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:10Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:10 crc kubenswrapper[4492]: I1126 06:49:10.851014 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6lnwf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0151e6e0-df4e-4482-9309-f8cce9bc6ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f91d7ead0eaa9a8c8d4ec6372d35236fc33de1f8606616efadfee2ec6a71324\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cmsnp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6lnwf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:10Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:10 crc kubenswrapper[4492]: I1126 06:49:10.858596 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"04bf18ad-d2a1-4b30-a3fa-2b6247363c82\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fad93839d2a51dffea51b659a6dcbfe24701e00ebb88e18329f7aa4351e1b4f5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lwjt9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://695ce8a08afa726c47c6aa48ddd828cbc420a9740de6cf165351e5bd68174a89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lwjt9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6blv7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:10Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:10 crc kubenswrapper[4492]: I1126 06:49:10.882263 4492 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:10 crc kubenswrapper[4492]: I1126 06:49:10.882371 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:10 crc kubenswrapper[4492]: I1126 06:49:10.882431 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:10 crc kubenswrapper[4492]: I1126 06:49:10.882499 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:10 crc kubenswrapper[4492]: I1126 06:49:10.882556 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:10Z","lastTransitionTime":"2025-11-26T06:49:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:10 crc kubenswrapper[4492]: I1126 06:49:10.984350 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:10 crc kubenswrapper[4492]: I1126 06:49:10.984381 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:10 crc kubenswrapper[4492]: I1126 06:49:10.984390 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:10 crc kubenswrapper[4492]: I1126 06:49:10.984403 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:10 crc kubenswrapper[4492]: I1126 06:49:10.984415 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:10Z","lastTransitionTime":"2025-11-26T06:49:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:49:11 crc kubenswrapper[4492]: I1126 06:49:11.035926 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" Nov 26 06:49:11 crc kubenswrapper[4492]: I1126 06:49:11.086828 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:11 crc kubenswrapper[4492]: I1126 06:49:11.086871 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:11 crc kubenswrapper[4492]: I1126 06:49:11.086883 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:11 crc kubenswrapper[4492]: I1126 06:49:11.086898 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:11 crc kubenswrapper[4492]: I1126 06:49:11.086910 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:11Z","lastTransitionTime":"2025-11-26T06:49:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:11 crc kubenswrapper[4492]: I1126 06:49:11.188930 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:11 crc kubenswrapper[4492]: I1126 06:49:11.188969 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:11 crc kubenswrapper[4492]: I1126 06:49:11.188979 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:11 crc kubenswrapper[4492]: I1126 06:49:11.188994 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:11 crc kubenswrapper[4492]: I1126 06:49:11.189007 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:11Z","lastTransitionTime":"2025-11-26T06:49:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:49:11 crc kubenswrapper[4492]: I1126 06:49:11.290875 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:11 crc kubenswrapper[4492]: I1126 06:49:11.290901 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:11 crc kubenswrapper[4492]: I1126 06:49:11.290914 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:11 crc kubenswrapper[4492]: I1126 06:49:11.290924 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:11 crc kubenswrapper[4492]: I1126 06:49:11.290936 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:11Z","lastTransitionTime":"2025-11-26T06:49:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:11 crc kubenswrapper[4492]: I1126 06:49:11.393148 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:11 crc kubenswrapper[4492]: I1126 06:49:11.393192 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:11 crc kubenswrapper[4492]: I1126 06:49:11.393200 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:11 crc kubenswrapper[4492]: I1126 06:49:11.393210 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:11 crc kubenswrapper[4492]: I1126 06:49:11.393218 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:11Z","lastTransitionTime":"2025-11-26T06:49:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:11 crc kubenswrapper[4492]: I1126 06:49:11.438244 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 06:49:11 crc kubenswrapper[4492]: E1126 06:49:11.438375 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 06:49:11 crc kubenswrapper[4492]: I1126 06:49:11.494808 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:11 crc kubenswrapper[4492]: I1126 06:49:11.494863 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:11 crc kubenswrapper[4492]: I1126 06:49:11.494874 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:11 crc kubenswrapper[4492]: I1126 06:49:11.494890 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:11 crc kubenswrapper[4492]: I1126 06:49:11.494902 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:11Z","lastTransitionTime":"2025-11-26T06:49:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:11 crc kubenswrapper[4492]: I1126 06:49:11.596975 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:11 crc kubenswrapper[4492]: I1126 06:49:11.597005 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:11 crc kubenswrapper[4492]: I1126 06:49:11.597016 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:11 crc kubenswrapper[4492]: I1126 06:49:11.597028 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:11 crc kubenswrapper[4492]: I1126 06:49:11.597036 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:11Z","lastTransitionTime":"2025-11-26T06:49:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:49:11 crc kubenswrapper[4492]: I1126 06:49:11.698790 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:11 crc kubenswrapper[4492]: I1126 06:49:11.698826 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:11 crc kubenswrapper[4492]: I1126 06:49:11.698835 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:11 crc kubenswrapper[4492]: I1126 06:49:11.698848 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:11 crc kubenswrapper[4492]: I1126 06:49:11.698860 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:11Z","lastTransitionTime":"2025-11-26T06:49:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:11 crc kubenswrapper[4492]: I1126 06:49:11.707047 4492 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-lghgp_9b104695-0850-4fb3-b2f8-f764435f8694/ovnkube-controller/2.log" Nov 26 06:49:11 crc kubenswrapper[4492]: I1126 06:49:11.713522 4492 scope.go:117] "RemoveContainer" containerID="c02ed7c2507748e0a1171eb6e43daac324db91b4329f8ca3c2aa7cabac7205b4" Nov 26 06:49:11 crc kubenswrapper[4492]: E1126 06:49:11.713680 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-lghgp_openshift-ovn-kubernetes(9b104695-0850-4fb3-b2f8-f764435f8694)\"" pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" podUID="9b104695-0850-4fb3-b2f8-f764435f8694" Nov 26 06:49:11 crc kubenswrapper[4492]: I1126 06:49:11.722508 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-hjxcm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfdb68d9-168b-4d04-a6ee-b2deef54a9ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f3b93237fa8e75f6423c8f194440aebb4fffec26f63b19b00396ee567fb454f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cpmw5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:50Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-hjxcm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:11Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:11 crc kubenswrapper[4492]: I1126 06:49:11.730415 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2gwwb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d23da2c-14b7-4671-b87e-7506855ca163\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://56a756e2798f3a758f7cc404b3c1e543389f88510f1f1bcef6bd603086b5ac96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c7zdg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://23c2c8bf0201054d839f80e0d6ee1423ef1140d1c59512cd787edbad1b611b2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c7zdg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:49:01Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2gwwb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:11Z is after 2025-08-24T17:21:41Z" Nov 26 
06:49:11 crc kubenswrapper[4492]: I1126 06:49:11.740308 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"995e57c0-8e79-4857-8451-c7f7b51a05d3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70a3d5141782b7c4dcb2ef96a026e1fb4f4ef101c967f64a20ece097bbdf007f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1684e95c263df92c92efdd2240417c071f977b58d825abd2155277d7cce1fd57\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://15f4e01e3336948991062e895549de16371af485e1cc1f32591b5dc4fc62472b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\
\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef8e05b7e0643e9159acdd474c0f1ed97db182b0a7ed4f21b475ce6e4c051f52\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a6dd3695118a8c09585a7cfceb42ac5ae5898562c5f6442da6936f849a4e9f8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T06:48:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1126 06:48:41.573117 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1126 06:48:41.573321 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 06:48:41.575536 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3533228848/tls.crt::/tmp/serving-cert-3533228848/tls.key\\\\\\\"\\\\nI1126 06:48:41.958496 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 06:48:41.961105 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 06:48:41.961124 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 06:48:41.961145 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 06:48:41.961150 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 06:48:41.965068 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 06:48:41.965092 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:48:41.965097 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:48:41.965101 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 06:48:41.965103 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 06:48:41.965106 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 06:48:41.965108 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 06:48:41.965323 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 06:48:41.966098 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ca15652692a4dd6b01f7ec02b8ff37ac41ed740c4c47d451e0742aa80d0c028\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4421dd0243f7239dfa2ede847b9cb5230663e69a214dca3dfb1a4763ea76b233\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4421dd0243f7239dfa2ede847b9cb5230663e69a214dca3dfb1a4763ea76b233\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:24Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:11Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:11 crc kubenswrapper[4492]: I1126 06:49:11.749430 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:11Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:11 crc kubenswrapper[4492]: I1126 06:49:11.757393 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:11Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:11 crc kubenswrapper[4492]: I1126 06:49:11.765805 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2644f0895688786b5b70f08011457eed33cb0a7962ac6dde6b60dd3276497011\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://27d142c5328a6f659cd2cee0b6535403ccbfb07aee1ea29c928c9d80a847f4ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:11Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:11 crc kubenswrapper[4492]: I1126 06:49:11.774300 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:11Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:11 crc kubenswrapper[4492]: I1126 06:49:11.783374 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://78117c73db01f54f893d52844cab11a6257a1f6b6b582fb751065e1acc329620\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:11Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:11 crc kubenswrapper[4492]: I1126 06:49:11.792336 4492 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4aa19f79274209a31db5cfe0a8ff6f71000fc4efb2d65dfab3f719d3a7f1ee9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:11Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:11 crc kubenswrapper[4492]: I1126 06:49:11.801130 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:11 crc kubenswrapper[4492]: I1126 06:49:11.801162 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:11 crc kubenswrapper[4492]: I1126 06:49:11.801195 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:11 crc kubenswrapper[4492]: I1126 06:49:11.801214 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:11 crc kubenswrapper[4492]: I1126 06:49:11.801226 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:11Z","lastTransitionTime":"2025-11-26T06:49:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:49:11 crc kubenswrapper[4492]: I1126 06:49:11.806450 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b104695-0850-4fb3-b2f8-f764435f8694\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5de69ea04f427c1f151433ee53b119dfa7e3cd603f924ebca340c5050ff7d097\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://738cf2a56bd431ea3dcd29c8a6e66881e618cf6592af8ce1852be9776c41f803\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://dba16d525497282969e2ead66e1d4886793234d0f4d2ef143a8246329131b046\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://501133ebc0e93cbba7bea713d813e6f41caa28286ed59ae8d328b6f9050a1e71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0301a329531b99f93a3e91f672edc4ce17ae03cfd5562f5802362423f11670fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1519223368c78fc663c8f6674135a7c574425dccbd0151d6663836b4715e9f9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c02ed7c2507748e0a1171eb6e43daac324db91b4329f8ca3c2aa7cabac7205b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c02ed7c2507748e0a1171eb6e43daac324db91b4329f8ca3c2aa7cabac7205b4\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T06:49:10Z\\\",\\\"message\\\":\\\"ator/metrics LB template configs for network=default: []services.lbConfig(nil)\\\\nF1126 06:49:10.114199 6061 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:10Z is after 2025-08-24T17:21:41Z]\\\\nI1126 06:49:10.114196 6061 services_controller.go:451] Built service openshift-authentication-operator/metrics cluster-wide LB for network=default: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-authentication-operator/metrics_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-authentication-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:true, 
Empty\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:49:09Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-lghgp_openshift-ovn-kubernetes(9b104695-0850-4fb3-b2f8-f764435f8694)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dcc2ea28d7ab1193d0836b56704a623efefea918b62fde4a41c3d5466a531a39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursi
veReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4001ee998f435fc6249ef42a3a6a9a8cf4a5031eb8999158ba3ba6bfa41a4a43\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4001ee998f435fc6249ef42a3a6a9a8cf4a5031eb8999158ba3ba6bfa41a4a43\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lghgp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:11Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:11 crc kubenswrapper[4492]: I1126 06:49:11.815362 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-s4gtb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1cc59fbe-82e1-406b-95b1-a26b6b8ef467\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk29d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk29d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:49:02Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-s4gtb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:11Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:11 crc kubenswrapper[4492]: I1126 06:49:11.825775 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-nrzjd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"026c3325-a592-4828-8e4f-08bcb790014a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c4955932b597b4b409c6c0bd2195c7918b56f1db3aca639a0d47656173b6176\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70dcd5b71e0ed41855711d244d6628b740a92d6ada3fa3114a76c280b83e7402\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70dcd5b71e0ed41855711d244d6628b740a92d6ada3fa3114a76c280b83e7402\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://46f1a12a78f284278cf74713bf7a46f4d62a7f40258ec65ae217b5fbb5729525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://46f1a12a78f284278cf74713bf7a46f4d62a7f40258ec65ae217b5fbb5729525\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://233ce2ea0f4758076db5c922254232e5cccdc2466e03c6044a0ba4077dd71e8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://233ce2ea0f4758076db5c922254232e5cccdc2466e03c6044a0ba4077dd71e8a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1f662c3ac64d38717f3c8eb17f7ab22b7f9af88661f97ddae2caeec71b793a68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f662c3ac64d38717f3c8eb17f7ab22b7f9af88661f97ddae2caeec71b793a68\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://39153e3f02feb4f9af860e81e6e426354d4113d26135aadb9bc708dcb2cb1770\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://39153e3f02feb4f9af860e81e6e426354d4113d26135aadb9bc708dcb2cb1770\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b34363c342a19d8eda1d5be4bea825a4e0f9a4281915b6ae5d0cc5b00ad7c926\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b34363c342a19d8eda1d5be4bea825a4e0f9a4281915b6ae5d0cc5b00ad7c926\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-nrzjd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:11Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:11 crc kubenswrapper[4492]: I1126 06:49:11.835396 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6lnwf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0151e6e0-df4e-4482-9309-f8cce9bc6ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f91d7ead0eaa9a8c8d4ec6372d35236fc33de1f8606616efadfee2ec6a71324\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cmsnp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6lnwf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:11Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:11 crc kubenswrapper[4492]: I1126 06:49:11.846505 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"04bf18ad-d2a1-4b30-a3fa-2b6247363c82\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fad93839d2a51dffea51b659a6dcbfe24701e00ebb88e18329f7aa4351e1b4f5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lwjt9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://695ce8a08afa726c47c6aa48ddd828cbc420a9740de6cf165351e5bd68174a89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lwjt9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6blv7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:11Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:11 crc kubenswrapper[4492]: I1126 06:49:11.855508 4492 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"779b4f9a-92b7-4dcc-938a-e4de5decd688\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e170e91f442c9f45c7adfc9a5f8435cb51135522d5ac61f29829834c1f797e0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a8187b933b520c7a9c1c7f798f841f3892c249f52eddd13c0c7585a8bc916f20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b87661ddeafdf124a87d6bc50755b340e32d88bbc35a005ae13aa66aa3b39ff4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a8bf4249c1551f054875ff3ef1
46502de6c99fd3afd10d78b41274196a35a6d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:24Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:11Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:11 crc kubenswrapper[4492]: I1126 06:49:11.864152 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-5bshd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a471ac3f-0ac0-4110-94bb-194c0de0af26\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a91fba8751c53be54b0060bfc75906ab11b521770ca44425d8910fa13027c9d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-
cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gt98z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-5bshd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:11Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:11 crc kubenswrapper[4492]: I1126 06:49:11.904337 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:11 crc kubenswrapper[4492]: I1126 06:49:11.904380 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:11 crc kubenswrapper[4492]: I1126 06:49:11.904393 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:11 crc kubenswrapper[4492]: I1126 06:49:11.904412 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:11 crc kubenswrapper[4492]: I1126 06:49:11.904426 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:11Z","lastTransitionTime":"2025-11-26T06:49:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:49:11 crc kubenswrapper[4492]: I1126 06:49:11.995102 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 26 06:49:12 crc kubenswrapper[4492]: I1126 06:49:12.003637 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler/openshift-kube-scheduler-crc"] Nov 26 06:49:12 crc kubenswrapper[4492]: I1126 06:49:12.004353 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-s4gtb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1cc59fbe-82e1-406b-95b1-a26b6b8ef467\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk29d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk29d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:49:02Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-s4gtb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:12Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:12 crc kubenswrapper[4492]: I1126 06:49:12.006157 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:12 crc kubenswrapper[4492]: I1126 06:49:12.006206 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:12 crc kubenswrapper[4492]: I1126 06:49:12.006218 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:12 crc kubenswrapper[4492]: I1126 06:49:12.006232 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:12 crc kubenswrapper[4492]: I1126 06:49:12.006246 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:12Z","lastTransitionTime":"2025-11-26T06:49:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:12 crc kubenswrapper[4492]: I1126 06:49:12.015644 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://78117c73db01f54f893d52844cab11a6257a1f6b6b582fb751065e1acc329620\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-11-26T06:49:12Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:12 crc kubenswrapper[4492]: I1126 06:49:12.025254 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4aa19f79274209a31db5cfe0a8ff6f71000fc4efb2d65dfab3f719d3a7f1ee9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:12Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:12 crc kubenswrapper[4492]: I1126 06:49:12.039437 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b104695-0850-4fb3-b2f8-f764435f8694\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5de69ea04f427c1f151433ee53b119dfa7e3cd603f924ebca340c5050ff7d097\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://738cf2a56bd431ea3dcd29c8a6e66881e618cf6592af8ce1852be9776c41f803\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dba16d525497282969e2ead66e1d4886793234d0f4d2ef143a8246329131b046\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://501133ebc0e93cbba7bea713d813e6f41caa28286ed59ae8d328b6f9050a1e71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0301a329531b99f93a3e91f672edc4ce17ae03cfd5562f5802362423f11670fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1519223368c78fc663c8f6674135a7c574425dccbd0151d6663836b4715e9f9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c02ed7c2507748e0a1171eb6e43daac324db91b4329f8ca3c2aa7cabac7205b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c02ed7c2507748e0a1171eb6e43daac324db91b4329f8ca3c2aa7cabac7205b4\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T06:49:10Z\\\",\\\"message\\\":\\\"ator/metrics LB template configs for network=default: []services.lbConfig(nil)\\\\nF1126 06:49:10.114199 6061 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:10Z is after 2025-08-24T17:21:41Z]\\\\nI1126 06:49:10.114196 6061 services_controller.go:451] Built service openshift-authentication-operator/metrics cluster-wide LB for network=default: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-authentication-operator/metrics_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-authentication-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:true, Empty\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:49:09Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-lghgp_openshift-ovn-kubernetes(9b104695-0850-4fb3-b2f8-f764435f8694)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dcc2ea28d7ab1193d0836b56704a623efefea918b62fde4a41c3d5466a531a39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4001ee998f435fc6249ef42a3a6a9a8cf4a5031eb8999158ba3ba6bfa41a4a43\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4001ee998f435fc6249ef42a3a6a9a8cf4a5031eb8999158ba3ba6bfa41a4a43\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lghgp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:12Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:12 crc kubenswrapper[4492]: I1126 06:49:12.050938 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-nrzjd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"026c3325-a592-4828-8e4f-08bcb790014a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c4955932b597b4b409c6c0bd2195c7918b56f1db3aca639a0d47656173b6176\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"
}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70dcd5b71e0ed41855711d244d6628b740a92d6ada3fa3114a76c280b83e7402\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70dcd5b71e0ed41855711d244d6628b740a92d6ada3fa3114a76c280b83e7402\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://46f1a12a78f284278cf74713bf7a46f4d62a7f40258ec65ae217b5fbb5729525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://46f1a12a78f284278cf74713bf7a46f4d62a7f40258ec65ae217b5fbb5729525\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://233ce2ea0f4758076db5c922254232e5cccdc2466e03c6044a0ba4077dd71e8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://233ce2ea0f4758076db5c922254232e5cccdc2466e03c6044a0ba4077dd71e8a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt
\\\":\\\"2025-11-26T06:48:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1f662c3ac64d38717f3c8eb17f7ab22b7f9af88661f97ddae2caeec71b793a68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f662c3ac64d38717f3c8eb17f7ab22b7f9af88661f97ddae2caeec71b793a68\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://39153e3f02feb4f9af860e81e6e426354d4113d26135aadb9bc708dcb2cb1770\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://39153e3f02feb4f9af860e81e6e426354d4113d26135aadb9bc708dcb2cb1770\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b34363c342a19d8eda1d5be4bea825a4e0f9a4281915b6ae5d0cc5b00ad7c926\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"nam
e\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b34363c342a19d8eda1d5be4bea825a4e0f9a4281915b6ae5d0cc5b00ad7c926\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-nrzjd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:12Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:12 crc kubenswrapper[4492]: I1126 06:49:12.058633 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6lnwf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0151e6e0-df4e-4482-9309-f8cce9bc6ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f91d7ead0eaa9a8c8d4ec6372d35236fc33de1f8606616efadfee2ec6a71324\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cmsnp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\
":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6lnwf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:12Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:12 crc kubenswrapper[4492]: I1126 06:49:12.066326 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"04bf18ad-d2a1-4b30-a3fa-2b6247363c82\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fad93839d2a51dffea51b659a6dcbfe24701e00ebb88e18329f7aa4351e1b4f5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lwjt9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://695ce8a08afa726c47c6aa48ddd828cbc420a9740de6cf165351e5bd68174a89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lwjt9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"
192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6blv7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:12Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:12 crc kubenswrapper[4492]: I1126 06:49:12.075672 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"779b4f9a-92b7-4dcc-938a-e4de5decd688\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e170e91f442c9f45c7adfc9a5f8435cb51135522d5ac61f29829834c1f797e0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a8187b933b520c7a9c1c7f798f841f3892c249f52eddd13c0c7585a8bc916f20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b87661ddeafdf124a87d6bc50755b340e32d88bbc35a005ae13aa66aa3b39ff4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshi
ft-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a8bf4249c1551f054875ff3ef146502de6c99fd3afd10d78b41274196a35a6d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:24Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:12Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:12 crc kubenswrapper[4492]: I1126 06:49:12.085310 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-5bshd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a471ac3f-0ac0-4110-94bb-194c0de0af26\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a91fba8751c53be54b0060bfc75906ab11b521770ca44425d8910fa13027c9d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gt98z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-5bshd\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:12Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:12 crc kubenswrapper[4492]: I1126 06:49:12.094902 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:12Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:12 crc kubenswrapper[4492]: I1126 06:49:12.101928 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-hjxcm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfdb68d9-168b-4d04-a6ee-b2deef54a9ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f3b93237fa8e75f6423c8f194440aebb4fffec26f63b19b00396ee567fb454f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cpmw5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:50Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-hjxcm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:12Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:12 crc kubenswrapper[4492]: I1126 06:49:12.108080 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:12 crc kubenswrapper[4492]: I1126 06:49:12.108121 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:12 crc kubenswrapper[4492]: I1126 06:49:12.108132 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:12 crc kubenswrapper[4492]: I1126 06:49:12.108149 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:12 crc kubenswrapper[4492]: I1126 06:49:12.108184 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:12Z","lastTransitionTime":"2025-11-26T06:49:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:12 crc kubenswrapper[4492]: I1126 06:49:12.112532 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2gwwb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d23da2c-14b7-4671-b87e-7506855ca163\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://56a756e2798f3a758f7cc404b3c1e543389f88510f1f1bcef6bd603086b5ac96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c7zdg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://23c2c8bf0201054d839f80e0d6ee1423ef1140d1c59512cd787edbad1b611b2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c7zdg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:49:01Z\\\"}}\" 
for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2gwwb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:12Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:12 crc kubenswrapper[4492]: I1126 06:49:12.123778 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"995e57c0-8e79-4857-8451-c7f7b51a05d3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70a3d5141782b7c4dcb2ef96a026e1fb4f4ef101c967f64a20ece097bbdf007f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1684e95c263df92c92efdd2240417c071f977b58d825abd2155277d7cce1fd57\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://15f4e01e3336948991062e895549de16371af485e1cc1f32591b5dc4fc62472b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"
lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef8e05b7e0643e9159acdd474c0f1ed97db182b0a7ed4f21b475ce6e4c051f52\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a6dd3695118a8c09585a7cfceb42ac5ae5898562c5f6442da6936f849a4e9f8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T06:48:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1126 06:48:41.573117 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1126 06:48:41.573321 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 06:48:41.575536 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3533228848/tls.crt::/tmp/serving-cert-3533228848/tls.key\\\\\\\"\\\\nI1126 06:48:41.958496 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 06:48:41.961105 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 06:48:41.961124 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 06:48:41.961145 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 06:48:41.961150 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 06:48:41.965068 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 06:48:41.965092 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:48:41.965097 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:48:41.965101 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 06:48:41.965103 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 06:48:41.965106 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 06:48:41.965108 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 06:48:41.965323 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 06:48:41.966098 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ca15652692a4dd6b01f7ec02b8ff37ac41ed740c4c47d451e0742aa80d0c028\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4421dd0243f7239dfa2ede847b9cb5230663e69a214dca3dfb1a4763ea76b233\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4421dd0243f7239dfa2ede847b9cb5230663e69a214dca3dfb1a4763ea76b233\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:24Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:12Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:12 crc kubenswrapper[4492]: I1126 06:49:12.132660 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:12Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:12 crc kubenswrapper[4492]: I1126 06:49:12.141766 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:12Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:12 crc kubenswrapper[4492]: I1126 06:49:12.152561 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2644f0895688786b5b70f08011457eed33cb0a7962ac6dde6b60dd3276497011\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://27d142c5328a6f659cd2cee0b6535403ccbfb07aee1ea29c928c9d80a847f4ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:12Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:12 crc kubenswrapper[4492]: I1126 06:49:12.209880 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:12 crc kubenswrapper[4492]: I1126 06:49:12.209917 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:12 crc kubenswrapper[4492]: I1126 06:49:12.209928 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:12 crc kubenswrapper[4492]: I1126 06:49:12.209949 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:12 crc kubenswrapper[4492]: I1126 06:49:12.209963 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:12Z","lastTransitionTime":"2025-11-26T06:49:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:12 crc kubenswrapper[4492]: I1126 06:49:12.312020 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:12 crc kubenswrapper[4492]: I1126 06:49:12.312047 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:12 crc kubenswrapper[4492]: I1126 06:49:12.312063 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:12 crc kubenswrapper[4492]: I1126 06:49:12.312082 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:12 crc kubenswrapper[4492]: I1126 06:49:12.312091 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:12Z","lastTransitionTime":"2025-11-26T06:49:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:49:12 crc kubenswrapper[4492]: I1126 06:49:12.413932 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:12 crc kubenswrapper[4492]: I1126 06:49:12.413972 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:12 crc kubenswrapper[4492]: I1126 06:49:12.413980 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:12 crc kubenswrapper[4492]: I1126 06:49:12.413995 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:12 crc kubenswrapper[4492]: I1126 06:49:12.414007 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:12Z","lastTransitionTime":"2025-11-26T06:49:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:12 crc kubenswrapper[4492]: I1126 06:49:12.438275 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 06:49:12 crc kubenswrapper[4492]: E1126 06:49:12.438395 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 06:49:12 crc kubenswrapper[4492]: I1126 06:49:12.438285 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 06:49:12 crc kubenswrapper[4492]: E1126 06:49:12.438488 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 06:49:12 crc kubenswrapper[4492]: I1126 06:49:12.438285 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-s4gtb" Nov 26 06:49:12 crc kubenswrapper[4492]: E1126 06:49:12.438559 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-s4gtb" podUID="1cc59fbe-82e1-406b-95b1-a26b6b8ef467" Nov 26 06:49:12 crc kubenswrapper[4492]: I1126 06:49:12.516156 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:12 crc kubenswrapper[4492]: I1126 06:49:12.516207 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:12 crc kubenswrapper[4492]: I1126 06:49:12.516220 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:12 crc kubenswrapper[4492]: I1126 06:49:12.516232 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:12 crc kubenswrapper[4492]: I1126 06:49:12.516242 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:12Z","lastTransitionTime":"2025-11-26T06:49:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:12 crc kubenswrapper[4492]: I1126 06:49:12.618287 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:12 crc kubenswrapper[4492]: I1126 06:49:12.618323 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:12 crc kubenswrapper[4492]: I1126 06:49:12.618332 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:12 crc kubenswrapper[4492]: I1126 06:49:12.618348 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:12 crc kubenswrapper[4492]: I1126 06:49:12.618360 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:12Z","lastTransitionTime":"2025-11-26T06:49:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:49:12 crc kubenswrapper[4492]: I1126 06:49:12.719845 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:12 crc kubenswrapper[4492]: I1126 06:49:12.719877 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:12 crc kubenswrapper[4492]: I1126 06:49:12.719888 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:12 crc kubenswrapper[4492]: I1126 06:49:12.719900 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:12 crc kubenswrapper[4492]: I1126 06:49:12.719909 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:12Z","lastTransitionTime":"2025-11-26T06:49:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:12 crc kubenswrapper[4492]: I1126 06:49:12.821920 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:12 crc kubenswrapper[4492]: I1126 06:49:12.821956 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:12 crc kubenswrapper[4492]: I1126 06:49:12.821967 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:12 crc kubenswrapper[4492]: I1126 06:49:12.821978 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:12 crc kubenswrapper[4492]: I1126 06:49:12.821987 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:12Z","lastTransitionTime":"2025-11-26T06:49:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:12 crc kubenswrapper[4492]: I1126 06:49:12.923768 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:12 crc kubenswrapper[4492]: I1126 06:49:12.923797 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:12 crc kubenswrapper[4492]: I1126 06:49:12.923807 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:12 crc kubenswrapper[4492]: I1126 06:49:12.923819 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:12 crc kubenswrapper[4492]: I1126 06:49:12.923826 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:12Z","lastTransitionTime":"2025-11-26T06:49:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:49:13 crc kubenswrapper[4492]: I1126 06:49:13.025667 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:13 crc kubenswrapper[4492]: I1126 06:49:13.025704 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:13 crc kubenswrapper[4492]: I1126 06:49:13.025720 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:13 crc kubenswrapper[4492]: I1126 06:49:13.025740 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:13 crc kubenswrapper[4492]: I1126 06:49:13.025751 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:13Z","lastTransitionTime":"2025-11-26T06:49:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:13 crc kubenswrapper[4492]: I1126 06:49:13.127363 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:13 crc kubenswrapper[4492]: I1126 06:49:13.127408 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:13 crc kubenswrapper[4492]: I1126 06:49:13.127419 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:13 crc kubenswrapper[4492]: I1126 06:49:13.127437 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:13 crc kubenswrapper[4492]: I1126 06:49:13.127451 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:13Z","lastTransitionTime":"2025-11-26T06:49:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:13 crc kubenswrapper[4492]: I1126 06:49:13.230156 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:13 crc kubenswrapper[4492]: I1126 06:49:13.230226 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:13 crc kubenswrapper[4492]: I1126 06:49:13.230239 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:13 crc kubenswrapper[4492]: I1126 06:49:13.230258 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:13 crc kubenswrapper[4492]: I1126 06:49:13.230272 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:13Z","lastTransitionTime":"2025-11-26T06:49:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:49:13 crc kubenswrapper[4492]: I1126 06:49:13.332162 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:13 crc kubenswrapper[4492]: I1126 06:49:13.332218 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:13 crc kubenswrapper[4492]: I1126 06:49:13.332227 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:13 crc kubenswrapper[4492]: I1126 06:49:13.332240 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:13 crc kubenswrapper[4492]: I1126 06:49:13.332250 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:13Z","lastTransitionTime":"2025-11-26T06:49:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:13 crc kubenswrapper[4492]: I1126 06:49:13.434576 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:13 crc kubenswrapper[4492]: I1126 06:49:13.434687 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:13 crc kubenswrapper[4492]: I1126 06:49:13.434765 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:13 crc kubenswrapper[4492]: I1126 06:49:13.434846 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:13 crc kubenswrapper[4492]: I1126 06:49:13.434932 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:13Z","lastTransitionTime":"2025-11-26T06:49:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:13 crc kubenswrapper[4492]: I1126 06:49:13.437939 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 06:49:13 crc kubenswrapper[4492]: E1126 06:49:13.438084 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 06:49:13 crc kubenswrapper[4492]: I1126 06:49:13.537433 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:13 crc kubenswrapper[4492]: I1126 06:49:13.537461 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:13 crc kubenswrapper[4492]: I1126 06:49:13.537470 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:13 crc kubenswrapper[4492]: I1126 06:49:13.537484 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:13 crc kubenswrapper[4492]: I1126 06:49:13.537494 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:13Z","lastTransitionTime":"2025-11-26T06:49:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:13 crc kubenswrapper[4492]: I1126 06:49:13.638990 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:13 crc kubenswrapper[4492]: I1126 06:49:13.639021 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:13 crc kubenswrapper[4492]: I1126 06:49:13.639030 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:13 crc kubenswrapper[4492]: I1126 06:49:13.639042 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:13 crc kubenswrapper[4492]: I1126 06:49:13.639052 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:13Z","lastTransitionTime":"2025-11-26T06:49:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:49:13 crc kubenswrapper[4492]: I1126 06:49:13.740637 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:13 crc kubenswrapper[4492]: I1126 06:49:13.740693 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:13 crc kubenswrapper[4492]: I1126 06:49:13.740720 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:13 crc kubenswrapper[4492]: I1126 06:49:13.740731 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:13 crc kubenswrapper[4492]: I1126 06:49:13.740739 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:13Z","lastTransitionTime":"2025-11-26T06:49:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:13 crc kubenswrapper[4492]: I1126 06:49:13.842139 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:13 crc kubenswrapper[4492]: I1126 06:49:13.842201 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:13 crc kubenswrapper[4492]: I1126 06:49:13.842211 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:13 crc kubenswrapper[4492]: I1126 06:49:13.842222 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:13 crc kubenswrapper[4492]: I1126 06:49:13.842232 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:13Z","lastTransitionTime":"2025-11-26T06:49:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:13 crc kubenswrapper[4492]: I1126 06:49:13.944014 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:13 crc kubenswrapper[4492]: I1126 06:49:13.944044 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:13 crc kubenswrapper[4492]: I1126 06:49:13.944065 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:13 crc kubenswrapper[4492]: I1126 06:49:13.944079 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:13 crc kubenswrapper[4492]: I1126 06:49:13.944089 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:13Z","lastTransitionTime":"2025-11-26T06:49:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:49:14 crc kubenswrapper[4492]: I1126 06:49:14.046327 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:14 crc kubenswrapper[4492]: I1126 06:49:14.046422 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:14 crc kubenswrapper[4492]: I1126 06:49:14.046456 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:14 crc kubenswrapper[4492]: I1126 06:49:14.046469 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:14 crc kubenswrapper[4492]: I1126 06:49:14.046479 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:14Z","lastTransitionTime":"2025-11-26T06:49:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:14 crc kubenswrapper[4492]: I1126 06:49:14.149213 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:14 crc kubenswrapper[4492]: I1126 06:49:14.149261 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:14 crc kubenswrapper[4492]: I1126 06:49:14.149273 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:14 crc kubenswrapper[4492]: I1126 06:49:14.149293 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:14 crc kubenswrapper[4492]: I1126 06:49:14.149306 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:14Z","lastTransitionTime":"2025-11-26T06:49:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:49:14 crc kubenswrapper[4492]: I1126 06:49:14.167330 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 06:49:14 crc kubenswrapper[4492]: I1126 06:49:14.167391 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 06:49:14 crc kubenswrapper[4492]: I1126 06:49:14.167417 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 06:49:14 crc kubenswrapper[4492]: I1126 06:49:14.167454 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 06:49:14 crc kubenswrapper[4492]: I1126 06:49:14.167477 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 06:49:14 crc kubenswrapper[4492]: E1126 06:49:14.167610 4492 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 26 06:49:14 crc kubenswrapper[4492]: E1126 06:49:14.167678 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-26 06:49:46.167658181 +0000 UTC m=+82.051546479 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 26 06:49:14 crc kubenswrapper[4492]: E1126 06:49:14.167728 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2025-11-26 06:49:46.167713385 +0000 UTC m=+82.051601683 (durationBeforeRetry 32s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:49:14 crc kubenswrapper[4492]: E1126 06:49:14.167852 4492 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 26 06:49:14 crc kubenswrapper[4492]: E1126 06:49:14.167888 4492 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 26 06:49:14 crc kubenswrapper[4492]: E1126 06:49:14.167905 4492 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 06:49:14 crc kubenswrapper[4492]: E1126 06:49:14.167969 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-26 06:49:46.167948688 +0000 UTC m=+82.051836986 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 06:49:14 crc kubenswrapper[4492]: E1126 06:49:14.167852 4492 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 26 06:49:14 crc kubenswrapper[4492]: E1126 06:49:14.167994 4492 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 26 06:49:14 crc kubenswrapper[4492]: E1126 06:49:14.168002 4492 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 06:49:14 crc kubenswrapper[4492]: E1126 06:49:14.168026 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-26 06:49:46.168018008 +0000 UTC m=+82.051906296 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 06:49:14 crc kubenswrapper[4492]: E1126 06:49:14.167868 4492 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 26 06:49:14 crc kubenswrapper[4492]: E1126 06:49:14.168070 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-26 06:49:46.168049907 +0000 UTC m=+82.051938196 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 26 06:49:14 crc kubenswrapper[4492]: I1126 06:49:14.251633 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:14 crc kubenswrapper[4492]: I1126 06:49:14.251658 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:14 crc kubenswrapper[4492]: I1126 06:49:14.251667 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:14 crc kubenswrapper[4492]: I1126 06:49:14.251680 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:14 crc kubenswrapper[4492]: I1126 06:49:14.251689 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:14Z","lastTransitionTime":"2025-11-26T06:49:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:49:14 crc kubenswrapper[4492]: I1126 06:49:14.353484 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:14 crc kubenswrapper[4492]: I1126 06:49:14.353521 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:14 crc kubenswrapper[4492]: I1126 06:49:14.353531 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:14 crc kubenswrapper[4492]: I1126 06:49:14.353545 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:14 crc kubenswrapper[4492]: I1126 06:49:14.353553 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:14Z","lastTransitionTime":"2025-11-26T06:49:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:14 crc kubenswrapper[4492]: I1126 06:49:14.437819 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 06:49:14 crc kubenswrapper[4492]: E1126 06:49:14.437953 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 06:49:14 crc kubenswrapper[4492]: I1126 06:49:14.437996 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 06:49:14 crc kubenswrapper[4492]: I1126 06:49:14.438032 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-s4gtb" Nov 26 06:49:14 crc kubenswrapper[4492]: E1126 06:49:14.438074 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 06:49:14 crc kubenswrapper[4492]: E1126 06:49:14.438095 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-s4gtb" podUID="1cc59fbe-82e1-406b-95b1-a26b6b8ef467" Nov 26 06:49:14 crc kubenswrapper[4492]: I1126 06:49:14.448991 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2gwwb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d23da2c-14b7-4671-b87e-7506855ca163\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://56a756e2798f3a758f7cc404b3c1e543389f88510f1f1bcef6bd603086b5ac96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c7zdg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://23c2c8bf0201054d839f80e0d6ee1423ef1140d1c59512cd787edbad1b611b2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c7zdg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:49:01Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2gwwb\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:14Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:14 crc kubenswrapper[4492]: I1126 06:49:14.455016 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:14 crc kubenswrapper[4492]: I1126 06:49:14.455046 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:14 crc kubenswrapper[4492]: I1126 06:49:14.455061 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:14 crc kubenswrapper[4492]: I1126 06:49:14.455072 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:14 crc kubenswrapper[4492]: I1126 06:49:14.455085 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:14Z","lastTransitionTime":"2025-11-26T06:49:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:14 crc kubenswrapper[4492]: I1126 06:49:14.458497 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"995e57c0-8e79-4857-8451-c7f7b51a05d3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70a3d5141782b7c4dcb2ef96a026e1fb4f4ef101c967f64a20ece097bbdf007f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1684e95c263df92c92efdd2240417c071f977b58d825abd21
55277d7cce1fd57\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://15f4e01e3336948991062e895549de16371af485e1cc1f32591b5dc4fc62472b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef8e05b7e0643e9159acdd474c0f1ed97db182b0a7ed4f21b475ce6e4c051f52\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a6dd3695118a8c09585a7cfceb42ac5ae5898562c5f6442da6936f849a4e9f8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T06:48:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1126 06:48:41.573117 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1126 06:48:41.573321 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 06:48:41.575536 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3533228848/tls.crt::/tmp/serving-cert-3533228848/tls.key\\\\\\\"\\\\nI1126 06:48:41.958496 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 06:48:41.961105 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 06:48:41.961124 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 06:48:41.961145 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 06:48:41.961150 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 06:48:41.965068 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 06:48:41.965092 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:48:41.965097 1 secure_serving.go:69] Use of insecure cipher 
'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:48:41.965101 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 06:48:41.965103 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 06:48:41.965106 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 06:48:41.965108 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 06:48:41.965323 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 06:48:41.966098 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ca15652692a4dd6b01f7ec02b8ff37ac41ed740c4c47d451e0742aa80d0c028\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4421dd0243f7239dfa2ede847b9cb5230663e69a214dca3dfb1a4763ea76b233\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4421dd0243f7239dfa2ede847b9cb5230663e69a214dca3dfb1a4763ea76b233\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:24Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:14Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:14 crc kubenswrapper[4492]: I1126 06:49:14.466891 4492 status_manager.go:875] "Failed to update status 
for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:14Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:14 crc kubenswrapper[4492]: I1126 06:49:14.475862 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:14Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:14 crc kubenswrapper[4492]: I1126 06:49:14.484486 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2644f0895688786b5b70f08011457eed33cb0a7962ac6dde6b60dd3276497011\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://27d142c5328a6f659cd2cee0b6535403ccbfb07aee1ea29c928c9d80a847f4ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imag
eID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:14Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:14 crc kubenswrapper[4492]: I1126 06:49:14.495016 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:14Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:14 crc kubenswrapper[4492]: I1126 06:49:14.504294 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-hjxcm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfdb68d9-168b-4d04-a6ee-b2deef54a9ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f3b93237fa8e75f6423c8f194440aebb4fffec26f63b19b00396ee567fb454f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cpmw5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:50Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-hjxcm\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:14Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:14 crc kubenswrapper[4492]: I1126 06:49:14.513081 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://78117c73db01f54f893d52844cab11a6257a1f6b6b582fb751065e1acc329620\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:14Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:14 crc kubenswrapper[4492]: I1126 06:49:14.521261 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4aa19f79274209a31db5cfe0a8ff6f71000fc4efb2d65dfab3f719d3a7f1ee9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:14Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:14 crc kubenswrapper[4492]: I1126 06:49:14.535098 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b104695-0850-4fb3-b2f8-f764435f8694\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5de69ea04f427c1f151433ee53b119dfa7e3cd603f924ebca340c5050ff7d097\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://738cf2a56bd431ea3dcd29c8a6e66881e618cf6592af8ce1852be9776c41f803\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dba16d525497282969e2ead66e1d4886793234d0f4d2ef143a8246329131b046\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://501133ebc0e93cbba7bea713d813e6f41caa28286ed59ae8d328b6f9050a1e71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0301a329531b99f93a3e91f672edc4ce17ae03cfd5562f5802362423f11670fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1519223368c78fc663c8f6674135a7c574425dccbd0151d6663836b4715e9f9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c02ed7c2507748e0a1171eb6e43daac324db91b4
329f8ca3c2aa7cabac7205b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c02ed7c2507748e0a1171eb6e43daac324db91b4329f8ca3c2aa7cabac7205b4\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T06:49:10Z\\\",\\\"message\\\":\\\"ator/metrics LB template configs for network=default: []services.lbConfig(nil)\\\\nF1126 06:49:10.114199 6061 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:10Z is after 2025-08-24T17:21:41Z]\\\\nI1126 06:49:10.114196 6061 services_controller.go:451] Built service openshift-authentication-operator/metrics cluster-wide LB for network=default: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-authentication-operator/metrics_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-authentication-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:true, Empty\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:49:09Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-lghgp_openshift-ovn-kubernetes(9b104695-0850-4fb3-b2f8-f764435f8694)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dcc2ea28d7ab1193d0836b56704a623efefea918b62fde4a41c3d5466a531a39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4001ee998f435fc6249ef42a3a6a9a8cf4a5031eb8999158ba3ba6bfa41a4a43\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4001ee998f435fc6249ef42a3a6a9a8cf4a5031eb8999158ba3ba6bfa41a4a43\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lghgp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:14Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:14 crc kubenswrapper[4492]: I1126 06:49:14.542891 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-s4gtb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1cc59fbe-82e1-406b-95b1-a26b6b8ef467\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk29d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk29d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:49:02Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-s4gtb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:14Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:14 crc kubenswrapper[4492]: I1126 06:49:14.553947 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-nrzjd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"026c3325-a592-4828-8e4f-08bcb790014a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c4955932b597b4b409c6c0bd2195c7918b56f1db3aca639a0d47656173b6176\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70dcd5b71e0ed41855711d244d6628b740a92d6ada3fa3114a76c280b83e7402\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70dcd5b71e0ed41855711d244d6628b740a92d6ada3fa3114a76c280b83e7402\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://46f1a12a78f284278cf74713bf7a46f4d62a7f40258ec65ae217b5fbb5729525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://46f1a12a78f284278cf74713bf7a46f4d62a7f40258ec65ae217b5fbb5729525\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://233ce2ea0f4758076db5c922254232e5cccdc2466e03c6044a0ba4077dd71e8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://233ce2ea0f4758076db5c922254232e5cccdc2466e03c6044a0ba4077dd71e8a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1f662c3ac64d38717f3c8eb17f7ab22b7f9af88661f97ddae2caeec71b793a68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f662c3ac64d38717f3c8eb17f7ab22b7f9af88661f97ddae2caeec71b793a68\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://39153e3f02feb4f9af860e81e6e426354d4113d26135aadb9bc708dcb2cb1770\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://39153e3f02feb4f9af860e81e6e426354d4113d26135aadb9bc708dcb2cb1770\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b34363c342a19d8eda1d5be4bea825a4e0f9a4281915b6ae5d0cc5b00ad7c926\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b34363c342a19d8eda1d5be4bea825a4e0f9a4281915b6ae5d0cc5b00ad7c926\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-nrzjd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:14Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:14 crc kubenswrapper[4492]: I1126 06:49:14.557553 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:14 crc kubenswrapper[4492]: I1126 06:49:14.557596 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:14 crc 
kubenswrapper[4492]: I1126 06:49:14.557609 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:14 crc kubenswrapper[4492]: I1126 06:49:14.557631 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:14 crc kubenswrapper[4492]: I1126 06:49:14.557644 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:14Z","lastTransitionTime":"2025-11-26T06:49:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:14 crc kubenswrapper[4492]: I1126 06:49:14.564037 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6lnwf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0151e6e0-df4e-4482-9309-f8cce9bc6ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f91d7ead0eaa9a8c8d4ec6372d35236fc33de1f8606616efadfee2ec6a71324\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cmsnp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6lnwf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:14Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:14 crc kubenswrapper[4492]: I1126 06:49:14.572011 4492 
status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"04bf18ad-d2a1-4b30-a3fa-2b6247363c82\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fad93839d2a51dffea51b659a6dcbfe24701e00ebb88e18329f7aa4351e1b4f5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lwjt9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://695ce8a08afa726c47c6aa48ddd828cbc420a9740de6cf165351e5bd68174a89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lwjt9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6blv7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-26T06:49:14Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:14 crc kubenswrapper[4492]: I1126 06:49:14.581547 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"779b4f9a-92b7-4dcc-938a-e4de5decd688\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e170e91f442c9f45c7adfc9a5f8435cb51135522d5ac61f29829834c1f797e0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a8187b933b520c7a9c1c7f798f841f3892c249f52eddd13c0c7585a8bc916f20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b87661ddeafdf124a87d6bc50755b340e32d88bbc35a005ae13aa66aa3b39ff4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}
,{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a8bf4249c1551f054875ff3ef146502de6c99fd3afd10d78b41274196a35a6d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:24Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:14Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:14 crc kubenswrapper[4492]: I1126 06:49:14.589518 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b32864c2-0866-4642-a872-7a5109d6f84f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3cdba573f24fecefce899a977a585bd480de506f64dcd8af6fd7f32f945b844d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b85db4fc799f48153a0e9db0b5b8316762f221aa873c03d675beb5e9939377bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8088d16b18d99a32c41b63cbd6181314e805595697cbc5f122864dff6fe7b324\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2c51611cfe8bb2b69aeaa6d69f7e94deda73d79397c8e5a4ac4f0fe330a4b586\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2c51611cfe8bb2b69aeaa6d69f7e94deda73d79397c8e5a4ac4f0fe330a4b586\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:24Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:14Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:14 crc kubenswrapper[4492]: I1126 06:49:14.598976 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-5bshd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a471ac3f-0ac0-4110-94bb-194c0de0af26\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a91fba8751c53be54b0060bfc75906ab11b521770ca44425d8910fa13027c9d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\
"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gt98z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-5bshd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:14Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:14 crc kubenswrapper[4492]: I1126 06:49:14.660493 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:14 crc kubenswrapper[4492]: I1126 06:49:14.660530 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:14 crc kubenswrapper[4492]: I1126 06:49:14.660540 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:14 crc kubenswrapper[4492]: I1126 06:49:14.660554 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:14 crc kubenswrapper[4492]: I1126 06:49:14.660565 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:14Z","lastTransitionTime":"2025-11-26T06:49:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:49:14 crc kubenswrapper[4492]: I1126 06:49:14.762903 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:14 crc kubenswrapper[4492]: I1126 06:49:14.762964 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:14 crc kubenswrapper[4492]: I1126 06:49:14.762976 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:14 crc kubenswrapper[4492]: I1126 06:49:14.762999 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:14 crc kubenswrapper[4492]: I1126 06:49:14.763026 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:14Z","lastTransitionTime":"2025-11-26T06:49:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:14 crc kubenswrapper[4492]: I1126 06:49:14.865002 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:14 crc kubenswrapper[4492]: I1126 06:49:14.865040 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:14 crc kubenswrapper[4492]: I1126 06:49:14.865051 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:14 crc kubenswrapper[4492]: I1126 06:49:14.865075 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:14 crc kubenswrapper[4492]: I1126 06:49:14.865090 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:14Z","lastTransitionTime":"2025-11-26T06:49:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:14 crc kubenswrapper[4492]: I1126 06:49:14.967360 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:14 crc kubenswrapper[4492]: I1126 06:49:14.967398 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:14 crc kubenswrapper[4492]: I1126 06:49:14.967410 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:14 crc kubenswrapper[4492]: I1126 06:49:14.967423 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:14 crc kubenswrapper[4492]: I1126 06:49:14.967434 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:14Z","lastTransitionTime":"2025-11-26T06:49:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 26 06:49:15 crc kubenswrapper[4492]: I1126 06:49:15.069080 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:15 crc kubenswrapper[4492]: I1126 06:49:15.069118 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:15 crc kubenswrapper[4492]: I1126 06:49:15.069131 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:15 crc kubenswrapper[4492]: I1126 06:49:15.069150 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:15 crc kubenswrapper[4492]: I1126 06:49:15.069163 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:15Z","lastTransitionTime":"2025-11-26T06:49:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:15 crc kubenswrapper[4492]: I1126 06:49:15.171225 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:15 crc kubenswrapper[4492]: I1126 06:49:15.171268 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:15 crc kubenswrapper[4492]: I1126 06:49:15.171278 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:15 crc kubenswrapper[4492]: I1126 06:49:15.171293 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:15 crc kubenswrapper[4492]: I1126 06:49:15.171308 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:15Z","lastTransitionTime":"2025-11-26T06:49:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:15 crc kubenswrapper[4492]: I1126 06:49:15.273220 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:15 crc kubenswrapper[4492]: I1126 06:49:15.273283 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:15 crc kubenswrapper[4492]: I1126 06:49:15.273294 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:15 crc kubenswrapper[4492]: I1126 06:49:15.273311 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:15 crc kubenswrapper[4492]: I1126 06:49:15.273321 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:15Z","lastTransitionTime":"2025-11-26T06:49:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:15 crc kubenswrapper[4492]: I1126 06:49:15.374992 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:15 crc kubenswrapper[4492]: I1126 06:49:15.375024 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:15 crc kubenswrapper[4492]: I1126 06:49:15.375034 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:15 crc kubenswrapper[4492]: I1126 06:49:15.375046 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:15 crc kubenswrapper[4492]: I1126 06:49:15.375066 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:15Z","lastTransitionTime":"2025-11-26T06:49:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:15 crc kubenswrapper[4492]: I1126 06:49:15.438486 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 26 06:49:15 crc kubenswrapper[4492]: E1126 06:49:15.438625 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 26 06:49:15 crc kubenswrapper[4492]: I1126 06:49:15.477723 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:15 crc kubenswrapper[4492]: I1126 06:49:15.477759 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:15 crc kubenswrapper[4492]: I1126 06:49:15.477769 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:15 crc kubenswrapper[4492]: I1126 06:49:15.477782 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:15 crc kubenswrapper[4492]: I1126 06:49:15.477793 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:15Z","lastTransitionTime":"2025-11-26T06:49:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:15 crc kubenswrapper[4492]: I1126 06:49:15.538799 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:15 crc kubenswrapper[4492]: I1126 06:49:15.538864 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:15 crc kubenswrapper[4492]: I1126 06:49:15.538882 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:15 crc kubenswrapper[4492]: I1126 06:49:15.538898 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:15 crc kubenswrapper[4492]: I1126 06:49:15.538908 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:15Z","lastTransitionTime":"2025-11-26T06:49:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:15 crc kubenswrapper[4492]: E1126 06:49:15.549532 4492 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148056Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608856Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:15Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:15Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:15Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:15Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"5a30a4c6-2314-4103-8c18-44e795d62516\\\",\\\"systemUUID\\\":\\\"836cf739-0185-4d24-bd92-dec4516ccf4f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:15Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:15 crc kubenswrapper[4492]: I1126 06:49:15.552925 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:15 crc kubenswrapper[4492]: I1126 06:49:15.552965 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 06:49:15 crc kubenswrapper[4492]: I1126 06:49:15.553023 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:15 crc kubenswrapper[4492]: I1126 06:49:15.553038 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:15 crc kubenswrapper[4492]: I1126 06:49:15.553047 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:15Z","lastTransitionTime":"2025-11-26T06:49:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:15 crc kubenswrapper[4492]: E1126 06:49:15.563242 4492 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148056Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608856Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:15Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:15Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:15Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:15Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 26 06:49:15 crc kubenswrapper[4492]: I1126 06:49:15.566269 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:15 crc kubenswrapper[4492]: I1126 06:49:15.566324 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
event="NodeHasNoDiskPressure" Nov 26 06:49:15 crc kubenswrapper[4492]: I1126 06:49:15.566337 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:15 crc kubenswrapper[4492]: I1126 06:49:15.566349 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:15 crc kubenswrapper[4492]: I1126 06:49:15.566358 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:15Z","lastTransitionTime":"2025-11-26T06:49:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:15 crc kubenswrapper[4492]: E1126 06:49:15.575313 4492 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148056Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608856Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:15Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:15Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:15Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:15Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 26 06:49:15 crc kubenswrapper[4492]: I1126 06:49:15.578062 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:15 crc kubenswrapper[4492]: I1126 06:49:15.578103 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
event="NodeHasNoDiskPressure" Nov 26 06:49:15 crc kubenswrapper[4492]: I1126 06:49:15.578115 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:15 crc kubenswrapper[4492]: I1126 06:49:15.578132 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:15 crc kubenswrapper[4492]: I1126 06:49:15.578143 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:15Z","lastTransitionTime":"2025-11-26T06:49:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:15 crc kubenswrapper[4492]: E1126 06:49:15.587495 4492 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148056Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608856Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:15Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:15Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:15Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:15Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 26 06:49:15 crc kubenswrapper[4492]: I1126 06:49:15.590293 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:15 crc kubenswrapper[4492]: I1126 06:49:15.590332 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
event="NodeHasNoDiskPressure" Nov 26 06:49:15 crc kubenswrapper[4492]: I1126 06:49:15.590362 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:15 crc kubenswrapper[4492]: I1126 06:49:15.590376 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:15 crc kubenswrapper[4492]: I1126 06:49:15.590386 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:15Z","lastTransitionTime":"2025-11-26T06:49:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:15 crc kubenswrapper[4492]: E1126 06:49:15.599382 4492 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148056Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608856Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:15Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:15Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:15Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:15Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"5a30a4c6-2314-4103-8c18-44e795d62516\\\",\\\"systemUUID\\\":\\\"836cf739-0185-4d24-bd92-dec4516ccf4f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:15Z is after 2025-08-24T17:21:41Z"
Nov 26 06:49:15 crc kubenswrapper[4492]: E1126 06:49:15.599490 4492 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count"
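The status-update failure above is an ordinary x509 validity problem: the kubelet cannot post its node status because the webhook serving certificate behind https://127.0.0.1:9743 expired on 2025-08-24, well before the current time in the log. A minimal diagnostic sketch in Go, assuming only the address taken from the error message and nothing about the kubelet itself, that dials the endpoint and prints each presented certificate's validity window:

package main

import (
	"crypto/tls"
	"fmt"
	"log"
	"time"
)

func main() {
	// Skip verification so the handshake succeeds even though the
	// certificate is expired; we want to inspect it, not trust it.
	conn, err := tls.Dial("tcp", "127.0.0.1:9743", &tls.Config{InsecureSkipVerify: true})
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	now := time.Now()
	for _, cert := range conn.ConnectionState().PeerCertificates {
		fmt.Printf("subject=%v notBefore=%s notAfter=%s expired=%t\n",
			cert.Subject,
			cert.NotBefore.Format(time.RFC3339),
			cert.NotAfter.Format(time.RFC3339),
			now.After(cert.NotAfter))
	}
}

Until that certificate is renewed (on CRC this typically happens through the cluster's own certificate rotation once it runs with a valid clock), every node-status PATCH is rejected, which is what produces the "exceeds retry count" error above and the event spam that follows.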
Nov 26 06:49:15 crc kubenswrapper[4492]: I1126 06:49:15.600698 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:15 crc kubenswrapper[4492]: I1126 06:49:15.600739 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:15 crc kubenswrapper[4492]: I1126 06:49:15.600754 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:15 crc kubenswrapper[4492]: I1126 06:49:15.600777 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:15 crc kubenswrapper[4492]: I1126 06:49:15.600792 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:15Z","lastTransitionTime":"2025-11-26T06:49:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:15 crc kubenswrapper[4492]: I1126 06:49:15.703359 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:15 crc kubenswrapper[4492]: I1126 06:49:15.703496 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:15 crc kubenswrapper[4492]: I1126 06:49:15.703564 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:15 crc kubenswrapper[4492]: I1126 06:49:15.703633 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:15 crc kubenswrapper[4492]: I1126 06:49:15.703701 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:15Z","lastTransitionTime":"2025-11-26T06:49:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:15 crc kubenswrapper[4492]: I1126 06:49:15.805590 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:15 crc kubenswrapper[4492]: I1126 06:49:15.805627 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:15 crc kubenswrapper[4492]: I1126 06:49:15.805639 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:15 crc kubenswrapper[4492]: I1126 06:49:15.805657 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:15 crc kubenswrapper[4492]: I1126 06:49:15.805669 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:15Z","lastTransitionTime":"2025-11-26T06:49:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:15 crc kubenswrapper[4492]: I1126 06:49:15.907978 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:15 crc kubenswrapper[4492]: I1126 06:49:15.908017 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:15 crc kubenswrapper[4492]: I1126 06:49:15.908026 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:15 crc kubenswrapper[4492]: I1126 06:49:15.908071 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:15 crc kubenswrapper[4492]: I1126 06:49:15.908081 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:15Z","lastTransitionTime":"2025-11-26T06:49:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:16 crc kubenswrapper[4492]: I1126 06:49:16.009853 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:16 crc kubenswrapper[4492]: I1126 06:49:16.009898 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:16 crc kubenswrapper[4492]: I1126 06:49:16.009909 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:16 crc kubenswrapper[4492]: I1126 06:49:16.009927 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:16 crc kubenswrapper[4492]: I1126 06:49:16.009938 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:16Z","lastTransitionTime":"2025-11-26T06:49:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:16 crc kubenswrapper[4492]: I1126 06:49:16.113235 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:16 crc kubenswrapper[4492]: I1126 06:49:16.113265 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:16 crc kubenswrapper[4492]: I1126 06:49:16.113273 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:16 crc kubenswrapper[4492]: I1126 06:49:16.113286 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:16 crc kubenswrapper[4492]: I1126 06:49:16.113295 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:16Z","lastTransitionTime":"2025-11-26T06:49:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:16 crc kubenswrapper[4492]: I1126 06:49:16.214994 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:16 crc kubenswrapper[4492]: I1126 06:49:16.215048 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:16 crc kubenswrapper[4492]: I1126 06:49:16.215067 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:16 crc kubenswrapper[4492]: I1126 06:49:16.215079 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:16 crc kubenswrapper[4492]: I1126 06:49:16.215091 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:16Z","lastTransitionTime":"2025-11-26T06:49:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:16 crc kubenswrapper[4492]: I1126 06:49:16.317515 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:16 crc kubenswrapper[4492]: I1126 06:49:16.317551 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:16 crc kubenswrapper[4492]: I1126 06:49:16.317580 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:16 crc kubenswrapper[4492]: I1126 06:49:16.317595 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:16 crc kubenswrapper[4492]: I1126 06:49:16.317604 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:16Z","lastTransitionTime":"2025-11-26T06:49:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:16 crc kubenswrapper[4492]: I1126 06:49:16.419336 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:16 crc kubenswrapper[4492]: I1126 06:49:16.419374 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:16 crc kubenswrapper[4492]: I1126 06:49:16.419382 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:16 crc kubenswrapper[4492]: I1126 06:49:16.419398 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:16 crc kubenswrapper[4492]: I1126 06:49:16.419409 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:16Z","lastTransitionTime":"2025-11-26T06:49:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:16 crc kubenswrapper[4492]: I1126 06:49:16.437958 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 26 06:49:16 crc kubenswrapper[4492]: I1126 06:49:16.437985 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 26 06:49:16 crc kubenswrapper[4492]: I1126 06:49:16.437972 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-s4gtb"
Nov 26 06:49:16 crc kubenswrapper[4492]: E1126 06:49:16.438277 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 26 06:49:16 crc kubenswrapper[4492]: E1126 06:49:16.438099 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 26 06:49:16 crc kubenswrapper[4492]: E1126 06:49:16.438491 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-s4gtb" podUID="1cc59fbe-82e1-406b-95b1-a26b6b8ef467"
Nov 26 06:49:16 crc kubenswrapper[4492]: I1126 06:49:16.521097 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:16 crc kubenswrapper[4492]: I1126 06:49:16.521135 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:16 crc kubenswrapper[4492]: I1126 06:49:16.521145 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:16 crc kubenswrapper[4492]: I1126 06:49:16.521188 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:16 crc kubenswrapper[4492]: I1126 06:49:16.521203 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:16Z","lastTransitionTime":"2025-11-26T06:49:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
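Every "network is not ready" and "Error syncing pod" record above reduces to the same root cause: nothing has written a CNI configuration into /etc/kubernetes/cni/net.d/ yet, so any pod that needs cluster networking cannot get a sandbox. A small check, sketched in Go with the directory path taken from the error message, that reports whether any CNI configuration file is present:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	// Directory comes straight from the kubelet error message.
	dir := "/etc/kubernetes/cni/net.d"
	entries, err := os.ReadDir(dir)
	if err != nil {
		fmt.Println("cannot read CNI conf dir:", err)
		return
	}
	found := false
	for _, e := range entries {
		// CNI loads .conf, .conflist and (legacy) .json config files.
		switch filepath.Ext(e.Name()) {
		case ".conf", ".conflist", ".json":
			fmt.Println("found CNI config:", filepath.Join(dir, e.Name()))
			found = true
		}
	}
	if !found {
		fmt.Println("no CNI configuration file in", dir, "- network plugin has not started yet")
	}
}

On this cluster the config is normally written by the network operator's pods once they come up, so the message clears on its own when the network provider starts; here it persists because node-status updates are blocked by the expired webhook certificate above.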
Nov 26 06:49:16 crc kubenswrapper[4492]: I1126 06:49:16.623417 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:16 crc kubenswrapper[4492]: I1126 06:49:16.623476 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:16 crc kubenswrapper[4492]: I1126 06:49:16.623487 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:16 crc kubenswrapper[4492]: I1126 06:49:16.623501 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:16 crc kubenswrapper[4492]: I1126 06:49:16.623517 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:16Z","lastTransitionTime":"2025-11-26T06:49:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:16 crc kubenswrapper[4492]: I1126 06:49:16.725820 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:16 crc kubenswrapper[4492]: I1126 06:49:16.725855 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:16 crc kubenswrapper[4492]: I1126 06:49:16.725867 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:16 crc kubenswrapper[4492]: I1126 06:49:16.725879 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:16 crc kubenswrapper[4492]: I1126 06:49:16.725888 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:16Z","lastTransitionTime":"2025-11-26T06:49:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:16 crc kubenswrapper[4492]: I1126 06:49:16.827603 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:16 crc kubenswrapper[4492]: I1126 06:49:16.827655 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:16 crc kubenswrapper[4492]: I1126 06:49:16.827668 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:16 crc kubenswrapper[4492]: I1126 06:49:16.827684 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:16 crc kubenswrapper[4492]: I1126 06:49:16.827713 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:16Z","lastTransitionTime":"2025-11-26T06:49:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:16 crc kubenswrapper[4492]: I1126 06:49:16.928956 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:16 crc kubenswrapper[4492]: I1126 06:49:16.928980 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:16 crc kubenswrapper[4492]: I1126 06:49:16.928990 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:16 crc kubenswrapper[4492]: I1126 06:49:16.929003 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:16 crc kubenswrapper[4492]: I1126 06:49:16.929012 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:16Z","lastTransitionTime":"2025-11-26T06:49:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:17 crc kubenswrapper[4492]: I1126 06:49:17.030844 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:17 crc kubenswrapper[4492]: I1126 06:49:17.030900 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:17 crc kubenswrapper[4492]: I1126 06:49:17.030911 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:17 crc kubenswrapper[4492]: I1126 06:49:17.030922 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:17 crc kubenswrapper[4492]: I1126 06:49:17.030932 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:17Z","lastTransitionTime":"2025-11-26T06:49:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:17 crc kubenswrapper[4492]: I1126 06:49:17.133235 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:17 crc kubenswrapper[4492]: I1126 06:49:17.133276 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:17 crc kubenswrapper[4492]: I1126 06:49:17.133285 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:17 crc kubenswrapper[4492]: I1126 06:49:17.133299 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:17 crc kubenswrapper[4492]: I1126 06:49:17.133312 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:17Z","lastTransitionTime":"2025-11-26T06:49:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:17 crc kubenswrapper[4492]: I1126 06:49:17.235535 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:17 crc kubenswrapper[4492]: I1126 06:49:17.235569 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:17 crc kubenswrapper[4492]: I1126 06:49:17.235580 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:17 crc kubenswrapper[4492]: I1126 06:49:17.235596 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:17 crc kubenswrapper[4492]: I1126 06:49:17.235606 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:17Z","lastTransitionTime":"2025-11-26T06:49:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:17 crc kubenswrapper[4492]: I1126 06:49:17.337973 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:17 crc kubenswrapper[4492]: I1126 06:49:17.338033 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:17 crc kubenswrapper[4492]: I1126 06:49:17.338046 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:17 crc kubenswrapper[4492]: I1126 06:49:17.338077 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:17 crc kubenswrapper[4492]: I1126 06:49:17.338090 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:17Z","lastTransitionTime":"2025-11-26T06:49:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:17 crc kubenswrapper[4492]: I1126 06:49:17.438401 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 26 06:49:17 crc kubenswrapper[4492]: E1126 06:49:17.438572 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 26 06:49:17 crc kubenswrapper[4492]: I1126 06:49:17.440108 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:17 crc kubenswrapper[4492]: I1126 06:49:17.440162 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:17 crc kubenswrapper[4492]: I1126 06:49:17.440195 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:17 crc kubenswrapper[4492]: I1126 06:49:17.440209 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:17 crc kubenswrapper[4492]: I1126 06:49:17.440222 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:17Z","lastTransitionTime":"2025-11-26T06:49:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:17 crc kubenswrapper[4492]: I1126 06:49:17.542838 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:17 crc kubenswrapper[4492]: I1126 06:49:17.542892 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:17 crc kubenswrapper[4492]: I1126 06:49:17.542908 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:17 crc kubenswrapper[4492]: I1126 06:49:17.542929 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:17 crc kubenswrapper[4492]: I1126 06:49:17.542943 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:17Z","lastTransitionTime":"2025-11-26T06:49:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:17 crc kubenswrapper[4492]: I1126 06:49:17.645157 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:17 crc kubenswrapper[4492]: I1126 06:49:17.645222 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:17 crc kubenswrapper[4492]: I1126 06:49:17.645234 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:17 crc kubenswrapper[4492]: I1126 06:49:17.645250 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:17 crc kubenswrapper[4492]: I1126 06:49:17.645261 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:17Z","lastTransitionTime":"2025-11-26T06:49:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:17 crc kubenswrapper[4492]: I1126 06:49:17.748087 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:17 crc kubenswrapper[4492]: I1126 06:49:17.748150 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:17 crc kubenswrapper[4492]: I1126 06:49:17.748162 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:17 crc kubenswrapper[4492]: I1126 06:49:17.748198 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:17 crc kubenswrapper[4492]: I1126 06:49:17.748212 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:17Z","lastTransitionTime":"2025-11-26T06:49:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:17 crc kubenswrapper[4492]: I1126 06:49:17.850957 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:17 crc kubenswrapper[4492]: I1126 06:49:17.851363 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:17 crc kubenswrapper[4492]: I1126 06:49:17.851423 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:17 crc kubenswrapper[4492]: I1126 06:49:17.851489 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:17 crc kubenswrapper[4492]: I1126 06:49:17.851563 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:17Z","lastTransitionTime":"2025-11-26T06:49:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:17 crc kubenswrapper[4492]: I1126 06:49:17.953433 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:17 crc kubenswrapper[4492]: I1126 06:49:17.953470 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:17 crc kubenswrapper[4492]: I1126 06:49:17.953480 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:17 crc kubenswrapper[4492]: I1126 06:49:17.953493 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:17 crc kubenswrapper[4492]: I1126 06:49:17.953504 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:17Z","lastTransitionTime":"2025-11-26T06:49:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:18 crc kubenswrapper[4492]: I1126 06:49:18.000956 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/1cc59fbe-82e1-406b-95b1-a26b6b8ef467-metrics-certs\") pod \"network-metrics-daemon-s4gtb\" (UID: \"1cc59fbe-82e1-406b-95b1-a26b6b8ef467\") " pod="openshift-multus/network-metrics-daemon-s4gtb"
Nov 26 06:49:18 crc kubenswrapper[4492]: E1126 06:49:18.001128 4492 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 26 06:49:18 crc kubenswrapper[4492]: E1126 06:49:18.001234 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/1cc59fbe-82e1-406b-95b1-a26b6b8ef467-metrics-certs podName:1cc59fbe-82e1-406b-95b1-a26b6b8ef467 nodeName:}" failed. No retries permitted until 2025-11-26 06:49:34.001212553 +0000 UTC m=+69.885100841 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/1cc59fbe-82e1-406b-95b1-a26b6b8ef467-metrics-certs") pod "network-metrics-daemon-s4gtb" (UID: "1cc59fbe-82e1-406b-95b1-a26b6b8ef467") : object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 26 06:49:18 crc kubenswrapper[4492]: I1126 06:49:18.055949 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:18 crc kubenswrapper[4492]: I1126 06:49:18.056003 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:18 crc kubenswrapper[4492]: I1126 06:49:18.056016 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:18 crc kubenswrapper[4492]: I1126 06:49:18.056031 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:18 crc kubenswrapper[4492]: I1126 06:49:18.056043 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:18Z","lastTransitionTime":"2025-11-26T06:49:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:18 crc kubenswrapper[4492]: I1126 06:49:18.158029 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:18 crc kubenswrapper[4492]: I1126 06:49:18.158074 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:18 crc kubenswrapper[4492]: I1126 06:49:18.158084 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:18 crc kubenswrapper[4492]: I1126 06:49:18.158100 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:18 crc kubenswrapper[4492]: I1126 06:49:18.158111 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:18Z","lastTransitionTime":"2025-11-26T06:49:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
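The "No retries permitted until ... (durationBeforeRetry 16s)" record above is the volume manager's exponential backoff between failed MountVolume.SetUp attempts: the wait roughly doubles after each failure up to a cap, which is why the next retry is 16 seconds out. A sketch of that pattern in Go; the 500ms starting value and 2m cap here are illustrative assumptions for the sketch, not the kubelet's exact constants:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Doubling backoff with a cap, the pattern behind the kubelet's
	// durationBeforeRetry messages. Constants are assumed for the sketch.
	backoff := 500 * time.Millisecond
	const maxBackoff = 2 * time.Minute
	for attempt := 1; attempt <= 10; attempt++ {
		fmt.Printf("attempt %d failed, next retry in %s\n", attempt, backoff)
		backoff *= 2
		if backoff > maxBackoff {
			backoff = maxBackoff
		}
	}
}

The underlying failure ("object \"openshift-multus\"/\"metrics-daemon-secret\" not registered") means the kubelet's secret manager has not yet registered that secret for the pod, so the mount will keep failing, and backing off, until the pod is fully admitted once the network comes up.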
Nov 26 06:49:18 crc kubenswrapper[4492]: I1126 06:49:18.260317 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:18 crc kubenswrapper[4492]: I1126 06:49:18.260362 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:18 crc kubenswrapper[4492]: I1126 06:49:18.260381 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:18 crc kubenswrapper[4492]: I1126 06:49:18.260399 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:18 crc kubenswrapper[4492]: I1126 06:49:18.260413 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:18Z","lastTransitionTime":"2025-11-26T06:49:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:18 crc kubenswrapper[4492]: I1126 06:49:18.362132 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:18 crc kubenswrapper[4492]: I1126 06:49:18.362164 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:18 crc kubenswrapper[4492]: I1126 06:49:18.362189 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:18 crc kubenswrapper[4492]: I1126 06:49:18.362205 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:18 crc kubenswrapper[4492]: I1126 06:49:18.362214 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:18Z","lastTransitionTime":"2025-11-26T06:49:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:18 crc kubenswrapper[4492]: I1126 06:49:18.438365 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 26 06:49:18 crc kubenswrapper[4492]: I1126 06:49:18.438396 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-s4gtb"
Nov 26 06:49:18 crc kubenswrapper[4492]: I1126 06:49:18.438395 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 26 06:49:18 crc kubenswrapper[4492]: E1126 06:49:18.438486 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 26 06:49:18 crc kubenswrapper[4492]: E1126 06:49:18.438565 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-s4gtb" podUID="1cc59fbe-82e1-406b-95b1-a26b6b8ef467"
Nov 26 06:49:18 crc kubenswrapper[4492]: E1126 06:49:18.439019 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 26 06:49:18 crc kubenswrapper[4492]: I1126 06:49:18.463990 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:18 crc kubenswrapper[4492]: I1126 06:49:18.464022 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:18 crc kubenswrapper[4492]: I1126 06:49:18.464033 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:18 crc kubenswrapper[4492]: I1126 06:49:18.464052 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:18 crc kubenswrapper[4492]: I1126 06:49:18.464071 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:18Z","lastTransitionTime":"2025-11-26T06:49:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:18 crc kubenswrapper[4492]: I1126 06:49:18.565828 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:18 crc kubenswrapper[4492]: I1126 06:49:18.565864 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:18 crc kubenswrapper[4492]: I1126 06:49:18.565874 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:18 crc kubenswrapper[4492]: I1126 06:49:18.565888 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:18 crc kubenswrapper[4492]: I1126 06:49:18.565899 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:18Z","lastTransitionTime":"2025-11-26T06:49:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:18 crc kubenswrapper[4492]: I1126 06:49:18.667577 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:18 crc kubenswrapper[4492]: I1126 06:49:18.667605 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:18 crc kubenswrapper[4492]: I1126 06:49:18.667613 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:18 crc kubenswrapper[4492]: I1126 06:49:18.667624 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:18 crc kubenswrapper[4492]: I1126 06:49:18.667633 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:18Z","lastTransitionTime":"2025-11-26T06:49:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:18 crc kubenswrapper[4492]: I1126 06:49:18.769497 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:18 crc kubenswrapper[4492]: I1126 06:49:18.769525 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:18 crc kubenswrapper[4492]: I1126 06:49:18.769535 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:18 crc kubenswrapper[4492]: I1126 06:49:18.769546 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:18 crc kubenswrapper[4492]: I1126 06:49:18.769556 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:18Z","lastTransitionTime":"2025-11-26T06:49:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:18 crc kubenswrapper[4492]: I1126 06:49:18.871685 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:18 crc kubenswrapper[4492]: I1126 06:49:18.871712 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:18 crc kubenswrapper[4492]: I1126 06:49:18.871720 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:18 crc kubenswrapper[4492]: I1126 06:49:18.871733 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:18 crc kubenswrapper[4492]: I1126 06:49:18.871743 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:18Z","lastTransitionTime":"2025-11-26T06:49:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:18 crc kubenswrapper[4492]: I1126 06:49:18.973354 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:18 crc kubenswrapper[4492]: I1126 06:49:18.973383 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:18 crc kubenswrapper[4492]: I1126 06:49:18.973392 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:18 crc kubenswrapper[4492]: I1126 06:49:18.973405 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:18 crc kubenswrapper[4492]: I1126 06:49:18.973415 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:18Z","lastTransitionTime":"2025-11-26T06:49:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:19 crc kubenswrapper[4492]: I1126 06:49:19.075270 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:19 crc kubenswrapper[4492]: I1126 06:49:19.075299 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:19 crc kubenswrapper[4492]: I1126 06:49:19.075307 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:19 crc kubenswrapper[4492]: I1126 06:49:19.075317 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:19 crc kubenswrapper[4492]: I1126 06:49:19.075327 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:19Z","lastTransitionTime":"2025-11-26T06:49:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:19 crc kubenswrapper[4492]: I1126 06:49:19.176991 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:19 crc kubenswrapper[4492]: I1126 06:49:19.177021 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:19 crc kubenswrapper[4492]: I1126 06:49:19.177030 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:19 crc kubenswrapper[4492]: I1126 06:49:19.177041 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:19 crc kubenswrapper[4492]: I1126 06:49:19.177049 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:19Z","lastTransitionTime":"2025-11-26T06:49:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
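Each heartbeat block above re-emits the same Ready condition object with a refreshed timestamp. A short Go sketch that decodes one of these condition payloads (structure copied from the log lines; the message string is shortened here) and reports why the node is NotReady:

package main

import (
	"encoding/json"
	"fmt"
	"log"
)

// nodeCondition mirrors the condition={...} payload in the log lines.
type nodeCondition struct {
	Type               string `json:"type"`
	Status             string `json:"status"`
	LastHeartbeatTime  string `json:"lastHeartbeatTime"`
	LastTransitionTime string `json:"lastTransitionTime"`
	Reason             string `json:"reason"`
	Message            string `json:"message"`
}

func main() {
	raw := `{"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:19Z","lastTransitionTime":"2025-11-26T06:49:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false"}`
	var c nodeCondition
	if err := json.Unmarshal([]byte(raw), &c); err != nil {
		log.Fatal(err)
	}
	if c.Type == "Ready" && c.Status != "True" {
		fmt.Printf("node NotReady since %s: %s: %s\n", c.LastTransitionTime, c.Reason, c.Message)
	}
}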
Nov 26 06:49:19 crc kubenswrapper[4492]: I1126 06:49:19.278635 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:19 crc kubenswrapper[4492]: I1126 06:49:19.278695 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:19 crc kubenswrapper[4492]: I1126 06:49:19.278706 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:19 crc kubenswrapper[4492]: I1126 06:49:19.278723 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:19 crc kubenswrapper[4492]: I1126 06:49:19.278734 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:19Z","lastTransitionTime":"2025-11-26T06:49:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:19 crc kubenswrapper[4492]: I1126 06:49:19.381006 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:19 crc kubenswrapper[4492]: I1126 06:49:19.381040 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:19 crc kubenswrapper[4492]: I1126 06:49:19.381051 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:19 crc kubenswrapper[4492]: I1126 06:49:19.381073 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:19 crc kubenswrapper[4492]: I1126 06:49:19.381083 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:19Z","lastTransitionTime":"2025-11-26T06:49:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:19 crc kubenswrapper[4492]: I1126 06:49:19.437803 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 26 06:49:19 crc kubenswrapper[4492]: E1126 06:49:19.437918 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 26 06:49:19 crc kubenswrapper[4492]: I1126 06:49:19.482531 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:19 crc kubenswrapper[4492]: I1126 06:49:19.482561 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:19 crc kubenswrapper[4492]: I1126 06:49:19.482572 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:19 crc kubenswrapper[4492]: I1126 06:49:19.482586 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:19 crc kubenswrapper[4492]: I1126 06:49:19.482597 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:19Z","lastTransitionTime":"2025-11-26T06:49:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:19 crc kubenswrapper[4492]: I1126 06:49:19.584012 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:19 crc kubenswrapper[4492]: I1126 06:49:19.584043 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:19 crc kubenswrapper[4492]: I1126 06:49:19.584051 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:19 crc kubenswrapper[4492]: I1126 06:49:19.584068 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:19 crc kubenswrapper[4492]: I1126 06:49:19.584077 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:19Z","lastTransitionTime":"2025-11-26T06:49:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:19 crc kubenswrapper[4492]: I1126 06:49:19.685686 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:19 crc kubenswrapper[4492]: I1126 06:49:19.685733 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:19 crc kubenswrapper[4492]: I1126 06:49:19.685743 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:19 crc kubenswrapper[4492]: I1126 06:49:19.685754 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:19 crc kubenswrapper[4492]: I1126 06:49:19.685763 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:19Z","lastTransitionTime":"2025-11-26T06:49:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:19 crc kubenswrapper[4492]: I1126 06:49:19.787298 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:19 crc kubenswrapper[4492]: I1126 06:49:19.787327 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:19 crc kubenswrapper[4492]: I1126 06:49:19.787336 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:19 crc kubenswrapper[4492]: I1126 06:49:19.787348 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:19 crc kubenswrapper[4492]: I1126 06:49:19.787359 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:19Z","lastTransitionTime":"2025-11-26T06:49:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:19 crc kubenswrapper[4492]: I1126 06:49:19.889407 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:19 crc kubenswrapper[4492]: I1126 06:49:19.889435 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:19 crc kubenswrapper[4492]: I1126 06:49:19.889443 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:19 crc kubenswrapper[4492]: I1126 06:49:19.889457 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:19 crc kubenswrapper[4492]: I1126 06:49:19.889467 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:19Z","lastTransitionTime":"2025-11-26T06:49:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:49:19 crc kubenswrapper[4492]: I1126 06:49:19.991114 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:19 crc kubenswrapper[4492]: I1126 06:49:19.991145 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:19 crc kubenswrapper[4492]: I1126 06:49:19.991153 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:19 crc kubenswrapper[4492]: I1126 06:49:19.991164 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:19 crc kubenswrapper[4492]: I1126 06:49:19.991193 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:19Z","lastTransitionTime":"2025-11-26T06:49:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:20 crc kubenswrapper[4492]: I1126 06:49:20.093318 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:20 crc kubenswrapper[4492]: I1126 06:49:20.093350 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:20 crc kubenswrapper[4492]: I1126 06:49:20.093359 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:20 crc kubenswrapper[4492]: I1126 06:49:20.093371 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:20 crc kubenswrapper[4492]: I1126 06:49:20.093382 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:20Z","lastTransitionTime":"2025-11-26T06:49:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:20 crc kubenswrapper[4492]: I1126 06:49:20.195220 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:20 crc kubenswrapper[4492]: I1126 06:49:20.195257 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:20 crc kubenswrapper[4492]: I1126 06:49:20.195267 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:20 crc kubenswrapper[4492]: I1126 06:49:20.195286 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:20 crc kubenswrapper[4492]: I1126 06:49:20.195298 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:20Z","lastTransitionTime":"2025-11-26T06:49:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:49:20 crc kubenswrapper[4492]: I1126 06:49:20.296660 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:20 crc kubenswrapper[4492]: I1126 06:49:20.296690 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:20 crc kubenswrapper[4492]: I1126 06:49:20.296698 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:20 crc kubenswrapper[4492]: I1126 06:49:20.296709 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:20 crc kubenswrapper[4492]: I1126 06:49:20.296718 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:20Z","lastTransitionTime":"2025-11-26T06:49:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:20 crc kubenswrapper[4492]: I1126 06:49:20.397992 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:20 crc kubenswrapper[4492]: I1126 06:49:20.398020 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:20 crc kubenswrapper[4492]: I1126 06:49:20.398032 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:20 crc kubenswrapper[4492]: I1126 06:49:20.398043 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:20 crc kubenswrapper[4492]: I1126 06:49:20.398064 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:20Z","lastTransitionTime":"2025-11-26T06:49:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:20 crc kubenswrapper[4492]: I1126 06:49:20.437779 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 06:49:20 crc kubenswrapper[4492]: I1126 06:49:20.437840 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 06:49:20 crc kubenswrapper[4492]: E1126 06:49:20.437948 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 06:49:20 crc kubenswrapper[4492]: I1126 06:49:20.438117 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-s4gtb" Nov 26 06:49:20 crc kubenswrapper[4492]: E1126 06:49:20.438210 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-s4gtb" podUID="1cc59fbe-82e1-406b-95b1-a26b6b8ef467" Nov 26 06:49:20 crc kubenswrapper[4492]: E1126 06:49:20.438268 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 06:49:20 crc kubenswrapper[4492]: I1126 06:49:20.500086 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:20 crc kubenswrapper[4492]: I1126 06:49:20.500113 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:20 crc kubenswrapper[4492]: I1126 06:49:20.500122 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:20 crc kubenswrapper[4492]: I1126 06:49:20.500133 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:20 crc kubenswrapper[4492]: I1126 06:49:20.500142 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:20Z","lastTransitionTime":"2025-11-26T06:49:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:20 crc kubenswrapper[4492]: I1126 06:49:20.601349 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:20 crc kubenswrapper[4492]: I1126 06:49:20.601375 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:20 crc kubenswrapper[4492]: I1126 06:49:20.601386 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:20 crc kubenswrapper[4492]: I1126 06:49:20.601398 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:20 crc kubenswrapper[4492]: I1126 06:49:20.601409 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:20Z","lastTransitionTime":"2025-11-26T06:49:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:49:20 crc kubenswrapper[4492]: I1126 06:49:20.703367 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:20 crc kubenswrapper[4492]: I1126 06:49:20.703390 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:20 crc kubenswrapper[4492]: I1126 06:49:20.703398 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:20 crc kubenswrapper[4492]: I1126 06:49:20.703409 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:20 crc kubenswrapper[4492]: I1126 06:49:20.703418 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:20Z","lastTransitionTime":"2025-11-26T06:49:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:20 crc kubenswrapper[4492]: I1126 06:49:20.805587 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:20 crc kubenswrapper[4492]: I1126 06:49:20.805639 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:20 crc kubenswrapper[4492]: I1126 06:49:20.805653 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:20 crc kubenswrapper[4492]: I1126 06:49:20.805670 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:20 crc kubenswrapper[4492]: I1126 06:49:20.805683 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:20Z","lastTransitionTime":"2025-11-26T06:49:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:20 crc kubenswrapper[4492]: I1126 06:49:20.907871 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:20 crc kubenswrapper[4492]: I1126 06:49:20.907923 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:20 crc kubenswrapper[4492]: I1126 06:49:20.907933 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:20 crc kubenswrapper[4492]: I1126 06:49:20.907945 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:20 crc kubenswrapper[4492]: I1126 06:49:20.907954 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:20Z","lastTransitionTime":"2025-11-26T06:49:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:49:21 crc kubenswrapper[4492]: I1126 06:49:21.009609 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:21 crc kubenswrapper[4492]: I1126 06:49:21.009639 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:21 crc kubenswrapper[4492]: I1126 06:49:21.009648 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:21 crc kubenswrapper[4492]: I1126 06:49:21.009660 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:21 crc kubenswrapper[4492]: I1126 06:49:21.009667 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:21Z","lastTransitionTime":"2025-11-26T06:49:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:21 crc kubenswrapper[4492]: I1126 06:49:21.112073 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:21 crc kubenswrapper[4492]: I1126 06:49:21.112105 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:21 crc kubenswrapper[4492]: I1126 06:49:21.112114 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:21 crc kubenswrapper[4492]: I1126 06:49:21.112138 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:21 crc kubenswrapper[4492]: I1126 06:49:21.112147 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:21Z","lastTransitionTime":"2025-11-26T06:49:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:21 crc kubenswrapper[4492]: I1126 06:49:21.213770 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:21 crc kubenswrapper[4492]: I1126 06:49:21.213807 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:21 crc kubenswrapper[4492]: I1126 06:49:21.213818 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:21 crc kubenswrapper[4492]: I1126 06:49:21.213833 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:21 crc kubenswrapper[4492]: I1126 06:49:21.213843 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:21Z","lastTransitionTime":"2025-11-26T06:49:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:49:21 crc kubenswrapper[4492]: I1126 06:49:21.315523 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:21 crc kubenswrapper[4492]: I1126 06:49:21.315553 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:21 crc kubenswrapper[4492]: I1126 06:49:21.315565 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:21 crc kubenswrapper[4492]: I1126 06:49:21.315577 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:21 crc kubenswrapper[4492]: I1126 06:49:21.315586 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:21Z","lastTransitionTime":"2025-11-26T06:49:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:21 crc kubenswrapper[4492]: I1126 06:49:21.417844 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:21 crc kubenswrapper[4492]: I1126 06:49:21.417871 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:21 crc kubenswrapper[4492]: I1126 06:49:21.417879 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:21 crc kubenswrapper[4492]: I1126 06:49:21.417890 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:21 crc kubenswrapper[4492]: I1126 06:49:21.417898 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:21Z","lastTransitionTime":"2025-11-26T06:49:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:21 crc kubenswrapper[4492]: I1126 06:49:21.437614 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 06:49:21 crc kubenswrapper[4492]: E1126 06:49:21.437712 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 06:49:21 crc kubenswrapper[4492]: I1126 06:49:21.519235 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:21 crc kubenswrapper[4492]: I1126 06:49:21.519269 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:21 crc kubenswrapper[4492]: I1126 06:49:21.519277 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:21 crc kubenswrapper[4492]: I1126 06:49:21.519286 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:21 crc kubenswrapper[4492]: I1126 06:49:21.519293 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:21Z","lastTransitionTime":"2025-11-26T06:49:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:21 crc kubenswrapper[4492]: I1126 06:49:21.620343 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:21 crc kubenswrapper[4492]: I1126 06:49:21.620373 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:21 crc kubenswrapper[4492]: I1126 06:49:21.620383 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:21 crc kubenswrapper[4492]: I1126 06:49:21.620394 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:21 crc kubenswrapper[4492]: I1126 06:49:21.620405 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:21Z","lastTransitionTime":"2025-11-26T06:49:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:49:21 crc kubenswrapper[4492]: I1126 06:49:21.722085 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:21 crc kubenswrapper[4492]: I1126 06:49:21.722125 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:21 crc kubenswrapper[4492]: I1126 06:49:21.722134 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:21 crc kubenswrapper[4492]: I1126 06:49:21.722149 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:21 crc kubenswrapper[4492]: I1126 06:49:21.722159 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:21Z","lastTransitionTime":"2025-11-26T06:49:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:21 crc kubenswrapper[4492]: I1126 06:49:21.824429 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:21 crc kubenswrapper[4492]: I1126 06:49:21.824462 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:21 crc kubenswrapper[4492]: I1126 06:49:21.824471 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:21 crc kubenswrapper[4492]: I1126 06:49:21.824483 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:21 crc kubenswrapper[4492]: I1126 06:49:21.824492 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:21Z","lastTransitionTime":"2025-11-26T06:49:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:21 crc kubenswrapper[4492]: I1126 06:49:21.925866 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:21 crc kubenswrapper[4492]: I1126 06:49:21.925904 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:21 crc kubenswrapper[4492]: I1126 06:49:21.925913 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:21 crc kubenswrapper[4492]: I1126 06:49:21.925927 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:21 crc kubenswrapper[4492]: I1126 06:49:21.925937 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:21Z","lastTransitionTime":"2025-11-26T06:49:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:49:22 crc kubenswrapper[4492]: I1126 06:49:22.028087 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:22 crc kubenswrapper[4492]: I1126 06:49:22.028133 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:22 crc kubenswrapper[4492]: I1126 06:49:22.028143 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:22 crc kubenswrapper[4492]: I1126 06:49:22.028160 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:22 crc kubenswrapper[4492]: I1126 06:49:22.028191 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:22Z","lastTransitionTime":"2025-11-26T06:49:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:22 crc kubenswrapper[4492]: I1126 06:49:22.129778 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:22 crc kubenswrapper[4492]: I1126 06:49:22.129808 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:22 crc kubenswrapper[4492]: I1126 06:49:22.129815 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:22 crc kubenswrapper[4492]: I1126 06:49:22.129827 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:22 crc kubenswrapper[4492]: I1126 06:49:22.129835 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:22Z","lastTransitionTime":"2025-11-26T06:49:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:22 crc kubenswrapper[4492]: I1126 06:49:22.231559 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:22 crc kubenswrapper[4492]: I1126 06:49:22.231589 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:22 crc kubenswrapper[4492]: I1126 06:49:22.231598 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:22 crc kubenswrapper[4492]: I1126 06:49:22.231609 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:22 crc kubenswrapper[4492]: I1126 06:49:22.231618 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:22Z","lastTransitionTime":"2025-11-26T06:49:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:49:22 crc kubenswrapper[4492]: I1126 06:49:22.333298 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:22 crc kubenswrapper[4492]: I1126 06:49:22.333324 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:22 crc kubenswrapper[4492]: I1126 06:49:22.333331 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:22 crc kubenswrapper[4492]: I1126 06:49:22.333342 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:22 crc kubenswrapper[4492]: I1126 06:49:22.333351 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:22Z","lastTransitionTime":"2025-11-26T06:49:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:22 crc kubenswrapper[4492]: I1126 06:49:22.434519 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:22 crc kubenswrapper[4492]: I1126 06:49:22.434570 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:22 crc kubenswrapper[4492]: I1126 06:49:22.434578 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:22 crc kubenswrapper[4492]: I1126 06:49:22.434590 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:22 crc kubenswrapper[4492]: I1126 06:49:22.434599 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:22Z","lastTransitionTime":"2025-11-26T06:49:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:22 crc kubenswrapper[4492]: I1126 06:49:22.437867 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 06:49:22 crc kubenswrapper[4492]: E1126 06:49:22.437949 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 06:49:22 crc kubenswrapper[4492]: I1126 06:49:22.437864 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-s4gtb" Nov 26 06:49:22 crc kubenswrapper[4492]: I1126 06:49:22.437985 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 06:49:22 crc kubenswrapper[4492]: E1126 06:49:22.438082 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-s4gtb" podUID="1cc59fbe-82e1-406b-95b1-a26b6b8ef467" Nov 26 06:49:22 crc kubenswrapper[4492]: E1126 06:49:22.438140 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 06:49:22 crc kubenswrapper[4492]: I1126 06:49:22.536684 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:22 crc kubenswrapper[4492]: I1126 06:49:22.536713 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:22 crc kubenswrapper[4492]: I1126 06:49:22.536721 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:22 crc kubenswrapper[4492]: I1126 06:49:22.536733 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:22 crc kubenswrapper[4492]: I1126 06:49:22.536741 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:22Z","lastTransitionTime":"2025-11-26T06:49:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:22 crc kubenswrapper[4492]: I1126 06:49:22.638906 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:22 crc kubenswrapper[4492]: I1126 06:49:22.638942 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:22 crc kubenswrapper[4492]: I1126 06:49:22.638950 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:22 crc kubenswrapper[4492]: I1126 06:49:22.638965 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:22 crc kubenswrapper[4492]: I1126 06:49:22.638976 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:22Z","lastTransitionTime":"2025-11-26T06:49:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:49:22 crc kubenswrapper[4492]: I1126 06:49:22.740555 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:22 crc kubenswrapper[4492]: I1126 06:49:22.740583 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:22 crc kubenswrapper[4492]: I1126 06:49:22.740592 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:22 crc kubenswrapper[4492]: I1126 06:49:22.740605 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:22 crc kubenswrapper[4492]: I1126 06:49:22.740614 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:22Z","lastTransitionTime":"2025-11-26T06:49:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:22 crc kubenswrapper[4492]: I1126 06:49:22.841709 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:22 crc kubenswrapper[4492]: I1126 06:49:22.841734 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:22 crc kubenswrapper[4492]: I1126 06:49:22.841743 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:22 crc kubenswrapper[4492]: I1126 06:49:22.841754 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:22 crc kubenswrapper[4492]: I1126 06:49:22.841781 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:22Z","lastTransitionTime":"2025-11-26T06:49:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:22 crc kubenswrapper[4492]: I1126 06:49:22.943668 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:22 crc kubenswrapper[4492]: I1126 06:49:22.943701 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:22 crc kubenswrapper[4492]: I1126 06:49:22.943709 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:22 crc kubenswrapper[4492]: I1126 06:49:22.943722 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:22 crc kubenswrapper[4492]: I1126 06:49:22.943733 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:22Z","lastTransitionTime":"2025-11-26T06:49:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:49:23 crc kubenswrapper[4492]: I1126 06:49:23.045228 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:23 crc kubenswrapper[4492]: I1126 06:49:23.045272 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:23 crc kubenswrapper[4492]: I1126 06:49:23.045283 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:23 crc kubenswrapper[4492]: I1126 06:49:23.045303 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:23 crc kubenswrapper[4492]: I1126 06:49:23.045316 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:23Z","lastTransitionTime":"2025-11-26T06:49:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:23 crc kubenswrapper[4492]: I1126 06:49:23.147416 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:23 crc kubenswrapper[4492]: I1126 06:49:23.147626 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:23 crc kubenswrapper[4492]: I1126 06:49:23.147712 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:23 crc kubenswrapper[4492]: I1126 06:49:23.147776 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:23 crc kubenswrapper[4492]: I1126 06:49:23.147839 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:23Z","lastTransitionTime":"2025-11-26T06:49:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:23 crc kubenswrapper[4492]: I1126 06:49:23.249283 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:23 crc kubenswrapper[4492]: I1126 06:49:23.249309 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:23 crc kubenswrapper[4492]: I1126 06:49:23.249317 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:23 crc kubenswrapper[4492]: I1126 06:49:23.249329 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:23 crc kubenswrapper[4492]: I1126 06:49:23.249337 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:23Z","lastTransitionTime":"2025-11-26T06:49:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:49:23 crc kubenswrapper[4492]: I1126 06:49:23.351304 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:23 crc kubenswrapper[4492]: I1126 06:49:23.351337 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:23 crc kubenswrapper[4492]: I1126 06:49:23.351345 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:23 crc kubenswrapper[4492]: I1126 06:49:23.351357 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:23 crc kubenswrapper[4492]: I1126 06:49:23.351365 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:23Z","lastTransitionTime":"2025-11-26T06:49:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:23 crc kubenswrapper[4492]: I1126 06:49:23.438244 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 06:49:23 crc kubenswrapper[4492]: E1126 06:49:23.438355 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 06:49:23 crc kubenswrapper[4492]: I1126 06:49:23.452675 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:23 crc kubenswrapper[4492]: I1126 06:49:23.452729 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:23 crc kubenswrapper[4492]: I1126 06:49:23.452740 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:23 crc kubenswrapper[4492]: I1126 06:49:23.452750 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:23 crc kubenswrapper[4492]: I1126 06:49:23.452759 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:23Z","lastTransitionTime":"2025-11-26T06:49:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 26 06:49:23 crc kubenswrapper[4492]: I1126 06:49:23.554966 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:23 crc kubenswrapper[4492]: I1126 06:49:23.555018 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:23 crc kubenswrapper[4492]: I1126 06:49:23.555026 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:23 crc kubenswrapper[4492]: I1126 06:49:23.555038 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:23 crc kubenswrapper[4492]: I1126 06:49:23.555046 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:23Z","lastTransitionTime":"2025-11-26T06:49:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:23 crc kubenswrapper[4492]: I1126 06:49:23.656892 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:23 crc kubenswrapper[4492]: I1126 06:49:23.656921 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:23 crc kubenswrapper[4492]: I1126 06:49:23.656929 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:23 crc kubenswrapper[4492]: I1126 06:49:23.656940 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:23 crc kubenswrapper[4492]: I1126 06:49:23.656947 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:23Z","lastTransitionTime":"2025-11-26T06:49:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:23 crc kubenswrapper[4492]: I1126 06:49:23.759120 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:23 crc kubenswrapper[4492]: I1126 06:49:23.759145 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:23 crc kubenswrapper[4492]: I1126 06:49:23.759153 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:23 crc kubenswrapper[4492]: I1126 06:49:23.759164 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:23 crc kubenswrapper[4492]: I1126 06:49:23.759222 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:23Z","lastTransitionTime":"2025-11-26T06:49:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:23 crc kubenswrapper[4492]: I1126 06:49:23.860432 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:23 crc kubenswrapper[4492]: I1126 06:49:23.860480 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:23 crc kubenswrapper[4492]: I1126 06:49:23.860489 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:23 crc kubenswrapper[4492]: I1126 06:49:23.860500 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:23 crc kubenswrapper[4492]: I1126 06:49:23.860509 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:23Z","lastTransitionTime":"2025-11-26T06:49:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:23 crc kubenswrapper[4492]: I1126 06:49:23.962594 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:23 crc kubenswrapper[4492]: I1126 06:49:23.962627 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:23 crc kubenswrapper[4492]: I1126 06:49:23.962636 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:23 crc kubenswrapper[4492]: I1126 06:49:23.962648 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:23 crc kubenswrapper[4492]: I1126 06:49:23.962657 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:23Z","lastTransitionTime":"2025-11-26T06:49:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:24 crc kubenswrapper[4492]: I1126 06:49:24.064507 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:24 crc kubenswrapper[4492]: I1126 06:49:24.064545 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:24 crc kubenswrapper[4492]: I1126 06:49:24.064553 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:24 crc kubenswrapper[4492]: I1126 06:49:24.064567 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:24 crc kubenswrapper[4492]: I1126 06:49:24.064576 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:24Z","lastTransitionTime":"2025-11-26T06:49:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:24 crc kubenswrapper[4492]: I1126 06:49:24.166168 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:24 crc kubenswrapper[4492]: I1126 06:49:24.166224 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:24 crc kubenswrapper[4492]: I1126 06:49:24.166232 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:24 crc kubenswrapper[4492]: I1126 06:49:24.166244 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:24 crc kubenswrapper[4492]: I1126 06:49:24.166254 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:24Z","lastTransitionTime":"2025-11-26T06:49:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:24 crc kubenswrapper[4492]: I1126 06:49:24.267908 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:24 crc kubenswrapper[4492]: I1126 06:49:24.267961 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:24 crc kubenswrapper[4492]: I1126 06:49:24.267973 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:24 crc kubenswrapper[4492]: I1126 06:49:24.267987 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:24 crc kubenswrapper[4492]: I1126 06:49:24.268002 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:24Z","lastTransitionTime":"2025-11-26T06:49:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:24 crc kubenswrapper[4492]: I1126 06:49:24.369936 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:24 crc kubenswrapper[4492]: I1126 06:49:24.369969 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:24 crc kubenswrapper[4492]: I1126 06:49:24.369978 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:24 crc kubenswrapper[4492]: I1126 06:49:24.369987 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:24 crc kubenswrapper[4492]: I1126 06:49:24.369997 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:24Z","lastTransitionTime":"2025-11-26T06:49:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
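The NotReady loop above repeats a single root condition: the container runtime finds no CNI network configuration in /etc/kubernetes/cni/net.d/, so the kubelet keeps re-recording the same node events until the network provider writes one. A minimal Go sketch of that check, assuming only the directory path named in the log (the accepted file extensions follow the CNI libcni convention; this is a diagnostic sketch, not kubelet code):

    package main

    import (
    	"fmt"
    	"log"
    	"os"
    	"path/filepath"
    )

    func main() {
    	// Directory named in the kubelet error above (assumption: taken verbatim from the log).
    	const cniConfDir = "/etc/kubernetes/cni/net.d"

    	entries, err := os.ReadDir(cniConfDir)
    	if err != nil {
    		log.Fatalf("read %s: %v", cniConfDir, err)
    	}

    	found := false
    	for _, e := range entries {
    		// libcni loads .conf, .conflist and .json network configs.
    		switch filepath.Ext(e.Name()) {
    		case ".conf", ".conflist", ".json":
    			fmt.Println(filepath.Join(cniConfDir, e.Name()))
    			found = true
    		}
    	}
    	if !found {
    		fmt.Println("no CNI configuration present; the node stays NotReady until the network provider writes one")
    	}
    }

On this node the provider is OVN-Kubernetes (with Multus), whose pods are themselves still starting in the records that follow, which is why the condition persists across several heartbeats.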
Nov 26 06:49:24 crc kubenswrapper[4492]: I1126 06:49:24.437646 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 26 06:49:24 crc kubenswrapper[4492]: I1126 06:49:24.437668 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-s4gtb"
Nov 26 06:49:24 crc kubenswrapper[4492]: I1126 06:49:24.437698 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 26 06:49:24 crc kubenswrapper[4492]: E1126 06:49:24.438004 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-s4gtb" podUID="1cc59fbe-82e1-406b-95b1-a26b6b8ef467"
Nov 26 06:49:24 crc kubenswrapper[4492]: E1126 06:49:24.438042 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 26 06:49:24 crc kubenswrapper[4492]: E1126 06:49:24.437920 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 06:49:24 crc kubenswrapper[4492]: I1126 06:49:24.448223 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"779b4f9a-92b7-4dcc-938a-e4de5decd688\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e170e91f442c9f45c7adfc9a5f8435cb51135522d5ac61f29829834c1f797e0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a8187b933b520c7a9c1c7f798f841f3892c249f52eddd13c0c7585a8bc916f20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b87661ddeafdf124a87d6bc50755b340e32d88bbc35a005ae13aa66aa3b39ff4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/k
ubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a8bf4249c1551f054875ff3ef146502de6c99fd3afd10d78b41274196a35a6d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:24Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:24Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:24 crc kubenswrapper[4492]: I1126 06:49:24.456241 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b32864c2-0866-4642-a872-7a5109d6f84f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3cdba573f24fecefce899a977a585bd480de506f64dcd8af6fd7f32f945b844d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b85db4fc799f48153a0e9db0b5b8316762f221aa873c03d675beb5e9939377bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8088d16b18d99a32c41b63cbd6181314e805595697cbc5f122864dff6fe7b324\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2c51611cfe8bb2b69aeaa6d69f7e94deda73d79397c8e5a4ac4f0fe330a4b586\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2c51611cfe8bb2b69aeaa6d69f7e94deda73d79397c8e5a4ac4f0fe330a4b586\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:24Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:24Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:24 crc kubenswrapper[4492]: I1126 06:49:24.463835 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-5bshd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a471ac3f-0ac0-4110-94bb-194c0de0af26\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a91fba8751c53be54b0060bfc75906ab11b521770ca44425d8910fa13027c9d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\
"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gt98z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-5bshd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:24Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:24 crc kubenswrapper[4492]: I1126 06:49:24.471994 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:24Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:24 crc kubenswrapper[4492]: I1126 06:49:24.472018 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:24 crc kubenswrapper[4492]: I1126 06:49:24.472133 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:24 crc kubenswrapper[4492]: I1126 06:49:24.472143 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:24 crc kubenswrapper[4492]: I1126 06:49:24.472185 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:24 crc kubenswrapper[4492]: I1126 06:49:24.472195 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:24Z","lastTransitionTime":"2025-11-26T06:49:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:49:24 crc kubenswrapper[4492]: I1126 06:49:24.478463 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-hjxcm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfdb68d9-168b-4d04-a6ee-b2deef54a9ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f3b93237fa8e75f6423c8f194440aebb4fffec26f63b19b00396ee567fb454f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cpmw5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:50Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-hjxcm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:24Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:24 crc kubenswrapper[4492]: I1126 06:49:24.485270 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2gwwb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d23da2c-14b7-4671-b87e-7506855ca163\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://56a756e2798f3a758f7cc404b3c1e543389f88510f1f1bcef6bd603086b5ac96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c7zdg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://23c2c8bf0201054d839f80e0d6ee1423ef1140d1c59512cd787edbad1b611b2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c7zdg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:49:01Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2gwwb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:24Z is after 2025-08-24T17:21:41Z" Nov 26 
06:49:24 crc kubenswrapper[4492]: I1126 06:49:24.494109 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"995e57c0-8e79-4857-8451-c7f7b51a05d3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70a3d5141782b7c4dcb2ef96a026e1fb4f4ef101c967f64a20ece097bbdf007f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1684e95c263df92c92efdd2240417c071f977b58d825abd2155277d7cce1fd57\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://15f4e01e3336948991062e895549de16371af485e1cc1f32591b5dc4fc62472b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\
\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef8e05b7e0643e9159acdd474c0f1ed97db182b0a7ed4f21b475ce6e4c051f52\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a6dd3695118a8c09585a7cfceb42ac5ae5898562c5f6442da6936f849a4e9f8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T06:48:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1126 06:48:41.573117 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1126 06:48:41.573321 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 06:48:41.575536 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3533228848/tls.crt::/tmp/serving-cert-3533228848/tls.key\\\\\\\"\\\\nI1126 06:48:41.958496 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 06:48:41.961105 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 06:48:41.961124 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 06:48:41.961145 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 06:48:41.961150 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 06:48:41.965068 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 06:48:41.965092 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:48:41.965097 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:48:41.965101 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 06:48:41.965103 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 06:48:41.965106 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 06:48:41.965108 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 06:48:41.965323 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 06:48:41.966098 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ca15652692a4dd6b01f7ec02b8ff37ac41ed740c4c47d451e0742aa80d0c028\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4421dd0243f7239dfa2ede847b9cb5230663e69a214dca3dfb1a4763ea76b233\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4421dd0243f7239dfa2ede847b9cb5230663e69a214dca3dfb1a4763ea76b233\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:24Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:24Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:24 crc kubenswrapper[4492]: I1126 06:49:24.505854 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:24Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:24 crc kubenswrapper[4492]: I1126 06:49:24.513070 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:24Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:24 crc kubenswrapper[4492]: I1126 06:49:24.521448 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2644f0895688786b5b70f08011457eed33cb0a7962ac6dde6b60dd3276497011\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://27d142c5328a6f659cd2cee0b6535403ccbfb07aee1ea29c928c9d80a847f4ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:24Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:24 crc kubenswrapper[4492]: I1126 06:49:24.527625 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-s4gtb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1cc59fbe-82e1-406b-95b1-a26b6b8ef467\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk29d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk29d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:49:02Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-s4gtb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:24Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:24 crc kubenswrapper[4492]: I1126 06:49:24.535133 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://78117c73db01f54f893d52844cab11a6257a1f6b6b582fb751065e1acc329620\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:24Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:24 crc kubenswrapper[4492]: I1126 06:49:24.541994 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4aa19f79274209a31db5cfe0a8ff6f71000fc4efb2d65dfab3f719d3a7f1ee9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:24Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:24 crc kubenswrapper[4492]: I1126 06:49:24.553679 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b104695-0850-4fb3-b2f8-f764435f8694\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5de69ea04f427c1f151433ee53b119dfa7e3cd603f924ebca340c5050ff7d097\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://738cf2a56bd431ea3dcd29c8a6e66881e618cf6592af8ce1852be9776c41f803\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dba16d525497282969e2ead66e1d4886793234d0f4d2ef143a8246329131b046\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://501133ebc0e93cbba7bea713d813e6f41caa28286ed59ae8d328b6f9050a1e71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0301a329531b99f93a3e91f672edc4ce17ae03cfd5562f5802362423f11670fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1519223368c78fc663c8f6674135a7c574425dccbd0151d6663836b4715e9f9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c02ed7c2507748e0a1171eb6e43daac324db91b4
329f8ca3c2aa7cabac7205b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c02ed7c2507748e0a1171eb6e43daac324db91b4329f8ca3c2aa7cabac7205b4\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T06:49:10Z\\\",\\\"message\\\":\\\"ator/metrics LB template configs for network=default: []services.lbConfig(nil)\\\\nF1126 06:49:10.114199 6061 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:10Z is after 2025-08-24T17:21:41Z]\\\\nI1126 06:49:10.114196 6061 services_controller.go:451] Built service openshift-authentication-operator/metrics cluster-wide LB for network=default: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-authentication-operator/metrics_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-authentication-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:true, Empty\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:49:09Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-lghgp_openshift-ovn-kubernetes(9b104695-0850-4fb3-b2f8-f764435f8694)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dcc2ea28d7ab1193d0836b56704a623efefea918b62fde4a41c3d5466a531a39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4001ee998f435fc6249ef42a3a6a9a8cf4a5031eb8999158ba3ba6bfa41a4a43\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4001ee998f435fc6249ef42a3a6a9a8cf4a5031eb8999158ba3ba6bfa41a4a43\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lghgp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:24Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:24 crc kubenswrapper[4492]: I1126 06:49:24.562584 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-nrzjd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"026c3325-a592-4828-8e4f-08bcb790014a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c4955932b597b4b409c6c0bd2195c7918b56f1db3aca639a0d47656173b6176\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"
}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70dcd5b71e0ed41855711d244d6628b740a92d6ada3fa3114a76c280b83e7402\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70dcd5b71e0ed41855711d244d6628b740a92d6ada3fa3114a76c280b83e7402\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://46f1a12a78f284278cf74713bf7a46f4d62a7f40258ec65ae217b5fbb5729525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://46f1a12a78f284278cf74713bf7a46f4d62a7f40258ec65ae217b5fbb5729525\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://233ce2ea0f4758076db5c922254232e5cccdc2466e03c6044a0ba4077dd71e8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://233ce2ea0f4758076db5c922254232e5cccdc2466e03c6044a0ba4077dd71e8a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt
\\\":\\\"2025-11-26T06:48:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1f662c3ac64d38717f3c8eb17f7ab22b7f9af88661f97ddae2caeec71b793a68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f662c3ac64d38717f3c8eb17f7ab22b7f9af88661f97ddae2caeec71b793a68\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://39153e3f02feb4f9af860e81e6e426354d4113d26135aadb9bc708dcb2cb1770\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://39153e3f02feb4f9af860e81e6e426354d4113d26135aadb9bc708dcb2cb1770\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b34363c342a19d8eda1d5be4bea825a4e0f9a4281915b6ae5d0cc5b00ad7c926\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"nam
e\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b34363c342a19d8eda1d5be4bea825a4e0f9a4281915b6ae5d0cc5b00ad7c926\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-nrzjd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:24Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:24 crc kubenswrapper[4492]: I1126 06:49:24.568862 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6lnwf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0151e6e0-df4e-4482-9309-f8cce9bc6ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f91d7ead0eaa9a8c8d4ec6372d35236fc33de1f8606616efadfee2ec6a71324\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cmsnp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\
":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6lnwf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:24Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:24 crc kubenswrapper[4492]: I1126 06:49:24.574019 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:24 crc kubenswrapper[4492]: I1126 06:49:24.574048 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:24 crc kubenswrapper[4492]: I1126 06:49:24.574080 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:24 crc kubenswrapper[4492]: I1126 06:49:24.574094 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:24 crc kubenswrapper[4492]: I1126 06:49:24.574102 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:24Z","lastTransitionTime":"2025-11-26T06:49:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:24 crc kubenswrapper[4492]: I1126 06:49:24.575767 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"04bf18ad-d2a1-4b30-a3fa-2b6247363c82\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fad93839d2a51dffea51b659a6dcbfe24701e00ebb88e18329f7aa4351e1b4f5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lwjt9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://695ce8a08afa726c47c6aa48ddd828cbc420a9740de6cf165351e5bd68174a89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lwjt9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6blv7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:24Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:24 crc kubenswrapper[4492]: I1126 06:49:24.675329 4492 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:24 crc kubenswrapper[4492]: I1126 06:49:24.675361 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:24 crc kubenswrapper[4492]: I1126 06:49:24.675370 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:24 crc kubenswrapper[4492]: I1126 06:49:24.675384 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:24 crc kubenswrapper[4492]: I1126 06:49:24.675395 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:24Z","lastTransitionTime":"2025-11-26T06:49:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:24 crc kubenswrapper[4492]: I1126 06:49:24.777463 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:24 crc kubenswrapper[4492]: I1126 06:49:24.777492 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:24 crc kubenswrapper[4492]: I1126 06:49:24.777501 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:24 crc kubenswrapper[4492]: I1126 06:49:24.777514 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:24 crc kubenswrapper[4492]: I1126 06:49:24.777522 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:24Z","lastTransitionTime":"2025-11-26T06:49:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:24 crc kubenswrapper[4492]: I1126 06:49:24.879776 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:24 crc kubenswrapper[4492]: I1126 06:49:24.879801 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:24 crc kubenswrapper[4492]: I1126 06:49:24.879809 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:24 crc kubenswrapper[4492]: I1126 06:49:24.879822 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:24 crc kubenswrapper[4492]: I1126 06:49:24.879830 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:24Z","lastTransitionTime":"2025-11-26T06:49:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:49:24 crc kubenswrapper[4492]: I1126 06:49:24.981536 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:24 crc kubenswrapper[4492]: I1126 06:49:24.982096 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:24 crc kubenswrapper[4492]: I1126 06:49:24.982193 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:24 crc kubenswrapper[4492]: I1126 06:49:24.982274 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:24 crc kubenswrapper[4492]: I1126 06:49:24.982334 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:24Z","lastTransitionTime":"2025-11-26T06:49:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:25 crc kubenswrapper[4492]: I1126 06:49:25.083816 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:25 crc kubenswrapper[4492]: I1126 06:49:25.083864 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:25 crc kubenswrapper[4492]: I1126 06:49:25.083874 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:25 crc kubenswrapper[4492]: I1126 06:49:25.083888 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:25 crc kubenswrapper[4492]: I1126 06:49:25.083897 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:25Z","lastTransitionTime":"2025-11-26T06:49:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:25 crc kubenswrapper[4492]: I1126 06:49:25.185390 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:25 crc kubenswrapper[4492]: I1126 06:49:25.185419 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:25 crc kubenswrapper[4492]: I1126 06:49:25.185428 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:25 crc kubenswrapper[4492]: I1126 06:49:25.185456 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:25 crc kubenswrapper[4492]: I1126 06:49:25.185466 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:25Z","lastTransitionTime":"2025-11-26T06:49:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:49:25 crc kubenswrapper[4492]: I1126 06:49:25.287148 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:25 crc kubenswrapper[4492]: I1126 06:49:25.287194 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:25 crc kubenswrapper[4492]: I1126 06:49:25.287202 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:25 crc kubenswrapper[4492]: I1126 06:49:25.287216 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:25 crc kubenswrapper[4492]: I1126 06:49:25.287225 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:25Z","lastTransitionTime":"2025-11-26T06:49:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:25 crc kubenswrapper[4492]: I1126 06:49:25.388876 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:25 crc kubenswrapper[4492]: I1126 06:49:25.388914 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:25 crc kubenswrapper[4492]: I1126 06:49:25.388925 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:25 crc kubenswrapper[4492]: I1126 06:49:25.388939 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:25 crc kubenswrapper[4492]: I1126 06:49:25.388948 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:25Z","lastTransitionTime":"2025-11-26T06:49:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:25 crc kubenswrapper[4492]: I1126 06:49:25.438292 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 06:49:25 crc kubenswrapper[4492]: E1126 06:49:25.438380 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 06:49:25 crc kubenswrapper[4492]: I1126 06:49:25.438850 4492 scope.go:117] "RemoveContainer" containerID="c02ed7c2507748e0a1171eb6e43daac324db91b4329f8ca3c2aa7cabac7205b4" Nov 26 06:49:25 crc kubenswrapper[4492]: E1126 06:49:25.439030 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-lghgp_openshift-ovn-kubernetes(9b104695-0850-4fb3-b2f8-f764435f8694)\"" pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" podUID="9b104695-0850-4fb3-b2f8-f764435f8694" Nov 26 06:49:25 crc kubenswrapper[4492]: I1126 06:49:25.491133 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:25 crc kubenswrapper[4492]: I1126 06:49:25.491166 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:25 crc kubenswrapper[4492]: I1126 06:49:25.491191 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:25 crc kubenswrapper[4492]: I1126 06:49:25.491204 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:25 crc kubenswrapper[4492]: I1126 06:49:25.491213 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:25Z","lastTransitionTime":"2025-11-26T06:49:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:25 crc kubenswrapper[4492]: I1126 06:49:25.592436 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:25 crc kubenswrapper[4492]: I1126 06:49:25.592463 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:25 crc kubenswrapper[4492]: I1126 06:49:25.592472 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:25 crc kubenswrapper[4492]: I1126 06:49:25.592483 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:25 crc kubenswrapper[4492]: I1126 06:49:25.592492 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:25Z","lastTransitionTime":"2025-11-26T06:49:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:49:25 crc kubenswrapper[4492]: I1126 06:49:25.694192 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:25 crc kubenswrapper[4492]: I1126 06:49:25.694222 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:25 crc kubenswrapper[4492]: I1126 06:49:25.694230 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:25 crc kubenswrapper[4492]: I1126 06:49:25.694241 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:25 crc kubenswrapper[4492]: I1126 06:49:25.694249 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:25Z","lastTransitionTime":"2025-11-26T06:49:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:25 crc kubenswrapper[4492]: I1126 06:49:25.796296 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:25 crc kubenswrapper[4492]: I1126 06:49:25.796340 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:25 crc kubenswrapper[4492]: I1126 06:49:25.796351 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:25 crc kubenswrapper[4492]: I1126 06:49:25.796365 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:25 crc kubenswrapper[4492]: I1126 06:49:25.796375 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:25Z","lastTransitionTime":"2025-11-26T06:49:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:25 crc kubenswrapper[4492]: I1126 06:49:25.884432 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:25 crc kubenswrapper[4492]: I1126 06:49:25.884466 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:25 crc kubenswrapper[4492]: I1126 06:49:25.884476 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:25 crc kubenswrapper[4492]: I1126 06:49:25.884490 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:25 crc kubenswrapper[4492]: I1126 06:49:25.884500 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:25Z","lastTransitionTime":"2025-11-26T06:49:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:49:25 crc kubenswrapper[4492]: E1126 06:49:25.893638 4492 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148056Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608856Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:25Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:25Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:25Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:25Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"5a30a4c6-2314-4103-8c18-44e795d62516\\\",\\\"systemUUID\\\":\\\"836cf739-0185-4d24-bd92-dec4516ccf4f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:25Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:25 crc kubenswrapper[4492]: I1126 06:49:25.896363 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:25 crc kubenswrapper[4492]: I1126 06:49:25.896392 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 06:49:25 crc kubenswrapper[4492]: I1126 06:49:25.896400 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:25 crc kubenswrapper[4492]: I1126 06:49:25.896413 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:25 crc kubenswrapper[4492]: I1126 06:49:25.896421 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:25Z","lastTransitionTime":"2025-11-26T06:49:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:25 crc kubenswrapper[4492]: E1126 06:49:25.904387 4492 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148056Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608856Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:25Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:25Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:25Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:25Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"5a30a4c6-2314-4103-8c18-44e795d62516\\\",\\\"systemUUID\\\":\\\"836cf739-0185-4d24-bd92-dec4516ccf4f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:25Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:25 crc kubenswrapper[4492]: I1126 06:49:25.906906 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:25 crc kubenswrapper[4492]: I1126 06:49:25.906936 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 06:49:25 crc kubenswrapper[4492]: I1126 06:49:25.906947 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:25 crc kubenswrapper[4492]: I1126 06:49:25.906983 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:25 crc kubenswrapper[4492]: I1126 06:49:25.906993 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:25Z","lastTransitionTime":"2025-11-26T06:49:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:25 crc kubenswrapper[4492]: E1126 06:49:25.919629 4492 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148056Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608856Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:25Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:25Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:25Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:25Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"5a30a4c6-2314-4103-8c18-44e795d62516\\\",\\\"systemUUID\\\":\\\"836cf739-0185-4d24-bd92-dec4516ccf4f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:25Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:25 crc kubenswrapper[4492]: I1126 06:49:25.922161 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:25 crc kubenswrapper[4492]: I1126 06:49:25.922196 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 06:49:25 crc kubenswrapper[4492]: I1126 06:49:25.922206 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:25 crc kubenswrapper[4492]: I1126 06:49:25.922223 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:25 crc kubenswrapper[4492]: I1126 06:49:25.922232 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:25Z","lastTransitionTime":"2025-11-26T06:49:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:25 crc kubenswrapper[4492]: E1126 06:49:25.930389 4492 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148056Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608856Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:25Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:25Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:25Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:25Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"5a30a4c6-2314-4103-8c18-44e795d62516\\\",\\\"systemUUID\\\":\\\"836cf739-0185-4d24-bd92-dec4516ccf4f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:25Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:25 crc kubenswrapper[4492]: I1126 06:49:25.933765 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:25 crc kubenswrapper[4492]: I1126 06:49:25.933785 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 06:49:25 crc kubenswrapper[4492]: I1126 06:49:25.933793 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:25 crc kubenswrapper[4492]: I1126 06:49:25.933805 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:25 crc kubenswrapper[4492]: I1126 06:49:25.933813 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:25Z","lastTransitionTime":"2025-11-26T06:49:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:25 crc kubenswrapper[4492]: E1126 06:49:25.941874 4492 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148056Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608856Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:25Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:25Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:25Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:25Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"5a30a4c6-2314-4103-8c18-44e795d62516\\\",\\\"systemUUID\\\":\\\"836cf739-0185-4d24-bd92-dec4516ccf4f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:25Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:25 crc kubenswrapper[4492]: E1126 06:49:25.941971 4492 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 26 06:49:25 crc kubenswrapper[4492]: I1126 06:49:25.942881 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 26 06:49:25 crc kubenswrapper[4492]: I1126 06:49:25.942901 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:25 crc kubenswrapper[4492]: I1126 06:49:25.942908 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:25 crc kubenswrapper[4492]: I1126 06:49:25.942919 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:25 crc kubenswrapper[4492]: I1126 06:49:25.942927 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:25Z","lastTransitionTime":"2025-11-26T06:49:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:26 crc kubenswrapper[4492]: I1126 06:49:26.045042 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:26 crc kubenswrapper[4492]: I1126 06:49:26.045092 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:26 crc kubenswrapper[4492]: I1126 06:49:26.045101 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:26 crc kubenswrapper[4492]: I1126 06:49:26.045116 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:26 crc kubenswrapper[4492]: I1126 06:49:26.045126 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:26Z","lastTransitionTime":"2025-11-26T06:49:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:26 crc kubenswrapper[4492]: I1126 06:49:26.146675 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:26 crc kubenswrapper[4492]: I1126 06:49:26.146699 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:26 crc kubenswrapper[4492]: I1126 06:49:26.146707 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:26 crc kubenswrapper[4492]: I1126 06:49:26.146717 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:26 crc kubenswrapper[4492]: I1126 06:49:26.146725 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:26Z","lastTransitionTime":"2025-11-26T06:49:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:49:26 crc kubenswrapper[4492]: I1126 06:49:26.247952 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:26 crc kubenswrapper[4492]: I1126 06:49:26.247974 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:26 crc kubenswrapper[4492]: I1126 06:49:26.247983 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:26 crc kubenswrapper[4492]: I1126 06:49:26.247993 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:26 crc kubenswrapper[4492]: I1126 06:49:26.248001 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:26Z","lastTransitionTime":"2025-11-26T06:49:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:26 crc kubenswrapper[4492]: I1126 06:49:26.349193 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:26 crc kubenswrapper[4492]: I1126 06:49:26.349214 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:26 crc kubenswrapper[4492]: I1126 06:49:26.349223 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:26 crc kubenswrapper[4492]: I1126 06:49:26.349233 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:26 crc kubenswrapper[4492]: I1126 06:49:26.349240 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:26Z","lastTransitionTime":"2025-11-26T06:49:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:26 crc kubenswrapper[4492]: I1126 06:49:26.437662 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 06:49:26 crc kubenswrapper[4492]: E1126 06:49:26.437747 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 06:49:26 crc kubenswrapper[4492]: I1126 06:49:26.437776 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 06:49:26 crc kubenswrapper[4492]: I1126 06:49:26.437662 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-s4gtb" Nov 26 06:49:26 crc kubenswrapper[4492]: E1126 06:49:26.437843 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 06:49:26 crc kubenswrapper[4492]: E1126 06:49:26.437889 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-s4gtb" podUID="1cc59fbe-82e1-406b-95b1-a26b6b8ef467" Nov 26 06:49:26 crc kubenswrapper[4492]: I1126 06:49:26.450951 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:26 crc kubenswrapper[4492]: I1126 06:49:26.450983 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:26 crc kubenswrapper[4492]: I1126 06:49:26.451016 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:26 crc kubenswrapper[4492]: I1126 06:49:26.451026 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:26 crc kubenswrapper[4492]: I1126 06:49:26.451035 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:26Z","lastTransitionTime":"2025-11-26T06:49:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:26 crc kubenswrapper[4492]: I1126 06:49:26.552653 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:26 crc kubenswrapper[4492]: I1126 06:49:26.552705 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:26 crc kubenswrapper[4492]: I1126 06:49:26.552716 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:26 crc kubenswrapper[4492]: I1126 06:49:26.552730 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:26 crc kubenswrapper[4492]: I1126 06:49:26.552738 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:26Z","lastTransitionTime":"2025-11-26T06:49:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
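Editor's note: the "Node became not ready" entries above serialize the node's Ready condition as JSON. The following is a minimal sketch, not kubelet source, reconstructing that payload; the field names are taken verbatim from the log line, while the struct and variable names here are hypothetical.

// Minimal sketch (assumed names; only the JSON keys come from the log above).
package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// nodeCondition mirrors the condition={...} object logged by setters.go.
type nodeCondition struct {
	Type               string `json:"type"`
	Status             string `json:"status"`
	LastHeartbeatTime  string `json:"lastHeartbeatTime"`
	LastTransitionTime string `json:"lastTransitionTime"`
	Reason             string `json:"reason"`
	Message            string `json:"message"`
}

func main() {
	now := time.Now().UTC().Format(time.RFC3339)
	c := nodeCondition{
		Type:               "Ready",
		Status:             "False",
		LastHeartbeatTime:  now,
		LastTransitionTime: now,
		Reason:             "KubeletNotReady",
		Message: "container runtime network not ready: NetworkReady=false " +
			"reason:NetworkPluginNotReady message:Network plugin returns error: " +
			"no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?",
	}
	b, _ := json.Marshal(c)
	fmt.Println(string(b)) // same shape as the condition={...} payload in the log
}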
Nov 26 06:49:27 crc kubenswrapper[4492]: I1126 06:49:27.437550 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 26 06:49:27 crc kubenswrapper[4492]: E1126 06:49:27.437700 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 26 06:49:28 crc kubenswrapper[4492]: I1126 06:49:28.437823 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 26 06:49:28 crc kubenswrapper[4492]: E1126 06:49:28.438002 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 26 06:49:28 crc kubenswrapper[4492]: I1126 06:49:28.437867 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 26 06:49:28 crc kubenswrapper[4492]: I1126 06:49:28.437832 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-s4gtb"
Nov 26 06:49:28 crc kubenswrapper[4492]: E1126 06:49:28.438462 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 26 06:49:28 crc kubenswrapper[4492]: E1126 06:49:28.438518 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-s4gtb" podUID="1cc59fbe-82e1-406b-95b1-a26b6b8ef467"
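Editor's note: each util.go:30/pod_workers.go:1301 pair above shows the same gate: a pod that still needs a sandbox is skipped while the runtime network is unready. A toy sketch of that pattern follows; syncPod, its parameters, and the hard-coded flags are illustrative only, and the real kubelet pod workers are far more involved.

// Toy sketch of the skip pattern visible in the log (assumed names).
package main

import (
	"errors"
	"fmt"
)

var errNetworkNotReady = errors.New("network is not ready: container runtime network not ready: NetworkReady=false")

// syncPod refuses to start a new sandbox while the network is not ready.
func syncPod(pod string, needsSandbox, networkReady bool) error {
	if needsSandbox && !networkReady {
		return errNetworkNotReady // surfaces as "Error syncing pod, skipping"
	}
	return nil // would create the sandbox and start containers here
}

func main() {
	pods := []string{
		"openshift-network-diagnostics/network-check-target-xd92c",
		"openshift-multus/network-metrics-daemon-s4gtb",
	}
	for _, p := range pods {
		if err := syncPod(p, true, false); err != nil {
			fmt.Printf("Error syncing pod, skipping: pod=%q err=%v\n", p, err)
		}
	}
}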
Nov 26 06:49:29 crc kubenswrapper[4492]: I1126 06:49:29.437452 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 26 06:49:29 crc kubenswrapper[4492]: E1126 06:49:29.437531 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 26 06:49:30 crc kubenswrapper[4492]: I1126 06:49:30.437488 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 26 06:49:30 crc kubenswrapper[4492]: I1126 06:49:30.437525 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-s4gtb"
Nov 26 06:49:30 crc kubenswrapper[4492]: E1126 06:49:30.437578 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 26 06:49:30 crc kubenswrapper[4492]: I1126 06:49:30.437491 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 26 06:49:30 crc kubenswrapper[4492]: E1126 06:49:30.437730 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 26 06:49:30 crc kubenswrapper[4492]: E1126 06:49:30.437672 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-s4gtb" podUID="1cc59fbe-82e1-406b-95b1-a26b6b8ef467"
Nov 26 06:49:31 crc kubenswrapper[4492]: I1126 06:49:31.437447 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 26 06:49:31 crc kubenswrapper[4492]: E1126 06:49:31.437646 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 26 06:49:31 crc kubenswrapper[4492]: I1126 06:49:31.450617 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:31 crc kubenswrapper[4492]: I1126 06:49:31.450638 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:31 crc kubenswrapper[4492]: I1126 06:49:31.450647 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:31 crc kubenswrapper[4492]: I1126 06:49:31.450656 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:31 crc kubenswrapper[4492]: I1126 06:49:31.450665 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:31Z","lastTransitionTime":"2025-11-26T06:49:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"} Nov 26 06:49:31 crc kubenswrapper[4492]: I1126 06:49:31.551937 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:31 crc kubenswrapper[4492]: I1126 06:49:31.552074 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:31 crc kubenswrapper[4492]: I1126 06:49:31.552161 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:31 crc kubenswrapper[4492]: I1126 06:49:31.552257 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:31 crc kubenswrapper[4492]: I1126 06:49:31.552346 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:31Z","lastTransitionTime":"2025-11-26T06:49:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:31 crc kubenswrapper[4492]: I1126 06:49:31.654202 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:31 crc kubenswrapper[4492]: I1126 06:49:31.654247 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:31 crc kubenswrapper[4492]: I1126 06:49:31.654257 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:31 crc kubenswrapper[4492]: I1126 06:49:31.654268 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:31 crc kubenswrapper[4492]: I1126 06:49:31.654276 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:31Z","lastTransitionTime":"2025-11-26T06:49:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:31 crc kubenswrapper[4492]: I1126 06:49:31.755974 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:31 crc kubenswrapper[4492]: I1126 06:49:31.756004 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:31 crc kubenswrapper[4492]: I1126 06:49:31.756014 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:31 crc kubenswrapper[4492]: I1126 06:49:31.756025 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:31 crc kubenswrapper[4492]: I1126 06:49:31.756035 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:31Z","lastTransitionTime":"2025-11-26T06:49:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:49:31 crc kubenswrapper[4492]: I1126 06:49:31.857780 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:31 crc kubenswrapper[4492]: I1126 06:49:31.857842 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:31 crc kubenswrapper[4492]: I1126 06:49:31.857855 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:31 crc kubenswrapper[4492]: I1126 06:49:31.857882 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:31 crc kubenswrapper[4492]: I1126 06:49:31.857895 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:31Z","lastTransitionTime":"2025-11-26T06:49:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:31 crc kubenswrapper[4492]: I1126 06:49:31.959524 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:31 crc kubenswrapper[4492]: I1126 06:49:31.959653 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:31 crc kubenswrapper[4492]: I1126 06:49:31.959718 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:31 crc kubenswrapper[4492]: I1126 06:49:31.959787 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:31 crc kubenswrapper[4492]: I1126 06:49:31.959847 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:31Z","lastTransitionTime":"2025-11-26T06:49:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:32 crc kubenswrapper[4492]: I1126 06:49:32.060953 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:32 crc kubenswrapper[4492]: I1126 06:49:32.061052 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:32 crc kubenswrapper[4492]: I1126 06:49:32.061124 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:32 crc kubenswrapper[4492]: I1126 06:49:32.061198 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:32 crc kubenswrapper[4492]: I1126 06:49:32.061334 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:32Z","lastTransitionTime":"2025-11-26T06:49:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:49:32 crc kubenswrapper[4492]: I1126 06:49:32.162592 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:32 crc kubenswrapper[4492]: I1126 06:49:32.162942 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:32 crc kubenswrapper[4492]: I1126 06:49:32.163021 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:32 crc kubenswrapper[4492]: I1126 06:49:32.163111 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:32 crc kubenswrapper[4492]: I1126 06:49:32.163198 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:32Z","lastTransitionTime":"2025-11-26T06:49:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:32 crc kubenswrapper[4492]: I1126 06:49:32.265446 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:32 crc kubenswrapper[4492]: I1126 06:49:32.265578 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:32 crc kubenswrapper[4492]: I1126 06:49:32.265644 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:32 crc kubenswrapper[4492]: I1126 06:49:32.265702 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:32 crc kubenswrapper[4492]: I1126 06:49:32.265759 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:32Z","lastTransitionTime":"2025-11-26T06:49:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:32 crc kubenswrapper[4492]: I1126 06:49:32.368145 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:32 crc kubenswrapper[4492]: I1126 06:49:32.368279 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:32 crc kubenswrapper[4492]: I1126 06:49:32.368352 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:32 crc kubenswrapper[4492]: I1126 06:49:32.368426 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:32 crc kubenswrapper[4492]: I1126 06:49:32.368484 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:32Z","lastTransitionTime":"2025-11-26T06:49:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 26 06:49:32 crc kubenswrapper[4492]: I1126 06:49:32.437871 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 26 06:49:32 crc kubenswrapper[4492]: I1126 06:49:32.437901 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 26 06:49:32 crc kubenswrapper[4492]: I1126 06:49:32.437882 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-s4gtb"
Nov 26 06:49:32 crc kubenswrapper[4492]: E1126 06:49:32.437978 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 26 06:49:32 crc kubenswrapper[4492]: E1126 06:49:32.438100 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 26 06:49:32 crc kubenswrapper[4492]: E1126 06:49:32.438239 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-s4gtb" podUID="1cc59fbe-82e1-406b-95b1-a26b6b8ef467"
Nov 26 06:49:32 crc kubenswrapper[4492]: I1126 06:49:32.470434 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:32 crc kubenswrapper[4492]: I1126 06:49:32.470482 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:32 crc kubenswrapper[4492]: I1126 06:49:32.470495 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:32 crc kubenswrapper[4492]: I1126 06:49:32.470516 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:32 crc kubenswrapper[4492]: I1126 06:49:32.470535 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:32Z","lastTransitionTime":"2025-11-26T06:49:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:32 crc kubenswrapper[4492]: I1126 06:49:32.571960 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:32 crc kubenswrapper[4492]: I1126 06:49:32.572076 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:32 crc kubenswrapper[4492]: I1126 06:49:32.572138 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:32 crc kubenswrapper[4492]: I1126 06:49:32.572230 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:32 crc kubenswrapper[4492]: I1126 06:49:32.572286 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:32Z","lastTransitionTime":"2025-11-26T06:49:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:32 crc kubenswrapper[4492]: I1126 06:49:32.674117 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:32 crc kubenswrapper[4492]: I1126 06:49:32.674155 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:32 crc kubenswrapper[4492]: I1126 06:49:32.674202 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:32 crc kubenswrapper[4492]: I1126 06:49:32.674219 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:32 crc kubenswrapper[4492]: I1126 06:49:32.674229 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:32Z","lastTransitionTime":"2025-11-26T06:49:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:32 crc kubenswrapper[4492]: I1126 06:49:32.776493 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:32 crc kubenswrapper[4492]: I1126 06:49:32.776531 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:32 crc kubenswrapper[4492]: I1126 06:49:32.776542 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:32 crc kubenswrapper[4492]: I1126 06:49:32.776558 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:32 crc kubenswrapper[4492]: I1126 06:49:32.776570 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:32Z","lastTransitionTime":"2025-11-26T06:49:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:32 crc kubenswrapper[4492]: I1126 06:49:32.878660 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:32 crc kubenswrapper[4492]: I1126 06:49:32.878693 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:32 crc kubenswrapper[4492]: I1126 06:49:32.878703 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:32 crc kubenswrapper[4492]: I1126 06:49:32.878718 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:32 crc kubenswrapper[4492]: I1126 06:49:32.878728 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:32Z","lastTransitionTime":"2025-11-26T06:49:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:32 crc kubenswrapper[4492]: I1126 06:49:32.980655 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:32 crc kubenswrapper[4492]: I1126 06:49:32.980703 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:32 crc kubenswrapper[4492]: I1126 06:49:32.980716 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:32 crc kubenswrapper[4492]: I1126 06:49:32.980732 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:32 crc kubenswrapper[4492]: I1126 06:49:32.980742 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:32Z","lastTransitionTime":"2025-11-26T06:49:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:33 crc kubenswrapper[4492]: I1126 06:49:33.082316 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:33 crc kubenswrapper[4492]: I1126 06:49:33.082373 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:33 crc kubenswrapper[4492]: I1126 06:49:33.082386 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:33 crc kubenswrapper[4492]: I1126 06:49:33.082399 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:33 crc kubenswrapper[4492]: I1126 06:49:33.082409 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:33Z","lastTransitionTime":"2025-11-26T06:49:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:33 crc kubenswrapper[4492]: I1126 06:49:33.184551 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:33 crc kubenswrapper[4492]: I1126 06:49:33.184588 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:33 crc kubenswrapper[4492]: I1126 06:49:33.184598 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:33 crc kubenswrapper[4492]: I1126 06:49:33.184609 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:33 crc kubenswrapper[4492]: I1126 06:49:33.184618 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:33Z","lastTransitionTime":"2025-11-26T06:49:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:33 crc kubenswrapper[4492]: I1126 06:49:33.286213 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:33 crc kubenswrapper[4492]: I1126 06:49:33.286248 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:33 crc kubenswrapper[4492]: I1126 06:49:33.286259 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:33 crc kubenswrapper[4492]: I1126 06:49:33.286273 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:33 crc kubenswrapper[4492]: I1126 06:49:33.286284 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:33Z","lastTransitionTime":"2025-11-26T06:49:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:33 crc kubenswrapper[4492]: I1126 06:49:33.387835 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:33 crc kubenswrapper[4492]: I1126 06:49:33.387870 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:33 crc kubenswrapper[4492]: I1126 06:49:33.387881 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:33 crc kubenswrapper[4492]: I1126 06:49:33.387893 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:33 crc kubenswrapper[4492]: I1126 06:49:33.387902 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:33Z","lastTransitionTime":"2025-11-26T06:49:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:33 crc kubenswrapper[4492]: I1126 06:49:33.437837 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 26 06:49:33 crc kubenswrapper[4492]: E1126 06:49:33.437939 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 26 06:49:33 crc kubenswrapper[4492]: I1126 06:49:33.489146 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:33 crc kubenswrapper[4492]: I1126 06:49:33.489254 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:33 crc kubenswrapper[4492]: I1126 06:49:33.489285 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:33 crc kubenswrapper[4492]: I1126 06:49:33.489301 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:33 crc kubenswrapper[4492]: I1126 06:49:33.489313 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:33Z","lastTransitionTime":"2025-11-26T06:49:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:33 crc kubenswrapper[4492]: I1126 06:49:33.591466 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:33 crc kubenswrapper[4492]: I1126 06:49:33.591496 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:33 crc kubenswrapper[4492]: I1126 06:49:33.591506 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:33 crc kubenswrapper[4492]: I1126 06:49:33.591536 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:33 crc kubenswrapper[4492]: I1126 06:49:33.591556 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:33Z","lastTransitionTime":"2025-11-26T06:49:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:33 crc kubenswrapper[4492]: I1126 06:49:33.693164 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:33 crc kubenswrapper[4492]: I1126 06:49:33.693224 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:33 crc kubenswrapper[4492]: I1126 06:49:33.693236 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:33 crc kubenswrapper[4492]: I1126 06:49:33.693252 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:33 crc kubenswrapper[4492]: I1126 06:49:33.693264 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:33Z","lastTransitionTime":"2025-11-26T06:49:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:33 crc kubenswrapper[4492]: I1126 06:49:33.794659 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:33 crc kubenswrapper[4492]: I1126 06:49:33.794690 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:33 crc kubenswrapper[4492]: I1126 06:49:33.794704 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:33 crc kubenswrapper[4492]: I1126 06:49:33.794719 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:33 crc kubenswrapper[4492]: I1126 06:49:33.794730 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:33Z","lastTransitionTime":"2025-11-26T06:49:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:33 crc kubenswrapper[4492]: I1126 06:49:33.897099 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:33 crc kubenswrapper[4492]: I1126 06:49:33.897142 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:33 crc kubenswrapper[4492]: I1126 06:49:33.897154 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:33 crc kubenswrapper[4492]: I1126 06:49:33.897194 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:33 crc kubenswrapper[4492]: I1126 06:49:33.897208 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:33Z","lastTransitionTime":"2025-11-26T06:49:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:33 crc kubenswrapper[4492]: I1126 06:49:33.998961 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:33 crc kubenswrapper[4492]: I1126 06:49:33.999003 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:33 crc kubenswrapper[4492]: I1126 06:49:33.999018 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:33 crc kubenswrapper[4492]: I1126 06:49:33.999038 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:33 crc kubenswrapper[4492]: I1126 06:49:33.999050 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:33Z","lastTransitionTime":"2025-11-26T06:49:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:34 crc kubenswrapper[4492]: I1126 06:49:34.030330 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/1cc59fbe-82e1-406b-95b1-a26b6b8ef467-metrics-certs\") pod \"network-metrics-daemon-s4gtb\" (UID: \"1cc59fbe-82e1-406b-95b1-a26b6b8ef467\") " pod="openshift-multus/network-metrics-daemon-s4gtb"
Nov 26 06:49:34 crc kubenswrapper[4492]: E1126 06:49:34.030546 4492 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 26 06:49:34 crc kubenswrapper[4492]: E1126 06:49:34.030634 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/1cc59fbe-82e1-406b-95b1-a26b6b8ef467-metrics-certs podName:1cc59fbe-82e1-406b-95b1-a26b6b8ef467 nodeName:}" failed. No retries permitted until 2025-11-26 06:50:06.030612367 +0000 UTC m=+101.914500665 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/1cc59fbe-82e1-406b-95b1-a26b6b8ef467-metrics-certs") pod "network-metrics-daemon-s4gtb" (UID: "1cc59fbe-82e1-406b-95b1-a26b6b8ef467") : object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 26 06:49:34 crc kubenswrapper[4492]: I1126 06:49:34.100548 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:34 crc kubenswrapper[4492]: I1126 06:49:34.100577 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:34 crc kubenswrapper[4492]: I1126 06:49:34.100587 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:34 crc kubenswrapper[4492]: I1126 06:49:34.100603 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:34 crc kubenswrapper[4492]: I1126 06:49:34.100615 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:34Z","lastTransitionTime":"2025-11-26T06:49:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:34 crc kubenswrapper[4492]: I1126 06:49:34.202329 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:34 crc kubenswrapper[4492]: I1126 06:49:34.202365 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:34 crc kubenswrapper[4492]: I1126 06:49:34.202375 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:34 crc kubenswrapper[4492]: I1126 06:49:34.202390 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:34 crc kubenswrapper[4492]: I1126 06:49:34.202401 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:34Z","lastTransitionTime":"2025-11-26T06:49:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:34 crc kubenswrapper[4492]: I1126 06:49:34.304282 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:34 crc kubenswrapper[4492]: I1126 06:49:34.304309 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:34 crc kubenswrapper[4492]: I1126 06:49:34.304318 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:34 crc kubenswrapper[4492]: I1126 06:49:34.304331 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:34 crc kubenswrapper[4492]: I1126 06:49:34.304344 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:34Z","lastTransitionTime":"2025-11-26T06:49:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:34 crc kubenswrapper[4492]: I1126 06:49:34.406637 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:34 crc kubenswrapper[4492]: I1126 06:49:34.406667 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:34 crc kubenswrapper[4492]: I1126 06:49:34.406676 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:34 crc kubenswrapper[4492]: I1126 06:49:34.406687 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:34 crc kubenswrapper[4492]: I1126 06:49:34.406697 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:34Z","lastTransitionTime":"2025-11-26T06:49:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:34 crc kubenswrapper[4492]: I1126 06:49:34.438727 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 26 06:49:34 crc kubenswrapper[4492]: E1126 06:49:34.438852 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 26 06:49:34 crc kubenswrapper[4492]: I1126 06:49:34.439035 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-s4gtb"
Nov 26 06:49:34 crc kubenswrapper[4492]: I1126 06:49:34.439110 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 26 06:49:34 crc kubenswrapper[4492]: E1126 06:49:34.439224 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 26 06:49:34 crc kubenswrapper[4492]: E1126 06:49:34.439364 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-s4gtb" podUID="1cc59fbe-82e1-406b-95b1-a26b6b8ef467"
Nov 26 06:49:34 crc kubenswrapper[4492]: I1126 06:49:34.449960 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-nrzjd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"026c3325-a592-4828-8e4f-08bcb790014a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c4955932b597b4b409c6c0bd2195c7918b56f1db3aca639a0d47656173b6176\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70dcd5b71e0ed41855711d244d6628b740a92d6ada3fa3114a76c280b83e7402\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70dcd5b71e0ed41855711d244d6628b740a92d6ada3fa3114a76c280b83e7402\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://46f1a12a78f284278cf74713bf7a46f4d62a7f40258ec65ae217b5fbb5729525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://46f1a12a78f284278cf74713bf7a46f4d62a7f40258ec65ae217b5fbb5729525\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://233ce2ea0f4758076db5c922254232e5cccdc2466e03c6044a0ba4077dd71e8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://233ce2ea0f4758076db5c922254232e5cccdc2466e03c6044a0ba4077dd71e8a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1f662c3ac64d38717f3c8eb17f7ab22b7f9af88661f97ddae2caeec71b793a68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f662c3ac64d38717f3c8eb17f7ab22b7f9af88661f97ddae2caeec71b793a68\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://39153e3f02feb4f9af860e81e6e426354d4113d26135aadb9bc708dcb2cb1770\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://39153e3f02feb4f9af860e81e6e426354d4113d26135aadb9bc708dcb2cb1770\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b34363c342a19d8eda1d5be4bea825a4e0f9a4281915b6ae5d0cc5b00ad7c926\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b34363c342a19d8eda1d5be4bea825a4e0f9a4281915b6ae5d0cc5b00ad7c926\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-nrzjd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:34Z is after 2025-08-24T17:21:41Z"
Nov 26 06:49:34 crc kubenswrapper[4492]: I1126 06:49:34.457078 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6lnwf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0151e6e0-df4e-4482-9309-f8cce9bc6ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f91d7ead0eaa9a8c8d4ec6372d35236fc33de1f8606616efadfee2ec6a71324\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cmsnp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6lnwf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:34Z is after 2025-08-24T17:21:41Z"
Nov 26 06:49:34 crc kubenswrapper[4492]: I1126 06:49:34.465213 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"04bf18ad-d2a1-4b30-a3fa-2b6247363c82\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fad93839d2a51dffea51b659a6dcbfe24701e00ebb88e18329f7aa4351e1b4f5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lwjt9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://695ce8a08afa726c47c6aa48ddd828cbc420a9740de6cf165351e5bd68174a89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lwjt9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6blv7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:34Z is after 2025-08-24T17:21:41Z"
Nov 26 06:49:34 crc kubenswrapper[4492]: I1126 06:49:34.475344 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"779b4f9a-92b7-4dcc-938a-e4de5decd688\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e170e91f442c9f45c7adfc9a5f8435cb51135522d5ac61f29829834c1f797e0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a8187b933b520c7a9c1c7f798f841f3892c249f52eddd13c0c7585a8bc916f20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b87661ddeafdf124a87d6bc50755b340e32d88bbc35a005ae13aa66aa3b39ff4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\
\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a8bf4249c1551f054875ff3ef146502de6c99fd3afd10d78b41274196a35a6d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:24Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:34Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:34 crc kubenswrapper[4492]: I1126 06:49:34.488371 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b32864c2-0866-4642-a872-7a5109d6f84f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3cdba573f24fecefce899a977a585bd480de506f64dcd8af6fd7f32f945b844d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b85db4fc799f48153a0e9db0b5b8316762f221aa873c03d675beb5e9939377bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8088d16b18d99a32c41b63cbd6181314e805595697cbc5f122864dff6fe7b324\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2c51611cfe8bb2b69aeaa6d69f7e94deda73d79397c8e5a4ac4f0fe330a4b586\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2c51611cfe8bb2b69aeaa6d69f7e94deda73d79397c8e5a4ac4f0fe330a4b586\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:24Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:34Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:34 crc kubenswrapper[4492]: I1126 06:49:34.498964 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-5bshd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a471ac3f-0ac0-4110-94bb-194c0de0af26\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a91fba8751c53be54b0060bfc75906ab11b521770ca44425d8910fa13027c9d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\
"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gt98z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-5bshd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:34Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:34 crc kubenswrapper[4492]: I1126 06:49:34.507487 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2gwwb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d23da2c-14b7-4671-b87e-7506855ca163\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://56a756e2798f3a758f7cc404b3c1e543389f88510f1f1bcef6bd603086b5ac96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c7zdg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://23c2c8bf0201054d839f80e0d6ee1423ef1140d1c59512cd787edbad1b611b2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c7zdg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:49:01Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2gwwb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:34Z is after 2025-08-24T17:21:41Z" Nov 26 
06:49:34 crc kubenswrapper[4492]: I1126 06:49:34.508037 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:34 crc kubenswrapper[4492]: I1126 06:49:34.508074 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:34 crc kubenswrapper[4492]: I1126 06:49:34.508085 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:34 crc kubenswrapper[4492]: I1126 06:49:34.508100 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:34 crc kubenswrapper[4492]: I1126 06:49:34.508111 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:34Z","lastTransitionTime":"2025-11-26T06:49:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:34 crc kubenswrapper[4492]: I1126 06:49:34.516610 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"995e57c0-8e79-4857-8451-c7f7b51a05d3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70a3d5141782b7c4dcb2ef96a026e1fb4f4ef101c967f64a20ece097bbdf007f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1684e95c263df92c92efdd2240417c071f977b58d825abd2155277d7cce1fd57\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d
7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://15f4e01e3336948991062e895549de16371af485e1cc1f32591b5dc4fc62472b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef8e05b7e0643e9159acdd474c0f1ed97db182b0a7ed4f21b475ce6e4c051f52\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a6dd3695118a8c09585a7cfceb42ac5ae5898562c5f6442da6936f849a4e9f8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T06:48:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1126 06:48:41.573117 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1126 06:48:41.573321 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 06:48:41.575536 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3533228848/tls.crt::/tmp/serving-cert-3533228848/tls.key\\\\\\\"\\\\nI1126 06:48:41.958496 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 06:48:41.961105 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 06:48:41.961124 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 06:48:41.961145 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 06:48:41.961150 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 06:48:41.965068 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 06:48:41.965092 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:48:41.965097 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:48:41.965101 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 06:48:41.965103 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 06:48:41.965106 1 
secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 06:48:41.965108 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 06:48:41.965323 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 06:48:41.966098 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ca15652692a4dd6b01f7ec02b8ff37ac41ed740c4c47d451e0742aa80d0c028\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4421dd0243f7239dfa2ede847b9cb5230663e69a214dca3dfb1a4763ea76b233\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4421dd0243f7239dfa2ede847b9cb5230663e69a214dca3dfb1a4763ea76b233\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:24Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:34Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:34 crc kubenswrapper[4492]: I1126 06:49:34.524809 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:34Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:34 crc kubenswrapper[4492]: I1126 06:49:34.535451 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:34Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:34 crc kubenswrapper[4492]: I1126 06:49:34.543494 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2644f0895688786b5b70f08011457eed33cb0a7962ac6dde6b60dd3276497011\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://27d142c5328a6f659cd2cee0b6535403ccbfb07aee1ea29c928c9d80a847f4ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imag
eID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:34Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:34 crc kubenswrapper[4492]: I1126 06:49:34.551078 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:34Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:34 crc kubenswrapper[4492]: I1126 06:49:34.558299 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-hjxcm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfdb68d9-168b-4d04-a6ee-b2deef54a9ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f3b93237fa8e75f6423c8f194440aebb4fffec26f63b19b00396ee567fb454f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cpmw5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:50Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-hjxcm\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:34Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:34 crc kubenswrapper[4492]: I1126 06:49:34.568163 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://78117c73db01f54f893d52844cab11a6257a1f6b6b582fb751065e1acc329620\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:34Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:34 crc kubenswrapper[4492]: I1126 06:49:34.579893 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4aa19f79274209a31db5cfe0a8ff6f71000fc4efb2d65dfab3f719d3a7f1ee9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:34Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:34 crc kubenswrapper[4492]: I1126 06:49:34.592460 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b104695-0850-4fb3-b2f8-f764435f8694\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5de69ea04f427c1f151433ee53b119dfa7e3cd603f924ebca340c5050ff7d097\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://738cf2a56bd431ea3dcd29c8a6e66881e618cf6592af8ce1852be9776c41f803\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dba16d525497282969e2ead66e1d4886793234d0f4d2ef143a8246329131b046\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://501133ebc0e93cbba7bea713d813e6f41caa28286ed59ae8d328b6f9050a1e71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0301a329531b99f93a3e91f672edc4ce17ae03cfd5562f5802362423f11670fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1519223368c78fc663c8f6674135a7c574425dccbd0151d6663836b4715e9f9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c02ed7c2507748e0a1171eb6e43daac324db91b4
329f8ca3c2aa7cabac7205b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c02ed7c2507748e0a1171eb6e43daac324db91b4329f8ca3c2aa7cabac7205b4\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T06:49:10Z\\\",\\\"message\\\":\\\"ator/metrics LB template configs for network=default: []services.lbConfig(nil)\\\\nF1126 06:49:10.114199 6061 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:10Z is after 2025-08-24T17:21:41Z]\\\\nI1126 06:49:10.114196 6061 services_controller.go:451] Built service openshift-authentication-operator/metrics cluster-wide LB for network=default: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-authentication-operator/metrics_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-authentication-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:true, Empty\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:49:09Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-lghgp_openshift-ovn-kubernetes(9b104695-0850-4fb3-b2f8-f764435f8694)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dcc2ea28d7ab1193d0836b56704a623efefea918b62fde4a41c3d5466a531a39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4001ee998f435fc6249ef42a3a6a9a8cf4a5031eb8999158ba3ba6bfa41a4a43\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4001ee998f435fc6249ef42a3a6a9a8cf4a5031eb8999158ba3ba6bfa41a4a43\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lghgp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:34Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:34 crc kubenswrapper[4492]: I1126 06:49:34.600199 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-s4gtb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1cc59fbe-82e1-406b-95b1-a26b6b8ef467\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk29d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk29d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:49:02Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-s4gtb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:34Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:34 crc kubenswrapper[4492]: I1126 06:49:34.610487 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:34 crc kubenswrapper[4492]: I1126 06:49:34.610516 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:34 crc kubenswrapper[4492]: I1126 06:49:34.610574 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:34 crc kubenswrapper[4492]: I1126 06:49:34.610591 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:34 crc kubenswrapper[4492]: I1126 06:49:34.610601 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:34Z","lastTransitionTime":"2025-11-26T06:49:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:49:34 crc kubenswrapper[4492]: I1126 06:49:34.712482 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:34 crc kubenswrapper[4492]: I1126 06:49:34.712594 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:34 crc kubenswrapper[4492]: I1126 06:49:34.712766 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:34 crc kubenswrapper[4492]: I1126 06:49:34.712951 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:34 crc kubenswrapper[4492]: I1126 06:49:34.713143 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:34Z","lastTransitionTime":"2025-11-26T06:49:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:34 crc kubenswrapper[4492]: I1126 06:49:34.814682 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:34 crc kubenswrapper[4492]: I1126 06:49:34.814812 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:34 crc kubenswrapper[4492]: I1126 06:49:34.814888 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:34 crc kubenswrapper[4492]: I1126 06:49:34.814951 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:34 crc kubenswrapper[4492]: I1126 06:49:34.815005 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:34Z","lastTransitionTime":"2025-11-26T06:49:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:34 crc kubenswrapper[4492]: I1126 06:49:34.918250 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:34 crc kubenswrapper[4492]: I1126 06:49:34.918280 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:34 crc kubenswrapper[4492]: I1126 06:49:34.918290 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:34 crc kubenswrapper[4492]: I1126 06:49:34.918306 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:34 crc kubenswrapper[4492]: I1126 06:49:34.918317 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:34Z","lastTransitionTime":"2025-11-26T06:49:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:49:35 crc kubenswrapper[4492]: I1126 06:49:35.019486 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:35 crc kubenswrapper[4492]: I1126 06:49:35.019510 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:35 crc kubenswrapper[4492]: I1126 06:49:35.019519 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:35 crc kubenswrapper[4492]: I1126 06:49:35.019528 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:35 crc kubenswrapper[4492]: I1126 06:49:35.019538 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:35Z","lastTransitionTime":"2025-11-26T06:49:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:35 crc kubenswrapper[4492]: I1126 06:49:35.120873 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:35 crc kubenswrapper[4492]: I1126 06:49:35.120905 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:35 crc kubenswrapper[4492]: I1126 06:49:35.120915 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:35 crc kubenswrapper[4492]: I1126 06:49:35.120924 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:35 crc kubenswrapper[4492]: I1126 06:49:35.120935 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:35Z","lastTransitionTime":"2025-11-26T06:49:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:35 crc kubenswrapper[4492]: I1126 06:49:35.223272 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:35 crc kubenswrapper[4492]: I1126 06:49:35.223316 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:35 crc kubenswrapper[4492]: I1126 06:49:35.223328 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:35 crc kubenswrapper[4492]: I1126 06:49:35.223343 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:35 crc kubenswrapper[4492]: I1126 06:49:35.223353 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:35Z","lastTransitionTime":"2025-11-26T06:49:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:49:35 crc kubenswrapper[4492]: I1126 06:49:35.325037 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:35 crc kubenswrapper[4492]: I1126 06:49:35.325097 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:35 crc kubenswrapper[4492]: I1126 06:49:35.325108 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:35 crc kubenswrapper[4492]: I1126 06:49:35.325363 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:35 crc kubenswrapper[4492]: I1126 06:49:35.325373 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:35Z","lastTransitionTime":"2025-11-26T06:49:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:35 crc kubenswrapper[4492]: I1126 06:49:35.426945 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:35 crc kubenswrapper[4492]: I1126 06:49:35.426974 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:35 crc kubenswrapper[4492]: I1126 06:49:35.426985 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:35 crc kubenswrapper[4492]: I1126 06:49:35.427008 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:35 crc kubenswrapper[4492]: I1126 06:49:35.427017 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:35Z","lastTransitionTime":"2025-11-26T06:49:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:35 crc kubenswrapper[4492]: I1126 06:49:35.438315 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 06:49:35 crc kubenswrapper[4492]: E1126 06:49:35.438443 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 06:49:35 crc kubenswrapper[4492]: I1126 06:49:35.528509 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:35 crc kubenswrapper[4492]: I1126 06:49:35.528538 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:35 crc kubenswrapper[4492]: I1126 06:49:35.528546 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:35 crc kubenswrapper[4492]: I1126 06:49:35.528556 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:35 crc kubenswrapper[4492]: I1126 06:49:35.528564 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:35Z","lastTransitionTime":"2025-11-26T06:49:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:35 crc kubenswrapper[4492]: I1126 06:49:35.630016 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:35 crc kubenswrapper[4492]: I1126 06:49:35.630062 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:35 crc kubenswrapper[4492]: I1126 06:49:35.630070 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:35 crc kubenswrapper[4492]: I1126 06:49:35.630079 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:35 crc kubenswrapper[4492]: I1126 06:49:35.630086 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:35Z","lastTransitionTime":"2025-11-26T06:49:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:49:35 crc kubenswrapper[4492]: I1126 06:49:35.731783 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:35 crc kubenswrapper[4492]: I1126 06:49:35.731816 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:35 crc kubenswrapper[4492]: I1126 06:49:35.731824 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:35 crc kubenswrapper[4492]: I1126 06:49:35.731833 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:35 crc kubenswrapper[4492]: I1126 06:49:35.731843 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:35Z","lastTransitionTime":"2025-11-26T06:49:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:35 crc kubenswrapper[4492]: I1126 06:49:35.779090 4492 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-5bshd_a471ac3f-0ac0-4110-94bb-194c0de0af26/kube-multus/0.log" Nov 26 06:49:35 crc kubenswrapper[4492]: I1126 06:49:35.779130 4492 generic.go:334] "Generic (PLEG): container finished" podID="a471ac3f-0ac0-4110-94bb-194c0de0af26" containerID="9a91fba8751c53be54b0060bfc75906ab11b521770ca44425d8910fa13027c9d" exitCode=1 Nov 26 06:49:35 crc kubenswrapper[4492]: I1126 06:49:35.779153 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-5bshd" event={"ID":"a471ac3f-0ac0-4110-94bb-194c0de0af26","Type":"ContainerDied","Data":"9a91fba8751c53be54b0060bfc75906ab11b521770ca44425d8910fa13027c9d"} Nov 26 06:49:35 crc kubenswrapper[4492]: I1126 06:49:35.779435 4492 scope.go:117] "RemoveContainer" containerID="9a91fba8751c53be54b0060bfc75906ab11b521770ca44425d8910fa13027c9d" Nov 26 06:49:35 crc kubenswrapper[4492]: I1126 06:49:35.802370 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"995e57c0-8e79-4857-8451-c7f7b51a05d3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70a3d5141782b7c4dcb2ef96a026e1fb4f4ef101c967f64a20ece097bbdf007f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1684e95c263df92c92efdd2240417c071f977b58d825abd2155277d7cce1fd57\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://15f4e01e3336948991062e895549de16371af485e1cc1f32591b5dc4fc62472b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef8e05b7e0643e9159acdd474c0f1ed97db182b0a7ed4f21b475ce6e4c051f52\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a6dd3695118a8c09585a7cfceb42ac5ae5898562c5f6442da6936f849a4e9f8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T06:48:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1126 06:48:41.573117 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1126 06:48:41.573321 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 06:48:41.575536 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3533228848/tls.crt::/tmp/serving-cert-3533228848/tls.key\\\\\\\"\\\\nI1126 06:48:41.958496 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 06:48:41.961105 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 06:48:41.961124 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 06:48:41.961145 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 06:48:41.961150 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 06:48:41.965068 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 06:48:41.965092 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:48:41.965097 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:48:41.965101 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 06:48:41.965103 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 06:48:41.965106 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 06:48:41.965108 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 06:48:41.965323 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 06:48:41.966098 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ca15652692a4dd6b01f7ec02b8ff37ac41ed740c4c47d451e0742aa80d0c028\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4421dd0243f7239dfa2ede847b9cb5230663e69a214dca3dfb1a4763ea76b233\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4421dd0243f7239dfa2ede847b9cb5230663e69a214dca3dfb1a4763ea76b233\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:24Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:35Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:35 crc kubenswrapper[4492]: I1126 06:49:35.813649 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:35Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:35 crc kubenswrapper[4492]: I1126 06:49:35.823725 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:35Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:35 crc kubenswrapper[4492]: I1126 06:49:35.834382 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2644f0895688786b5b70f08011457eed33cb0a7962ac6dde6b60dd3276497011\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://27d142c5328a6f659cd2cee0b6535403ccbfb07aee1ea29c928c9d80a847f4ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:35Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:35 crc kubenswrapper[4492]: I1126 06:49:35.834472 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:35 crc kubenswrapper[4492]: I1126 06:49:35.834499 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:35 crc kubenswrapper[4492]: I1126 06:49:35.834519 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:35 crc kubenswrapper[4492]: I1126 06:49:35.834531 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:35 crc kubenswrapper[4492]: I1126 06:49:35.834540 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:35Z","lastTransitionTime":"2025-11-26T06:49:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:35 crc kubenswrapper[4492]: I1126 06:49:35.843657 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:35Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:35 crc kubenswrapper[4492]: I1126 06:49:35.850657 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-hjxcm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfdb68d9-168b-4d04-a6ee-b2deef54a9ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f3b93237fa8e75f6423c8f194440aebb4fffec26f63b19b00396ee567fb454f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cpmw5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:50Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-hjxcm\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:35Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:35 crc kubenswrapper[4492]: I1126 06:49:35.858897 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2gwwb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d23da2c-14b7-4671-b87e-7506855ca163\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://56a756e2798f3a758f7cc404b3c1e543389f88510f1f1bcef6bd603086b5ac96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c7zdg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://23c2c8bf0201054d839f80e0d6ee1423ef1140d1c59512cd787edbad1b611b2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c7zdg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\
\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:49:01Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2gwwb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:35Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:35 crc kubenswrapper[4492]: I1126 06:49:35.868419 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://78117c73db01f54f893d52844cab11a6257a1f6b6b582fb751065e1acc329620\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:35Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:35 crc kubenswrapper[4492]: I1126 06:49:35.877100 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4aa19f79274209a31db5cfe0a8ff6f71000fc4efb2d65dfab3f719d3a7f1ee9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:35Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:35 crc kubenswrapper[4492]: I1126 06:49:35.890380 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b104695-0850-4fb3-b2f8-f764435f8694\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5de69ea04f427c1f151433ee53b119dfa7e3cd603f924ebca340c5050ff7d097\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://738cf2a56bd431ea3dcd29c8a6e66881e618cf6592af8ce1852be9776c41f803\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dba16d525497282969e2ead66e1d4886793234d0f4d2ef143a8246329131b046\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://501133ebc0e93cbba7bea713d813e6f41caa28286ed59ae8d328b6f9050a1e71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0301a329531b99f93a3e91f672edc4ce17ae03cfd5562f5802362423f11670fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1519223368c78fc663c8f6674135a7c574425dccbd0151d6663836b4715e9f9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c02ed7c2507748e0a1171eb6e43daac324db91b4
329f8ca3c2aa7cabac7205b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c02ed7c2507748e0a1171eb6e43daac324db91b4329f8ca3c2aa7cabac7205b4\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T06:49:10Z\\\",\\\"message\\\":\\\"ator/metrics LB template configs for network=default: []services.lbConfig(nil)\\\\nF1126 06:49:10.114199 6061 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:10Z is after 2025-08-24T17:21:41Z]\\\\nI1126 06:49:10.114196 6061 services_controller.go:451] Built service openshift-authentication-operator/metrics cluster-wide LB for network=default: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-authentication-operator/metrics_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-authentication-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:true, Empty\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:49:09Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-lghgp_openshift-ovn-kubernetes(9b104695-0850-4fb3-b2f8-f764435f8694)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dcc2ea28d7ab1193d0836b56704a623efefea918b62fde4a41c3d5466a531a39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4001ee998f435fc6249ef42a3a6a9a8cf4a5031eb8999158ba3ba6bfa41a4a43\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4001ee998f435fc6249ef42a3a6a9a8cf4a5031eb8999158ba3ba6bfa41a4a43\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lghgp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:35Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:35 crc kubenswrapper[4492]: I1126 06:49:35.898023 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-s4gtb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1cc59fbe-82e1-406b-95b1-a26b6b8ef467\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk29d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk29d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:49:02Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-s4gtb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:35Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:35 crc kubenswrapper[4492]: I1126 06:49:35.907922 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-nrzjd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"026c3325-a592-4828-8e4f-08bcb790014a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c4955932b597b4b409c6c0bd2195c7918b56f1db3aca639a0d47656173b6176\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70dcd5b71e0ed41855711d244d6628b740a92d6ada3fa3114a76c280b83e7402\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70dcd5b71e0ed41855711d244d6628b740a92d6ada3fa3114a76c280b83e7402\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://46f1a12a78f284278cf74713bf7a46f4d62a7f40258ec65ae217b5fbb5729525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://46f1a12a78f284278cf74713bf7a46f4d62a7f40258ec65ae217b5fbb5729525\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://233ce2ea0f4758076db5c922254232e5cccdc2466e03c6044a0ba4077dd71e8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://233ce2ea0f4758076db5c922254232e5cccdc2466e03c6044a0ba4077dd71e8a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1f662c3ac64d38717f3c8eb17f7ab22b7f9af88661f97ddae2caeec71b793a68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f662c3ac64d38717f3c8eb17f7ab22b7f9af88661f97ddae2caeec71b793a68\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://39153e3f02feb4f9af860e81e6e426354d4113d26135aadb9bc708dcb2cb1770\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://39153e3f02feb4f9af860e81e6e426354d4113d26135aadb9bc708dcb2cb1770\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b34363c342a19d8eda1d5be4bea825a4e0f9a4281915b6ae5d0cc5b00ad7c926\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b34363c342a19d8eda1d5be4bea825a4e0f9a4281915b6ae5d0cc5b00ad7c926\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-nrzjd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:35Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:35 crc kubenswrapper[4492]: I1126 06:49:35.915471 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6lnwf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0151e6e0-df4e-4482-9309-f8cce9bc6ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f91d7ead0eaa9a8c8d4ec6372d35236fc33de1f8606616efadfee2ec6a71324\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cmsnp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6lnwf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:35Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:35 crc kubenswrapper[4492]: I1126 06:49:35.925949 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"04bf18ad-d2a1-4b30-a3fa-2b6247363c82\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fad93839d2a51dffea51b659a6dcbfe24701e00ebb88e18329f7aa4351e1b4f5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lwjt9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://695ce8a08afa726c47c6aa48ddd828cbc420a9740de6cf165351e5bd68174a89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lwjt9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6blv7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:35Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:35 crc kubenswrapper[4492]: I1126 06:49:35.934350 4492 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"779b4f9a-92b7-4dcc-938a-e4de5decd688\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e170e91f442c9f45c7adfc9a5f8435cb51135522d5ac61f29829834c1f797e0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a8187b933b520c7a9c1c7f798f841f3892c249f52eddd13c0c7585a8bc916f20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b87661ddeafdf124a87d6bc50755b340e32d88bbc35a005ae13aa66aa3b39ff4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a8bf4249c1551f054875ff3ef1
46502de6c99fd3afd10d78b41274196a35a6d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:24Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:35Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:35 crc kubenswrapper[4492]: I1126 06:49:35.936556 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:35 crc kubenswrapper[4492]: I1126 06:49:35.936589 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:35 crc kubenswrapper[4492]: I1126 06:49:35.936600 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:35 crc kubenswrapper[4492]: I1126 06:49:35.936613 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:35 crc kubenswrapper[4492]: I1126 06:49:35.936629 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:35Z","lastTransitionTime":"2025-11-26T06:49:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:49:35 crc kubenswrapper[4492]: I1126 06:49:35.942293 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b32864c2-0866-4642-a872-7a5109d6f84f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3cdba573f24fecefce899a977a585bd480de506f64dcd8af6fd7f32f945b844d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b85db4fc799f48153a0e9db0b5b8316762f221aa873c03d675beb5e9939377bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8088d16b18d99a32c41b63cbd6181314e805595697cbc5f122864dff6fe7b324\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"
cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2c51611cfe8bb2b69aeaa6d69f7e94deda73d79397c8e5a4ac4f0fe330a4b586\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2c51611cfe8bb2b69aeaa6d69f7e94deda73d79397c8e5a4ac4f0fe330a4b586\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:24Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:35Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:35 crc kubenswrapper[4492]: I1126 06:49:35.950724 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-5bshd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a471ac3f-0ac0-4110-94bb-194c0de0af26\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:35Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:35Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a91fba8751c53be54b0060bfc75906ab11b521770ca44425d8910fa13027c9d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9a91fba8751c53be54b0060bfc75906ab11b521770ca44425d8910fa13027c9d\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T06:49:35Z\\\",\\\"message\\\":\\\"2025-11-26T06:48:49+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_11d86e64-dae3-4806-bca9-064973c40277\\\\n2025-11-26T06:48:49+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_11d86e64-dae3-4806-bca9-064973c40277 to /host/opt/cni/bin/\\\\n2025-11-26T06:48:50Z [verbose] multus-daemon started\\\\n2025-11-26T06:48:50Z [verbose] Readiness Indicator file check\\\\n2025-11-26T06:49:35Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gt98z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-5bshd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:35Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:36 crc kubenswrapper[4492]: I1126 06:49:36.038479 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:36 crc kubenswrapper[4492]: I1126 06:49:36.038530 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:36 crc kubenswrapper[4492]: I1126 06:49:36.038545 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:36 crc kubenswrapper[4492]: I1126 06:49:36.038564 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:36 crc kubenswrapper[4492]: I1126 06:49:36.038597 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:36Z","lastTransitionTime":"2025-11-26T06:49:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:36 crc kubenswrapper[4492]: I1126 06:49:36.141157 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:36 crc kubenswrapper[4492]: I1126 06:49:36.141334 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:36 crc kubenswrapper[4492]: I1126 06:49:36.141399 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:36 crc kubenswrapper[4492]: I1126 06:49:36.141461 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:36 crc kubenswrapper[4492]: I1126 06:49:36.141525 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:36Z","lastTransitionTime":"2025-11-26T06:49:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:49:36 crc kubenswrapper[4492]: I1126 06:49:36.181552 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:36 crc kubenswrapper[4492]: I1126 06:49:36.181585 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:36 crc kubenswrapper[4492]: I1126 06:49:36.181596 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:36 crc kubenswrapper[4492]: I1126 06:49:36.181617 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:36 crc kubenswrapper[4492]: I1126 06:49:36.181628 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:36Z","lastTransitionTime":"2025-11-26T06:49:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:36 crc kubenswrapper[4492]: E1126 06:49:36.194910 4492 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148056Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608856Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:36Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:36Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:36Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:36Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:36Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:36Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:36Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:36Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[... image list identical in every retry; elided ...],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"5a30a4c6-2314-4103-8c18-44e795d62516\\\",\\\"systemUUID\\\":\\\"836cf739-0185-4d24-bd92-dec4516ccf4f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:36Z is after 2025-08-24T17:21:41Z"
Nov 26 06:49:36 crc kubenswrapper[4492]: I1126 06:49:36.197802 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:36 crc kubenswrapper[4492]: I1126 06:49:36.197839 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
event="NodeHasNoDiskPressure" Nov 26 06:49:36 crc kubenswrapper[4492]: I1126 06:49:36.197850 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:36 crc kubenswrapper[4492]: I1126 06:49:36.197868 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:36 crc kubenswrapper[4492]: I1126 06:49:36.197879 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:36Z","lastTransitionTime":"2025-11-26T06:49:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:36 crc kubenswrapper[4492]: E1126 06:49:36.207508 4492 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148056Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608856Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:36Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:36Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:36Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:36Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:36Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:36Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:36Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:36Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"5a30a4c6-2314-4103-8c18-44e795d62516\\\",\\\"systemUUID\\\":\\\"836cf739-0185-4d24-bd92-dec4516ccf4f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:36Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:36 crc kubenswrapper[4492]: I1126 06:49:36.211101 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:36 crc kubenswrapper[4492]: I1126 06:49:36.211133 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 06:49:36 crc kubenswrapper[4492]: I1126 06:49:36.211142 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:36 crc kubenswrapper[4492]: I1126 06:49:36.211154 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:36 crc kubenswrapper[4492]: I1126 06:49:36.211163 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:36Z","lastTransitionTime":"2025-11-26T06:49:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:36 crc kubenswrapper[4492]: E1126 06:49:36.219121 4492 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148056Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608856Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:36Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:36Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:36Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:36Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:36Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:36Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:36Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:36Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"5a30a4c6-2314-4103-8c18-44e795d62516\\\",\\\"systemUUID\\\":\\\"836cf739-0185-4d24-bd92-dec4516ccf4f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:36Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:36 crc kubenswrapper[4492]: I1126 06:49:36.221620 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:36 crc kubenswrapper[4492]: I1126 06:49:36.221662 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 06:49:36 crc kubenswrapper[4492]: I1126 06:49:36.221674 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:36 crc kubenswrapper[4492]: I1126 06:49:36.221696 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:36 crc kubenswrapper[4492]: I1126 06:49:36.221707 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:36Z","lastTransitionTime":"2025-11-26T06:49:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:36 crc kubenswrapper[4492]: E1126 06:49:36.230253 4492 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148056Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608856Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:36Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:36Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:36Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:36Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:36Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:36Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:36Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:36Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"5a30a4c6-2314-4103-8c18-44e795d62516\\\",\\\"systemUUID\\\":\\\"836cf739-0185-4d24-bd92-dec4516ccf4f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:36Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:36 crc kubenswrapper[4492]: I1126 06:49:36.232830 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:36 crc kubenswrapper[4492]: I1126 06:49:36.232916 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 06:49:36 crc kubenswrapper[4492]: I1126 06:49:36.232993 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:36 crc kubenswrapper[4492]: I1126 06:49:36.233076 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:36 crc kubenswrapper[4492]: I1126 06:49:36.233143 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:36Z","lastTransitionTime":"2025-11-26T06:49:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:36 crc kubenswrapper[4492]: E1126 06:49:36.241242 4492 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148056Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608856Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:36Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:36Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:36Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:36Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:36Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:36Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:36Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:36Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"5a30a4c6-2314-4103-8c18-44e795d62516\\\",\\\"systemUUID\\\":\\\"836cf739-0185-4d24-bd92-dec4516ccf4f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:36Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:36 crc kubenswrapper[4492]: E1126 06:49:36.241465 4492 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 26 06:49:36 crc kubenswrapper[4492]: I1126 06:49:36.243230 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
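Every status-update attempt above fails the same way: the API server cannot call the node.network-node-identity.openshift.io webhook on 127.0.0.1:9743 because the webhook's serving certificate expired on 2025-08-24T17:21:41Z while the node clock reads 2025-11-26; after its fixed retry budget of five attempts the kubelet gives up with "update node status exceeds retry count". A minimal sketch for confirming the certificate window from the node itself, assuming the webhook is still listening on 127.0.0.1:9743 as logged, that it completes a handshake without demanding a client certificate, and that the third-party "cryptography" package is installed:

#!/usr/bin/env python3
# Sketch: print the validity window of the certificate served by the
# node-identity webhook, to confirm the x509 expiry reported above.
import ssl
from cryptography import x509

HOST, PORT = "127.0.0.1", 9743  # assumption: taken from the webhook URL in the log

# get_server_certificate skips verification when no CA bundle is given,
# so an expired certificate does not abort the handshake.
pem = ssl.get_server_certificate((HOST, PORT))
cert = x509.load_pem_x509_certificate(pem.encode("ascii"))

print("subject:  ", cert.subject.rfc4514_string())
print("notBefore:", cert.not_valid_before)  # cryptography >= 42 also offers *_utc variants
print("notAfter: ", cert.not_valid_after)

If the diagnosis above is right, notAfter should print as 2025-08-24 17:21:41, well before the current time in the x509 error.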
event="NodeHasSufficientMemory" Nov 26 06:49:36 crc kubenswrapper[4492]: I1126 06:49:36.243278 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:36 crc kubenswrapper[4492]: I1126 06:49:36.243290 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:36 crc kubenswrapper[4492]: I1126 06:49:36.243302 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:36 crc kubenswrapper[4492]: I1126 06:49:36.243312 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:36Z","lastTransitionTime":"2025-11-26T06:49:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:36 crc kubenswrapper[4492]: I1126 06:49:36.345066 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:36 crc kubenswrapper[4492]: I1126 06:49:36.345102 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:36 crc kubenswrapper[4492]: I1126 06:49:36.345111 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:36 crc kubenswrapper[4492]: I1126 06:49:36.345126 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:36 crc kubenswrapper[4492]: I1126 06:49:36.345136 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:36Z","lastTransitionTime":"2025-11-26T06:49:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:36 crc kubenswrapper[4492]: I1126 06:49:36.438311 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 06:49:36 crc kubenswrapper[4492]: I1126 06:49:36.438337 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-s4gtb" Nov 26 06:49:36 crc kubenswrapper[4492]: E1126 06:49:36.438418 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 06:49:36 crc kubenswrapper[4492]: I1126 06:49:36.438444 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 06:49:36 crc kubenswrapper[4492]: E1126 06:49:36.439281 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 06:49:36 crc kubenswrapper[4492]: E1126 06:49:36.439530 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-s4gtb" podUID="1cc59fbe-82e1-406b-95b1-a26b6b8ef467" Nov 26 06:49:36 crc kubenswrapper[4492]: I1126 06:49:36.446466 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:36 crc kubenswrapper[4492]: I1126 06:49:36.446492 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:36 crc kubenswrapper[4492]: I1126 06:49:36.446501 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:36 crc kubenswrapper[4492]: I1126 06:49:36.446513 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:36 crc kubenswrapper[4492]: I1126 06:49:36.446523 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:36Z","lastTransitionTime":"2025-11-26T06:49:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:36 crc kubenswrapper[4492]: I1126 06:49:36.450366 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc"] Nov 26 06:49:36 crc kubenswrapper[4492]: I1126 06:49:36.547971 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:36 crc kubenswrapper[4492]: I1126 06:49:36.548000 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:36 crc kubenswrapper[4492]: I1126 06:49:36.548011 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:36 crc kubenswrapper[4492]: I1126 06:49:36.548022 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:36 crc kubenswrapper[4492]: I1126 06:49:36.548030 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:36Z","lastTransitionTime":"2025-11-26T06:49:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:49:36 crc kubenswrapper[4492]: I1126 06:49:36.649683 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:36 crc kubenswrapper[4492]: I1126 06:49:36.649720 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:36 crc kubenswrapper[4492]: I1126 06:49:36.649729 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:36 crc kubenswrapper[4492]: I1126 06:49:36.649766 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:36 crc kubenswrapper[4492]: I1126 06:49:36.649778 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:36Z","lastTransitionTime":"2025-11-26T06:49:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:36 crc kubenswrapper[4492]: I1126 06:49:36.751590 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:36 crc kubenswrapper[4492]: I1126 06:49:36.751619 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:36 crc kubenswrapper[4492]: I1126 06:49:36.751628 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:36 crc kubenswrapper[4492]: I1126 06:49:36.751639 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:36 crc kubenswrapper[4492]: I1126 06:49:36.751649 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:36Z","lastTransitionTime":"2025-11-26T06:49:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:49:36 crc kubenswrapper[4492]: I1126 06:49:36.783259 4492 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-5bshd_a471ac3f-0ac0-4110-94bb-194c0de0af26/kube-multus/0.log" Nov 26 06:49:36 crc kubenswrapper[4492]: I1126 06:49:36.783435 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-5bshd" event={"ID":"a471ac3f-0ac0-4110-94bb-194c0de0af26","Type":"ContainerStarted","Data":"d93d67e2acbb2cff41dd4d5bc98ffe7cd68f7fd393e4fab2fc974ae4de103ca3"} Nov 26 06:49:36 crc kubenswrapper[4492]: I1126 06:49:36.794009 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:36Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:36 crc kubenswrapper[4492]: I1126 06:49:36.803630 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-hjxcm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfdb68d9-168b-4d04-a6ee-b2deef54a9ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f3b93237fa8e75f6423c8f194440aebb4fffec26f63b19b00396ee567fb454f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cpmw5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:50Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-hjxcm\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:36Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:36 crc kubenswrapper[4492]: I1126 06:49:36.811764 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2gwwb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d23da2c-14b7-4671-b87e-7506855ca163\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://56a756e2798f3a758f7cc404b3c1e543389f88510f1f1bcef6bd603086b5ac96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c7zdg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://23c2c8bf0201054d839f80e0d6ee1423ef1140d1c59512cd787edbad1b611b2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c7zdg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\
\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:49:01Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2gwwb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:36Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:36 crc kubenswrapper[4492]: I1126 06:49:36.822062 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"995e57c0-8e79-4857-8451-c7f7b51a05d3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70a3d5141782b7c4dcb2ef96a026e1fb4f4ef101c967f64a20ece097bbdf007f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1684e95c263df92c92efdd2240417c071f977b58d825abd2155277d7cce1fd57\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://15f4e01e3336948991062e895549de16371af485e1cc1f32591b5dc4fc62472b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-oper
ator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef8e05b7e0643e9159acdd474c0f1ed97db182b0a7ed4f21b475ce6e4c051f52\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a6dd3695118a8c09585a7cfceb42ac5ae5898562c5f6442da6936f849a4e9f8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T06:48:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1126 06:48:41.573117 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1126 06:48:41.573321 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 06:48:41.575536 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3533228848/tls.crt::/tmp/serving-cert-3533228848/tls.key\\\\\\\"\\\\nI1126 06:48:41.958496 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 06:48:41.961105 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 06:48:41.961124 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 06:48:41.961145 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 06:48:41.961150 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 06:48:41.965068 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 06:48:41.965092 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:48:41.965097 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:48:41.965101 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 06:48:41.965103 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 06:48:41.965106 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 06:48:41.965108 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 06:48:41.965323 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 06:48:41.966098 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ca15652692a4dd6b01f7ec02b8ff37ac41ed740c4c47d451e0742aa80d0c028\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4421dd0243f7239dfa2ede847b9cb5230663e69a214dca3dfb1a4763ea76b233\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4421dd0243f7239dfa2ede847b9cb5230663e69a214dca3dfb1a4763ea76b233\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:24Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:36Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:36 crc kubenswrapper[4492]: I1126 06:49:36.831286 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:36Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:36 crc kubenswrapper[4492]: I1126 06:49:36.839713 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:36Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:36 crc kubenswrapper[4492]: I1126 06:49:36.852147 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2644f0895688786b5b70f08011457eed33cb0a7962ac6dde6b60dd3276497011\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://27d142c5328a6f659cd2cee0b6535403ccbfb07aee1ea29c928c9d80a847f4ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:36Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:36 crc kubenswrapper[4492]: I1126 06:49:36.853329 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:36 crc kubenswrapper[4492]: I1126 06:49:36.853356 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:36 crc kubenswrapper[4492]: I1126 06:49:36.853367 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:36 crc kubenswrapper[4492]: I1126 06:49:36.853382 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:36 crc kubenswrapper[4492]: I1126 06:49:36.853392 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:36Z","lastTransitionTime":"2025-11-26T06:49:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:49:36 crc kubenswrapper[4492]: I1126 06:49:36.859632 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-s4gtb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1cc59fbe-82e1-406b-95b1-a26b6b8ef467\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk29d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk29d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:49:02Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-s4gtb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:36Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:36 crc kubenswrapper[4492]: I1126 06:49:36.869217 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://78117c73db01f54f893d52844cab11a6257a1f6b6b582fb751065e1acc329620\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:36Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:36 crc kubenswrapper[4492]: I1126 06:49:36.876455 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4aa19f79274209a31db5cfe0a8ff6f71000fc4efb2d65dfab3f719d3a7f1ee9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:36Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:36 crc kubenswrapper[4492]: I1126 06:49:36.888236 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b104695-0850-4fb3-b2f8-f764435f8694\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5de69ea04f427c1f151433ee53b119dfa7e3cd603f924ebca340c5050ff7d097\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://738cf2a56bd431ea3dcd29c8a6e66881e618cf6592af8ce1852be9776c41f803\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dba16d525497282969e2ead66e1d4886793234d0f4d2ef143a8246329131b046\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://501133ebc0e93cbba7bea713d813e6f41caa28286ed59ae8d328b6f9050a1e71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0301a329531b99f93a3e91f672edc4ce17ae03cfd5562f5802362423f11670fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1519223368c78fc663c8f6674135a7c574425dccbd0151d6663836b4715e9f9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c02ed7c2507748e0a1171eb6e43daac324db91b4
329f8ca3c2aa7cabac7205b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c02ed7c2507748e0a1171eb6e43daac324db91b4329f8ca3c2aa7cabac7205b4\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T06:49:10Z\\\",\\\"message\\\":\\\"ator/metrics LB template configs for network=default: []services.lbConfig(nil)\\\\nF1126 06:49:10.114199 6061 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:10Z is after 2025-08-24T17:21:41Z]\\\\nI1126 06:49:10.114196 6061 services_controller.go:451] Built service openshift-authentication-operator/metrics cluster-wide LB for network=default: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-authentication-operator/metrics_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-authentication-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:true, Empty\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:49:09Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-lghgp_openshift-ovn-kubernetes(9b104695-0850-4fb3-b2f8-f764435f8694)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dcc2ea28d7ab1193d0836b56704a623efefea918b62fde4a41c3d5466a531a39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4001ee998f435fc6249ef42a3a6a9a8cf4a5031eb8999158ba3ba6bfa41a4a43\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4001ee998f435fc6249ef42a3a6a9a8cf4a5031eb8999158ba3ba6bfa41a4a43\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lghgp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:36Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:36 crc kubenswrapper[4492]: I1126 06:49:36.897982 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-nrzjd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"026c3325-a592-4828-8e4f-08bcb790014a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c4955932b597b4b409c6c0bd2195c7918b56f1db3aca639a0d47656173b6176\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"
}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70dcd5b71e0ed41855711d244d6628b740a92d6ada3fa3114a76c280b83e7402\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70dcd5b71e0ed41855711d244d6628b740a92d6ada3fa3114a76c280b83e7402\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://46f1a12a78f284278cf74713bf7a46f4d62a7f40258ec65ae217b5fbb5729525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://46f1a12a78f284278cf74713bf7a46f4d62a7f40258ec65ae217b5fbb5729525\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://233ce2ea0f4758076db5c922254232e5cccdc2466e03c6044a0ba4077dd71e8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://233ce2ea0f4758076db5c922254232e5cccdc2466e03c6044a0ba4077dd71e8a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt
\\\":\\\"2025-11-26T06:48:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1f662c3ac64d38717f3c8eb17f7ab22b7f9af88661f97ddae2caeec71b793a68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f662c3ac64d38717f3c8eb17f7ab22b7f9af88661f97ddae2caeec71b793a68\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://39153e3f02feb4f9af860e81e6e426354d4113d26135aadb9bc708dcb2cb1770\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://39153e3f02feb4f9af860e81e6e426354d4113d26135aadb9bc708dcb2cb1770\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b34363c342a19d8eda1d5be4bea825a4e0f9a4281915b6ae5d0cc5b00ad7c926\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"nam
e\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b34363c342a19d8eda1d5be4bea825a4e0f9a4281915b6ae5d0cc5b00ad7c926\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-nrzjd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:36Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:36 crc kubenswrapper[4492]: I1126 06:49:36.904632 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6lnwf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0151e6e0-df4e-4482-9309-f8cce9bc6ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f91d7ead0eaa9a8c8d4ec6372d35236fc33de1f8606616efadfee2ec6a71324\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cmsnp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\
":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6lnwf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:36Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:36 crc kubenswrapper[4492]: I1126 06:49:36.912050 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"04bf18ad-d2a1-4b30-a3fa-2b6247363c82\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fad93839d2a51dffea51b659a6dcbfe24701e00ebb88e18329f7aa4351e1b4f5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lwjt9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://695ce8a08afa726c47c6aa48ddd828cbc420a9740de6cf165351e5bd68174a89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lwjt9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"
192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6blv7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:36Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:36 crc kubenswrapper[4492]: I1126 06:49:36.918995 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2cbf75e9-4b7e-454b-bcff-ebb0f537bb0e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://390b1499bd9aae353a574f1d5ca4243dda7d4576837cf40a0118c53cff23ebdb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d88bf1783acd582780b9f9208f310d28ebb08a2ca510a916a0ddc6fad0c4d88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7d88bf1783acd582780b9f9208f310d28ebb08a2ca510a916a0ddc6fad0c4d88\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48
:24Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:36Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:36 crc kubenswrapper[4492]: I1126 06:49:36.926942 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"779b4f9a-92b7-4dcc-938a-e4de5decd688\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e170e91f442c9f45c7adfc9a5f8435cb51135522d5ac61f29829834c1f797e0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a8187b933b520c7a9c1c7f798f841f3892c249f52eddd13c0c7585a8bc916f20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b87661ddeafdf124a87d6bc50755b340e32d88bbc35a005ae13aa66aa3b39ff4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d
17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a8bf4249c1551f054875ff3ef146502de6c99fd3afd10d78b41274196a35a6d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:24Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:36Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:36 crc kubenswrapper[4492]: I1126 06:49:36.934441 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b32864c2-0866-4642-a872-7a5109d6f84f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3cdba573f24fecefce899a977a585bd480de506f64dcd8af6fd7f32f945b844d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b85db4fc799f48153a0e9db0b5b8316762f221aa873c03d675beb5e9939377bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8088d16b18d99a32c41b63cbd6181314e805595697cbc5f122864dff6fe7b324\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2c51611cfe8bb2b69aeaa6d69f7e94deda73d79397c8e5a4ac4f0fe330a4b586\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2c51611cfe8bb2b69aeaa6d69f7e94deda73d79397c8e5a4ac4f0fe330a4b586\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:24Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:36Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:36 crc kubenswrapper[4492]: I1126 06:49:36.944030 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-5bshd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a471ac3f-0ac0-4110-94bb-194c0de0af26\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d93d67e2acbb2cff41dd4d5bc98ffe7cd68f7fd393e4fab2fc974ae4de103ca3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9a91fba8751c53be54b0060bfc75906ab11b521770ca44425d8910fa13027c9d\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T06:49:35Z\\\",\\\"message\\\":\\\"2025-11-26T06:48:49+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_11d86e64-dae3-4806-bca9-064973c40277\\\\n2025-11-26T06:48:49+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_11d86e64-dae3-4806-bca9-064973c40277 to /host/opt/cni/bin/\\\\n2025-11-26T06:48:50Z [verbose] multus-daemon started\\\\n2025-11-26T06:48:50Z [verbose] Readiness Indicator file check\\\\n2025-11-26T06:49:35Z [error] have you checked that your default network is ready? 
still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:49:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gt98z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-5bshd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:36Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:36 crc kubenswrapper[4492]: I1126 06:49:36.955355 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:36 crc kubenswrapper[4492]: I1126 06:49:36.955383 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:36 crc kubenswrapper[4492]: I1126 06:49:36.955392 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:36 crc kubenswrapper[4492]: I1126 06:49:36.955424 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:36 crc kubenswrapper[4492]: I1126 06:49:36.955434 4492 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:36Z","lastTransitionTime":"2025-11-26T06:49:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:37 crc kubenswrapper[4492]: I1126 06:49:37.057381 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:37 crc kubenswrapper[4492]: I1126 06:49:37.057403 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:37 crc kubenswrapper[4492]: I1126 06:49:37.057412 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:37 crc kubenswrapper[4492]: I1126 06:49:37.057423 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:37 crc kubenswrapper[4492]: I1126 06:49:37.057430 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:37Z","lastTransitionTime":"2025-11-26T06:49:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:37 crc kubenswrapper[4492]: I1126 06:49:37.158901 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:37 crc kubenswrapper[4492]: I1126 06:49:37.158948 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:37 crc kubenswrapper[4492]: I1126 06:49:37.158958 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:37 crc kubenswrapper[4492]: I1126 06:49:37.158971 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:37 crc kubenswrapper[4492]: I1126 06:49:37.158980 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:37Z","lastTransitionTime":"2025-11-26T06:49:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:49:37 crc kubenswrapper[4492]: I1126 06:49:37.260851 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:37 crc kubenswrapper[4492]: I1126 06:49:37.261232 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:37 crc kubenswrapper[4492]: I1126 06:49:37.261315 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:37 crc kubenswrapper[4492]: I1126 06:49:37.261388 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:37 crc kubenswrapper[4492]: I1126 06:49:37.261460 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:37Z","lastTransitionTime":"2025-11-26T06:49:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:37 crc kubenswrapper[4492]: I1126 06:49:37.363163 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:37 crc kubenswrapper[4492]: I1126 06:49:37.363206 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:37 crc kubenswrapper[4492]: I1126 06:49:37.363215 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:37 crc kubenswrapper[4492]: I1126 06:49:37.363225 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:37 crc kubenswrapper[4492]: I1126 06:49:37.363234 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:37Z","lastTransitionTime":"2025-11-26T06:49:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:37 crc kubenswrapper[4492]: I1126 06:49:37.438334 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 06:49:37 crc kubenswrapper[4492]: E1126 06:49:37.438688 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 06:49:37 crc kubenswrapper[4492]: I1126 06:49:37.438765 4492 scope.go:117] "RemoveContainer" containerID="c02ed7c2507748e0a1171eb6e43daac324db91b4329f8ca3c2aa7cabac7205b4" Nov 26 06:49:37 crc kubenswrapper[4492]: I1126 06:49:37.464760 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:37 crc kubenswrapper[4492]: I1126 06:49:37.464794 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:37 crc kubenswrapper[4492]: I1126 06:49:37.464805 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:37 crc kubenswrapper[4492]: I1126 06:49:37.464820 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:37 crc kubenswrapper[4492]: I1126 06:49:37.464832 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:37Z","lastTransitionTime":"2025-11-26T06:49:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:37 crc kubenswrapper[4492]: I1126 06:49:37.566162 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:37 crc kubenswrapper[4492]: I1126 06:49:37.566200 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:37 crc kubenswrapper[4492]: I1126 06:49:37.566209 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:37 crc kubenswrapper[4492]: I1126 06:49:37.566222 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:37 crc kubenswrapper[4492]: I1126 06:49:37.566232 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:37Z","lastTransitionTime":"2025-11-26T06:49:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:49:37 crc kubenswrapper[4492]: I1126 06:49:37.668033 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:37 crc kubenswrapper[4492]: I1126 06:49:37.668067 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:37 crc kubenswrapper[4492]: I1126 06:49:37.668076 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:37 crc kubenswrapper[4492]: I1126 06:49:37.668089 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:37 crc kubenswrapper[4492]: I1126 06:49:37.668101 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:37Z","lastTransitionTime":"2025-11-26T06:49:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:37 crc kubenswrapper[4492]: I1126 06:49:37.770222 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:37 crc kubenswrapper[4492]: I1126 06:49:37.770250 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:37 crc kubenswrapper[4492]: I1126 06:49:37.770260 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:37 crc kubenswrapper[4492]: I1126 06:49:37.770273 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:37 crc kubenswrapper[4492]: I1126 06:49:37.770289 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:37Z","lastTransitionTime":"2025-11-26T06:49:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:49:37 crc kubenswrapper[4492]: I1126 06:49:37.787909 4492 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-lghgp_9b104695-0850-4fb3-b2f8-f764435f8694/ovnkube-controller/2.log" Nov 26 06:49:37 crc kubenswrapper[4492]: I1126 06:49:37.790405 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" event={"ID":"9b104695-0850-4fb3-b2f8-f764435f8694","Type":"ContainerStarted","Data":"c0888a57c383ad8c0a363ee29bc31841ce4175cb3aeb1b825da7efd383d4265c"} Nov 26 06:49:37 crc kubenswrapper[4492]: I1126 06:49:37.790744 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" Nov 26 06:49:37 crc kubenswrapper[4492]: I1126 06:49:37.800099 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2cbf75e9-4b7e-454b-bcff-ebb0f537bb0e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://390b1499bd9aae353a574f1d5ca4243dda7d4576837cf40a0118c53cff23ebdb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d88bf1783acd582780b9f9208f310d28ebb08a2ca510a916a0ddc6fad0c4d88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7d88bf1783acd582780b9f9208f310d28ebb08a2ca510a916a0ddc6fad0c4d88\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}
},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:24Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:37Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:37 crc kubenswrapper[4492]: I1126 06:49:37.811692 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"779b4f9a-92b7-4dcc-938a-e4de5decd688\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e170e91f442c9f45c7adfc9a5f8435cb51135522d5ac61f29829834c1f797e0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a8187b933b520c7a9c1c7f798f841f3892c249f52eddd13c0c7585a8bc916f20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b87661ddeafdf124a87d6bc50755b340e32d88bbc35a005ae13aa66aa3b39ff4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-cont
roller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a8bf4249c1551f054875ff3ef146502de6c99fd3afd10d78b41274196a35a6d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:24Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:37Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:37 crc kubenswrapper[4492]: I1126 06:49:37.820538 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b32864c2-0866-4642-a872-7a5109d6f84f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3cdba573f24fecefce899a977a585bd480de506f64dcd8af6fd7f32f945b844d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b85db4fc799f48153a0e9db0b5b8316762f221aa873c03d675beb5e9939377bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8088d16b18d99a32c41b63cbd6181314e805595697cbc5f122864dff6fe7b324\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2c51611cfe8bb2b69aeaa6d69f7e94deda73d79397c8e5a4ac4f0fe330a4b586\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2c51611cfe8bb2b69aeaa6d69f7e94deda73d79397c8e5a4ac4f0fe330a4b586\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:24Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:37Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:37 crc kubenswrapper[4492]: I1126 06:49:37.829229 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-5bshd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a471ac3f-0ac0-4110-94bb-194c0de0af26\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d93d67e2acbb2cff41dd4d5bc98ffe7cd68f7fd393e4fab2fc974ae4de103ca3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9a91fba8751c53be54b0060bfc75906ab11b521770ca44425d8910fa13027c9d\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T06:49:35Z\\\",\\\"message\\\":\\\"2025-11-26T06:48:49+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_11d86e64-dae3-4806-bca9-064973c40277\\\\n2025-11-26T06:48:49+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_11d86e64-dae3-4806-bca9-064973c40277 to /host/opt/cni/bin/\\\\n2025-11-26T06:48:50Z [verbose] multus-daemon started\\\\n2025-11-26T06:48:50Z [verbose] Readiness Indicator file check\\\\n2025-11-26T06:49:35Z [error] have you checked that your default network is ready? 
still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:49:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gt98z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-5bshd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:37Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:37 crc kubenswrapper[4492]: I1126 06:49:37.838233 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2gwwb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d23da2c-14b7-4671-b87e-7506855ca163\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://56a756e2798f3a758f7cc404b3c1e543389f88510f1f1bcef6bd603086b5ac96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c7zdg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://23c2c8bf0201054d839f80e0d6ee1423ef1140d1c59512cd787edbad1b611b2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c7zdg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:49:01Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2gwwb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:37Z is after 2025-08-24T17:21:41Z" Nov 26 
06:49:37 crc kubenswrapper[4492]: I1126 06:49:37.848624 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"995e57c0-8e79-4857-8451-c7f7b51a05d3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70a3d5141782b7c4dcb2ef96a026e1fb4f4ef101c967f64a20ece097bbdf007f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1684e95c263df92c92efdd2240417c071f977b58d825abd2155277d7cce1fd57\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://15f4e01e3336948991062e895549de16371af485e1cc1f32591b5dc4fc62472b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\
\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef8e05b7e0643e9159acdd474c0f1ed97db182b0a7ed4f21b475ce6e4c051f52\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a6dd3695118a8c09585a7cfceb42ac5ae5898562c5f6442da6936f849a4e9f8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T06:48:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1126 06:48:41.573117 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1126 06:48:41.573321 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 06:48:41.575536 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3533228848/tls.crt::/tmp/serving-cert-3533228848/tls.key\\\\\\\"\\\\nI1126 06:48:41.958496 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 06:48:41.961105 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 06:48:41.961124 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 06:48:41.961145 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 06:48:41.961150 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 06:48:41.965068 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 06:48:41.965092 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:48:41.965097 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:48:41.965101 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 06:48:41.965103 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 06:48:41.965106 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 06:48:41.965108 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 06:48:41.965323 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 06:48:41.966098 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ca15652692a4dd6b01f7ec02b8ff37ac41ed740c4c47d451e0742aa80d0c028\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4421dd0243f7239dfa2ede847b9cb5230663e69a214dca3dfb1a4763ea76b233\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4421dd0243f7239dfa2ede847b9cb5230663e69a214dca3dfb1a4763ea76b233\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:24Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:37Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:37 crc kubenswrapper[4492]: I1126 06:49:37.858329 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:37Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:37 crc kubenswrapper[4492]: I1126 06:49:37.868069 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:37Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:37 crc kubenswrapper[4492]: I1126 06:49:37.871961 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:37 crc kubenswrapper[4492]: I1126 06:49:37.871991 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:37 crc kubenswrapper[4492]: I1126 06:49:37.872001 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:37 crc kubenswrapper[4492]: I1126 06:49:37.872016 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:37 crc kubenswrapper[4492]: I1126 06:49:37.872026 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:37Z","lastTransitionTime":"2025-11-26T06:49:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:49:37 crc kubenswrapper[4492]: I1126 06:49:37.879288 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2644f0895688786b5b70f08011457eed33cb0a7962ac6dde6b60dd3276497011\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://27d142c5328a6f659cd2cee0b6535403ccbfb07aee1ea29c928c9d80a847f4ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:37Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:37 crc kubenswrapper[4492]: I1126 06:49:37.889346 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch 
status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:37Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:37 crc kubenswrapper[4492]: I1126 06:49:37.896567 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-hjxcm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfdb68d9-168b-4d04-a6ee-b2deef54a9ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f3b93237fa8e75f6423c8f194440aebb4fffec26f63b19b00396ee567fb454f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cpmw5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:50Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-hjxcm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:37Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:37 crc kubenswrapper[4492]: I1126 06:49:37.905771 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://78117c73db01f54f893d52844cab11a6257a1f6b6b582fb751065e1acc329620\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:37Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:37 crc kubenswrapper[4492]: I1126 06:49:37.915571 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4aa19f79274209a31db5cfe0a8ff6f71000fc4efb2d65dfab3f719d3a7f1ee9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:37Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:37 crc kubenswrapper[4492]: I1126 06:49:37.928494 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b104695-0850-4fb3-b2f8-f764435f8694\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5de69ea04f427c1f151433ee53b119dfa7e3cd603f924ebca340c5050ff7d097\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://738cf2a56bd431ea3dcd29c8a6e66881e618cf6592af8ce1852be9776c41f803\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dba16d525497282969e2ead66e1d4886793234d0f4d2ef143a8246329131b046\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://501133ebc0e93cbba7bea713d813e6f41caa28286ed59ae8d328b6f9050a1e71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0301a329531b99f93a3e91f672edc4ce17ae03cfd5562f5802362423f11670fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1519223368c78fc663c8f6674135a7c574425dccbd0151d6663836b4715e9f9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0888a57c383ad8c0a363ee29bc31841ce4175cb
3aeb1b825da7efd383d4265c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c02ed7c2507748e0a1171eb6e43daac324db91b4329f8ca3c2aa7cabac7205b4\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T06:49:10Z\\\",\\\"message\\\":\\\"ator/metrics LB template configs for network=default: []services.lbConfig(nil)\\\\nF1126 06:49:10.114199 6061 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:10Z is after 2025-08-24T17:21:41Z]\\\\nI1126 06:49:10.114196 6061 services_controller.go:451] Built service openshift-authentication-operator/metrics cluster-wide LB for network=default: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-authentication-operator/metrics_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-authentication-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:true, 
Empty\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:49:09Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:49:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dcc2ea28d7ab1193d0836b56704a623efefea918b62fde4a41c3d5466a531a39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"
containerID\\\":\\\"cri-o://4001ee998f435fc6249ef42a3a6a9a8cf4a5031eb8999158ba3ba6bfa41a4a43\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4001ee998f435fc6249ef42a3a6a9a8cf4a5031eb8999158ba3ba6bfa41a4a43\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lghgp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:37Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:37 crc kubenswrapper[4492]: I1126 06:49:37.935594 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-s4gtb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1cc59fbe-82e1-406b-95b1-a26b6b8ef467\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk29d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk29d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:49:02Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-s4gtb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:37Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:37 crc kubenswrapper[4492]: I1126 06:49:37.945377 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-nrzjd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"026c3325-a592-4828-8e4f-08bcb790014a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c4955932b597b4b409c6c0bd2195c7918b56f1db3aca639a0d47656173b6176\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70dcd5b71e0ed41855711d244d6628b740a92d6ada3fa3114a76c280b83e7402\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70dcd5b71e0ed41855711d244d6628b740a92d6ada3fa3114a76c280b83e7402\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://46f1a12a78f284278cf74713bf7a46f4d62a7f40258ec65ae217b5fbb5729525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://46f1a12a78f284278cf74713bf7a46f4d62a7f40258ec65ae217b5fbb5729525\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://233ce2ea0f4758076db5c922254232e5cccdc2466e03c6044a0ba4077dd71e8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://233ce2ea0f4758076db5c922254232e5cccdc2466e03c6044a0ba4077dd71e8a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1f662c3ac64d38717f3c8eb17f7ab22b7f9af88661f97ddae2caeec71b793a68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f662c3ac64d38717f3c8eb17f7ab22b7f9af88661f97ddae2caeec71b793a68\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://39153e3f02feb4f9af860e81e6e426354d4113d26135aadb9bc708dcb2cb1770\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://39153e3f02feb4f9af860e81e6e426354d4113d26135aadb9bc708dcb2cb1770\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b34363c342a19d8eda1d5be4bea825a4e0f9a4281915b6ae5d0cc5b00ad7c926\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b34363c342a19d8eda1d5be4bea825a4e0f9a4281915b6ae5d0cc5b00ad7c926\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-nrzjd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:37Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:37 crc kubenswrapper[4492]: I1126 06:49:37.952906 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6lnwf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0151e6e0-df4e-4482-9309-f8cce9bc6ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f91d7ead0eaa9a8c8d4ec6372d35236fc33de1f8606616efadfee2ec6a71324\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cmsnp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6lnwf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:37Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:37 crc kubenswrapper[4492]: I1126 06:49:37.960204 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"04bf18ad-d2a1-4b30-a3fa-2b6247363c82\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fad93839d2a51dffea51b659a6dcbfe24701e00ebb88e18329f7aa4351e1b4f5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lwjt9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://695ce8a08afa726c47c6aa48ddd828cbc420a9740de6cf165351e5bd68174a89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lwjt9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6blv7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:37Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:37 crc kubenswrapper[4492]: I1126 06:49:37.973479 4492 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:37 crc kubenswrapper[4492]: I1126 06:49:37.973522 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:37 crc kubenswrapper[4492]: I1126 06:49:37.973535 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:37 crc kubenswrapper[4492]: I1126 06:49:37.973549 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:37 crc kubenswrapper[4492]: I1126 06:49:37.973561 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:37Z","lastTransitionTime":"2025-11-26T06:49:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:38 crc kubenswrapper[4492]: I1126 06:49:38.075148 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:38 crc kubenswrapper[4492]: I1126 06:49:38.075200 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:38 crc kubenswrapper[4492]: I1126 06:49:38.075211 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:38 crc kubenswrapper[4492]: I1126 06:49:38.075229 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:38 crc kubenswrapper[4492]: I1126 06:49:38.075237 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:38Z","lastTransitionTime":"2025-11-26T06:49:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:38 crc kubenswrapper[4492]: I1126 06:49:38.177573 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:38 crc kubenswrapper[4492]: I1126 06:49:38.177608 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:38 crc kubenswrapper[4492]: I1126 06:49:38.177618 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:38 crc kubenswrapper[4492]: I1126 06:49:38.177631 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:38 crc kubenswrapper[4492]: I1126 06:49:38.177641 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:38Z","lastTransitionTime":"2025-11-26T06:49:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:49:38 crc kubenswrapper[4492]: I1126 06:49:38.279242 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:38 crc kubenswrapper[4492]: I1126 06:49:38.279620 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:38 crc kubenswrapper[4492]: I1126 06:49:38.279699 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:38 crc kubenswrapper[4492]: I1126 06:49:38.279757 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:38 crc kubenswrapper[4492]: I1126 06:49:38.279807 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:38Z","lastTransitionTime":"2025-11-26T06:49:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:38 crc kubenswrapper[4492]: I1126 06:49:38.381426 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:38 crc kubenswrapper[4492]: I1126 06:49:38.381450 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:38 crc kubenswrapper[4492]: I1126 06:49:38.381491 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:38 crc kubenswrapper[4492]: I1126 06:49:38.381505 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:38 crc kubenswrapper[4492]: I1126 06:49:38.381514 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:38Z","lastTransitionTime":"2025-11-26T06:49:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:38 crc kubenswrapper[4492]: I1126 06:49:38.438395 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 06:49:38 crc kubenswrapper[4492]: I1126 06:49:38.438460 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-s4gtb" Nov 26 06:49:38 crc kubenswrapper[4492]: I1126 06:49:38.438411 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 06:49:38 crc kubenswrapper[4492]: E1126 06:49:38.438534 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 06:49:38 crc kubenswrapper[4492]: E1126 06:49:38.438621 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-s4gtb" podUID="1cc59fbe-82e1-406b-95b1-a26b6b8ef467" Nov 26 06:49:38 crc kubenswrapper[4492]: E1126 06:49:38.438680 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 06:49:38 crc kubenswrapper[4492]: I1126 06:49:38.483569 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:38 crc kubenswrapper[4492]: I1126 06:49:38.483594 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:38 crc kubenswrapper[4492]: I1126 06:49:38.483603 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:38 crc kubenswrapper[4492]: I1126 06:49:38.483615 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:38 crc kubenswrapper[4492]: I1126 06:49:38.483625 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:38Z","lastTransitionTime":"2025-11-26T06:49:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:38 crc kubenswrapper[4492]: I1126 06:49:38.585609 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:38 crc kubenswrapper[4492]: I1126 06:49:38.585633 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:38 crc kubenswrapper[4492]: I1126 06:49:38.585641 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:38 crc kubenswrapper[4492]: I1126 06:49:38.585652 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:38 crc kubenswrapper[4492]: I1126 06:49:38.585661 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:38Z","lastTransitionTime":"2025-11-26T06:49:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:49:38 crc kubenswrapper[4492]: I1126 06:49:38.687431 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:38 crc kubenswrapper[4492]: I1126 06:49:38.687468 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:38 crc kubenswrapper[4492]: I1126 06:49:38.687477 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:38 crc kubenswrapper[4492]: I1126 06:49:38.687487 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:38 crc kubenswrapper[4492]: I1126 06:49:38.687495 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:38Z","lastTransitionTime":"2025-11-26T06:49:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:38 crc kubenswrapper[4492]: I1126 06:49:38.788898 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:38 crc kubenswrapper[4492]: I1126 06:49:38.788926 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:38 crc kubenswrapper[4492]: I1126 06:49:38.788934 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:38 crc kubenswrapper[4492]: I1126 06:49:38.788943 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:38 crc kubenswrapper[4492]: I1126 06:49:38.788952 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:38Z","lastTransitionTime":"2025-11-26T06:49:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:49:38 crc kubenswrapper[4492]: I1126 06:49:38.794294 4492 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-lghgp_9b104695-0850-4fb3-b2f8-f764435f8694/ovnkube-controller/3.log" Nov 26 06:49:38 crc kubenswrapper[4492]: I1126 06:49:38.794827 4492 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-lghgp_9b104695-0850-4fb3-b2f8-f764435f8694/ovnkube-controller/2.log" Nov 26 06:49:38 crc kubenswrapper[4492]: I1126 06:49:38.796932 4492 generic.go:334] "Generic (PLEG): container finished" podID="9b104695-0850-4fb3-b2f8-f764435f8694" containerID="c0888a57c383ad8c0a363ee29bc31841ce4175cb3aeb1b825da7efd383d4265c" exitCode=1 Nov 26 06:49:38 crc kubenswrapper[4492]: I1126 06:49:38.797024 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" event={"ID":"9b104695-0850-4fb3-b2f8-f764435f8694","Type":"ContainerDied","Data":"c0888a57c383ad8c0a363ee29bc31841ce4175cb3aeb1b825da7efd383d4265c"} Nov 26 06:49:38 crc kubenswrapper[4492]: I1126 06:49:38.797125 4492 scope.go:117] "RemoveContainer" containerID="c02ed7c2507748e0a1171eb6e43daac324db91b4329f8ca3c2aa7cabac7205b4" Nov 26 06:49:38 crc kubenswrapper[4492]: I1126 06:49:38.797482 4492 scope.go:117] "RemoveContainer" containerID="c0888a57c383ad8c0a363ee29bc31841ce4175cb3aeb1b825da7efd383d4265c" Nov 26 06:49:38 crc kubenswrapper[4492]: E1126 06:49:38.797624 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-lghgp_openshift-ovn-kubernetes(9b104695-0850-4fb3-b2f8-f764435f8694)\"" pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" podUID="9b104695-0850-4fb3-b2f8-f764435f8694" Nov 26 06:49:38 crc kubenswrapper[4492]: I1126 06:49:38.807966 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2cbf75e9-4b7e-454b-bcff-ebb0f537bb0e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://390b1499bd9aae353a574f1d5ca4243dda7d4576837cf40a0118c53cff23ebdb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d88bf1783acd582780b9f9208f310d28ebb08a2ca510a916a0ddc6fad0c4d88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7d88bf1783acd582780b9f9208f310d28ebb08a2ca510a916a0ddc6fad0c4d88\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:24Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:38Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:38 crc kubenswrapper[4492]: I1126 06:49:38.818364 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"779b4f9a-92b7-4dcc-938a-e4de5decd688\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e170e91f442c9f45c7adfc9a5f8435cb51135522d5ac61f29829834c1f797e0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a8187b933b520c7a9c1c7f798f841f3892c249f52eddd13c0c7585a8bc916f20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b87661ddeafdf124a87d6bc50755b340e32d88bbc35a005ae13aa66aa3b39ff4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a8bf4249c1551f054875ff3ef146502de6c99fd3afd10d78b41274196a35a6d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:24Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:38Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:38 crc kubenswrapper[4492]: I1126 06:49:38.826760 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b32864c2-0866-4642-a872-7a5109d6f84f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3cdba573f24fecefce899a977a585bd480de506f64dcd8af6fd7f32f945b844d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b85db4fc799f48153a0e9db0b5b8316762f221aa873c03d675beb5e9939377bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4
.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8088d16b18d99a32c41b63cbd6181314e805595697cbc5f122864dff6fe7b324\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2c51611cfe8bb2b69aeaa6d69f7e94deda73d79397c8e5a4ac4f0fe330a4b586\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2c51611cfe8bb2b69aeaa6d69f7e94deda73d79397c8e5a4ac4f0fe330a4b586\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:24Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:38Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:38 crc kubenswrapper[4492]: I1126 06:49:38.836069 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-5bshd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a471ac3f-0ac0-4110-94bb-194c0de0af26\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d93d67e2acbb2cff41dd4d5bc98ffe7cd68f7fd393e4fab2fc974ae4de103ca3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9a91fba8751c53be54b0060bfc75906ab11b521770ca44425d8910fa13027c9d\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T06:49:35Z\\\",\\\"message\\\":\\\"2025-11-26T06:48:49+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_11d86e64-dae3-4806-bca9-064973c40277\\\\n2025-11-26T06:48:49+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_11d86e64-dae3-4806-bca9-064973c40277 to /host/opt/cni/bin/\\\\n2025-11-26T06:48:50Z [verbose] multus-daemon started\\\\n2025-11-26T06:48:50Z [verbose] Readiness Indicator file check\\\\n2025-11-26T06:49:35Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:49:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gt98z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-5bshd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:38Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:38 crc kubenswrapper[4492]: I1126 06:49:38.844688 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:38Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:38 crc kubenswrapper[4492]: I1126 06:49:38.851483 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-hjxcm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfdb68d9-168b-4d04-a6ee-b2deef54a9ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f3b93237fa8e75f6423c8f194440aebb4fffec26f63b19b00396ee567fb454f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\
\\"name\\\":\\\"kube-api-access-cpmw5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:50Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-hjxcm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:38Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:38 crc kubenswrapper[4492]: I1126 06:49:38.859577 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2gwwb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d23da2c-14b7-4671-b87e-7506855ca163\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://56a756e2798f3a758f7cc404b3c1e543389f88510f1f1bcef6bd603086b5ac96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c7zdg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://23c2c8bf0201054d839f80e0d6ee1423ef1140d1c59512cd787edbad1b611b2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-co
nfig/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c7zdg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:49:01Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2gwwb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:38Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:38 crc kubenswrapper[4492]: I1126 06:49:38.869723 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"995e57c0-8e79-4857-8451-c7f7b51a05d3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70a3d5141782b7c4dcb2ef96a026e1fb4f4ef101c967f64a20ece097bbdf007f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1684e95c263df92c92efdd2240417c071f977b58d825abd2155277d7cce1fd57\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-
11-26T06:48:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://15f4e01e3336948991062e895549de16371af485e1cc1f32591b5dc4fc62472b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef8e05b7e0643e9159acdd474c0f1ed97db182b0a7ed4f21b475ce6e4c051f52\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a6dd3695118a8c09585a7cfceb42ac5ae5898562c5f6442da6936f849a4e9f8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T06:48:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1126 06:48:41.573117 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1126 06:48:41.573321 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 06:48:41.575536 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3533228848/tls.crt::/tmp/serving-cert-3533228848/tls.key\\\\\\\"\\\\nI1126 06:48:41.958496 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 06:48:41.961105 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 06:48:41.961124 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 06:48:41.961145 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 06:48:41.961150 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 06:48:41.965068 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 06:48:41.965092 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:48:41.965097 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:48:41.965101 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 06:48:41.965103 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 06:48:41.965106 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 06:48:41.965108 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 06:48:41.965323 1 
genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 06:48:41.966098 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ca15652692a4dd6b01f7ec02b8ff37ac41ed740c4c47d451e0742aa80d0c028\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4421dd0243f7239dfa2ede847b9cb5230663e69a214dca3dfb1a4763ea76b233\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4421dd0243f7239dfa2ede847b9cb5230663e69a214dca3dfb1a4763ea76b233\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:24Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:38Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:38 crc kubenswrapper[4492]: I1126 06:49:38.878311 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:38Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:38 crc kubenswrapper[4492]: I1126 06:49:38.886665 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:38Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:38 crc kubenswrapper[4492]: I1126 06:49:38.890319 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:38 crc kubenswrapper[4492]: I1126 06:49:38.890345 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:38 crc kubenswrapper[4492]: I1126 06:49:38.890369 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:38 crc kubenswrapper[4492]: I1126 06:49:38.890414 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:38 crc kubenswrapper[4492]: I1126 06:49:38.890424 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:38Z","lastTransitionTime":"2025-11-26T06:49:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:49:38 crc kubenswrapper[4492]: I1126 06:49:38.895160 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2644f0895688786b5b70f08011457eed33cb0a7962ac6dde6b60dd3276497011\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://27d142c5328a6f659cd2cee0b6535403ccbfb07aee1ea29c928c9d80a847f4ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:38Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:38 crc kubenswrapper[4492]: I1126 06:49:38.903301 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-s4gtb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1cc59fbe-82e1-406b-95b1-a26b6b8ef467\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk29d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk29d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:49:02Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-s4gtb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:38Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:38 crc kubenswrapper[4492]: I1126 06:49:38.913649 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://78117c73db01f54f893d52844cab11a6257a1f6b6b582fb751065e1acc329620\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:38Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:38 crc kubenswrapper[4492]: I1126 06:49:38.922983 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4aa19f79274209a31db5cfe0a8ff6f71000fc4efb2d65dfab3f719d3a7f1ee9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:38Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:38 crc kubenswrapper[4492]: I1126 06:49:38.936218 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b104695-0850-4fb3-b2f8-f764435f8694\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5de69ea04f427c1f151433ee53b119dfa7e3cd603f924ebca340c5050ff7d097\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://738cf2a56bd431ea3dcd29c8a6e66881e618cf6592af8ce1852be9776c41f803\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dba16d525497282969e2ead66e1d4886793234d0f4d2ef143a8246329131b046\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://501133ebc0e93cbba7bea713d813e6f41caa28286ed59ae8d328b6f9050a1e71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0301a329531b99f93a3e91f672edc4ce17ae03cfd5562f5802362423f11670fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1519223368c78fc663c8f6674135a7c574425dccbd0151d6663836b4715e9f9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0888a57c383ad8c0a363ee29bc31841ce4175cb
3aeb1b825da7efd383d4265c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c02ed7c2507748e0a1171eb6e43daac324db91b4329f8ca3c2aa7cabac7205b4\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T06:49:10Z\\\",\\\"message\\\":\\\"ator/metrics LB template configs for network=default: []services.lbConfig(nil)\\\\nF1126 06:49:10.114199 6061 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:10Z is after 2025-08-24T17:21:41Z]\\\\nI1126 06:49:10.114196 6061 services_controller.go:451] Built service openshift-authentication-operator/metrics cluster-wide LB for network=default: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-authentication-operator/metrics_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-authentication-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:true, Empty\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:49:09Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c0888a57c383ad8c0a363ee29bc31841ce4175cb3aeb1b825da7efd383d4265c\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T06:49:38Z\\\",\\\"message\\\":\\\"ts: [openshift-kube-controller-manager/kube-controller-manager-crc openshift-kube-scheduler/openshift-kube-scheduler-crc openshift-network-diagnostics/network-check-source-55646444c4-trplf openshift-network-operator/iptables-alerter-4ln5h openshift-machine-config-operator/kube-rbac-proxy-crio-crc openshift-machine-config-operator/machine-config-daemon-6blv7 openshift-multus/multus-5bshd openshift-multus/network-metrics-daemon-s4gtb openshift-network-operator/network-operator-58b4c7f79c-55gtf openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2gwwb openshift-image-registry/node-ca-hjxcm openshift-network-node-identity/network-node-identity-vrzqb openshift-ovn-kubernetes/ovnkube-node-lghgp openshift-multus/multus-additional-cni-plugins-nrzjd openshift-network-console/networking-console-plugin-85b44fc459-gdk6g openshift-network-diagnostics/network-check-target-xd92c]\\\\nI1126 06:49:38.114760 6374 services_controller.go:445] Built service openshift-marketplace/certified-operators LB template configs for network=default: []services.lbConfig(nil)\\\\nF1126 06:49:38.115033 6374 
ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initializatio\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:49:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dcc2ea28d7ab1193d0836b56704a623efefea918b62fde4a41c3d5466a531a39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostI
Ps\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4001ee998f435fc6249ef42a3a6a9a8cf4a5031eb8999158ba3ba6bfa41a4a43\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4001ee998f435fc6249ef42a3a6a9a8cf4a5031eb8999158ba3ba6bfa41a4a43\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lghgp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:38Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:38 crc kubenswrapper[4492]: I1126 06:49:38.947117 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-nrzjd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"026c3325-a592-4828-8e4f-08bcb790014a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c4955932b597b4b409c6c0bd2195c7918b56f1db3aca639a0d47656173b6176\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70dcd5b71e0ed41855711d244d6628b740a92d6ada3fa3114a76c280b83e7402\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70dcd5b71e0ed41855711d244d6628b740a92d6ada3fa3114a76c280b83e7402\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://46f1a12a78f284278cf74713bf7a46f4d62a7f40258ec65ae217b5fbb5729525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://46f1a12a78f284278cf74713bf7a46f4d62a7f40258ec65ae217b5fbb5729525\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://233ce2ea0f4758076db5c922254232e5cccdc2466e03c6044a0ba4077dd71e8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://233ce2ea0f4758076db5c922254232e5cccdc2466e03c6044a0ba4077dd71e8a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1f662c3ac64d38717f3c8eb17f7ab22b7f9af88661f97ddae2caeec71b793a68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f662c3ac64d38717f3c8eb17f7ab22b7f9af88661f97ddae2caeec71b793a68\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://39153e3f02feb4f9af860e81e6e426354d4113d26135aadb9bc708dcb2cb1770\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://39153e3f02feb4f9af860e81e6e426354d4113d26135aadb9bc708dcb2cb1770\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b34363c342a19d8eda1d5be4bea825a4e0f9a4281915b6ae5d0cc5b00ad7c926\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b34363c342a19d8eda1d5be4bea825a4e0f9a4281915b6ae5d0cc5b00ad7c926\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-nrzjd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:38Z is after 2025-08-24T17:21:41Z"
Nov 26 06:49:38 crc kubenswrapper[4492]: I1126 06:49:38.954880 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6lnwf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0151e6e0-df4e-4482-9309-f8cce9bc6ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f91d7ead0eaa9a8c8d4ec6372d35236fc33de1f8606616efadfee2ec6a71324\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cmsnp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6lnwf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:38Z is after 2025-08-24T17:21:41Z"
Nov 26 06:49:38 crc kubenswrapper[4492]: I1126 06:49:38.963200 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"04bf18ad-d2a1-4b30-a3fa-2b6247363c82\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fad93839d2a51dffea51b659a6dcbfe24701e00ebb88e18329f7aa4351e1b4f5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lwjt9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://695ce8a08afa726c47c6aa48ddd828cbc420a9740de6cf165351e5bd68174a89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lwjt9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6blv7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:38Z is after 2025-08-24T17:21:41Z"
Nov 26 06:49:38 crc kubenswrapper[4492]: I1126 06:49:38.992750 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:38 crc kubenswrapper[4492]: I1126 06:49:38.992788 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:38 crc kubenswrapper[4492]: I1126 06:49:38.992798 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:38 crc kubenswrapper[4492]: I1126 06:49:38.992812 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:38 crc kubenswrapper[4492]: I1126 06:49:38.992821 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:38Z","lastTransitionTime":"2025-11-26T06:49:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:39 crc kubenswrapper[4492]: I1126 06:49:39.095391 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:39 crc kubenswrapper[4492]: I1126 06:49:39.095427 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:39 crc kubenswrapper[4492]: I1126 06:49:39.095438 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:39 crc kubenswrapper[4492]: I1126 06:49:39.095453 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:39 crc kubenswrapper[4492]: I1126 06:49:39.095461 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:39Z","lastTransitionTime":"2025-11-26T06:49:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:39 crc kubenswrapper[4492]: I1126 06:49:39.197394 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:39 crc kubenswrapper[4492]: I1126 06:49:39.197433 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:39 crc kubenswrapper[4492]: I1126 06:49:39.197446 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:39 crc kubenswrapper[4492]: I1126 06:49:39.197463 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:39 crc kubenswrapper[4492]: I1126 06:49:39.197474 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:39Z","lastTransitionTime":"2025-11-26T06:49:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:39 crc kubenswrapper[4492]: I1126 06:49:39.299644 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:39 crc kubenswrapper[4492]: I1126 06:49:39.299671 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:39 crc kubenswrapper[4492]: I1126 06:49:39.299683 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:39 crc kubenswrapper[4492]: I1126 06:49:39.299697 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:39 crc kubenswrapper[4492]: I1126 06:49:39.299706 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:39Z","lastTransitionTime":"2025-11-26T06:49:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:39 crc kubenswrapper[4492]: I1126 06:49:39.401564 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:39 crc kubenswrapper[4492]: I1126 06:49:39.401594 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:39 crc kubenswrapper[4492]: I1126 06:49:39.401604 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:39 crc kubenswrapper[4492]: I1126 06:49:39.401618 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:39 crc kubenswrapper[4492]: I1126 06:49:39.401627 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:39Z","lastTransitionTime":"2025-11-26T06:49:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:39 crc kubenswrapper[4492]: I1126 06:49:39.438558 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 26 06:49:39 crc kubenswrapper[4492]: E1126 06:49:39.438698 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 26 06:49:39 crc kubenswrapper[4492]: I1126 06:49:39.504137 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:39 crc kubenswrapper[4492]: I1126 06:49:39.504188 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:39 crc kubenswrapper[4492]: I1126 06:49:39.504201 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:39 crc kubenswrapper[4492]: I1126 06:49:39.504215 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:39 crc kubenswrapper[4492]: I1126 06:49:39.504223 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:39Z","lastTransitionTime":"2025-11-26T06:49:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:39 crc kubenswrapper[4492]: I1126 06:49:39.606784 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:39 crc kubenswrapper[4492]: I1126 06:49:39.606824 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:39 crc kubenswrapper[4492]: I1126 06:49:39.606834 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:39 crc kubenswrapper[4492]: I1126 06:49:39.606850 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:39 crc kubenswrapper[4492]: I1126 06:49:39.606862 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:39Z","lastTransitionTime":"2025-11-26T06:49:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:39 crc kubenswrapper[4492]: I1126 06:49:39.708830 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:39 crc kubenswrapper[4492]: I1126 06:49:39.708855 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:39 crc kubenswrapper[4492]: I1126 06:49:39.708864 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:39 crc kubenswrapper[4492]: I1126 06:49:39.708875 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:39 crc kubenswrapper[4492]: I1126 06:49:39.708883 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:39Z","lastTransitionTime":"2025-11-26T06:49:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:39 crc kubenswrapper[4492]: I1126 06:49:39.801165 4492 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-lghgp_9b104695-0850-4fb3-b2f8-f764435f8694/ovnkube-controller/3.log"
Nov 26 06:49:39 crc kubenswrapper[4492]: I1126 06:49:39.803675 4492 scope.go:117] "RemoveContainer" containerID="c0888a57c383ad8c0a363ee29bc31841ce4175cb3aeb1b825da7efd383d4265c"
Nov 26 06:49:39 crc kubenswrapper[4492]: E1126 06:49:39.803825 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-lghgp_openshift-ovn-kubernetes(9b104695-0850-4fb3-b2f8-f764435f8694)\"" pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" podUID="9b104695-0850-4fb3-b2f8-f764435f8694"
Nov 26 06:49:39 crc kubenswrapper[4492]: I1126 06:49:39.810736 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:39 crc kubenswrapper[4492]: I1126 06:49:39.810778 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:39 crc kubenswrapper[4492]: I1126 06:49:39.810787 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:39 crc kubenswrapper[4492]: I1126 06:49:39.810801 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:39 crc kubenswrapper[4492]: I1126 06:49:39.810811 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:39Z","lastTransitionTime":"2025-11-26T06:49:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"} Nov 26 06:49:39 crc kubenswrapper[4492]: I1126 06:49:39.815931 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-nrzjd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"026c3325-a592-4828-8e4f-08bcb790014a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c4955932b597b4b409c6c0bd2195c7918b56f1db3aca639a0d47656173b6176\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70dcd5b71e0ed41855711d244d6628b740a92d6ada3fa3114a76c280b83e7402\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70dcd5b71e0ed41855711d244d6628b740a92d6ada3fa3114a76c280b83e7402\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://46f1a12a78f284278cf74713bf7a46f4d62a7f40258ec65ae217b5fbb5729525\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://46f1a12a78f284278cf74713bf7a46f4d62a7f40258ec65ae217b5fbb5729525\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://233ce2ea0f4758076db5c922254232e5cccdc2466e03c6044a0ba4077dd71e8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://233ce2ea0f4758076db5c922254232e5cccdc2466e03c6044a0ba4077dd71e8a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1f662c3ac64d38717f3c8eb17f7ab22b7f9af88661f97ddae2caeec71b793a68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f662c3ac64d38717f3c8eb17f7ab22b7f9af88661f97ddae2caeec71b793a68\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://39153e3f02feb4f9af860e81e6e426354d4113d26135aadb9bc708dcb2cb1770\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://39153e3f02feb4f9af860e81e6e426354d4113d26135aadb9bc708dcb2cb1770\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b34363c342a19d8eda1d5be4bea825a4e0f9a4281915b6ae5d0cc5b00ad7c926\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b34363c342a19d8eda1d5be4bea825a4e0f9a4281915b6ae5d0cc5b00ad7c926\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-nrzjd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:39Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:39 crc kubenswrapper[4492]: I1126 06:49:39.823520 4492 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-dns/node-resolver-6lnwf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0151e6e0-df4e-4482-9309-f8cce9bc6ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f91d7ead0eaa9a8c8d4ec6372d35236fc33de1f8606616efadfee2ec6a71324\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cmsnp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6lnwf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:39Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:39 crc kubenswrapper[4492]: I1126 06:49:39.832547 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"04bf18ad-d2a1-4b30-a3fa-2b6247363c82\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fad93839d2a51dffea51b659a6dcbfe24701e00ebb88e18329f7aa4351e1b4f5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lwjt9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://695ce8a08afa726c47c6aa48ddd828cbc420a9740de6cf165351e5bd68174a89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lwjt9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6blv7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:39Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:39 crc kubenswrapper[4492]: I1126 06:49:39.840307 4492 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2cbf75e9-4b7e-454b-bcff-ebb0f537bb0e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://390b1499bd9aae353a574f1d5ca4243dda7d4576837cf40a0118c53cff23ebdb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d88bf1783acd582780b9f9208f310d28ebb08a2ca510a916a0ddc6fad0c4d88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7d88bf1783acd582780b9f9208f310d28ebb08a2ca510a916a0ddc6fad0c4d88\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:24Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:39Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:39 crc kubenswrapper[4492]: I1126 06:49:39.849689 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch 
status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"779b4f9a-92b7-4dcc-938a-e4de5decd688\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e170e91f442c9f45c7adfc9a5f8435cb51135522d5ac61f29829834c1f797e0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a8187b933b520c7a9c1c7f798f841f3892c249f52eddd13c0c7585a8bc916f20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b87661ddeafdf124a87d6bc50755b340e32d88bbc35a005ae13aa66aa3b39ff4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a8bf4249c1551f054875ff3ef146502de6c99fd3afd10d78b41274196a35a6d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller
-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:24Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:39Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:39 crc kubenswrapper[4492]: I1126 06:49:39.857603 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b32864c2-0866-4642-a872-7a5109d6f84f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3cdba573f24fecefce899a977a585bd480de506f64dcd8af6fd7f32f945b844d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b85db4fc799f48153a0e9db0b5b8316762f221aa873c03d675beb5e9939377bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev
/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8088d16b18d99a32c41b63cbd6181314e805595697cbc5f122864dff6fe7b324\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2c51611cfe8bb2b69aeaa6d69f7e94deda73d79397c8e5a4ac4f0fe330a4b586\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2c51611cfe8bb2b69aeaa6d69f7e94deda73d79397c8e5a4ac4f0fe330a4b586\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:24Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:39Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:39 crc kubenswrapper[4492]: I1126 06:49:39.867848 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-5bshd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a471ac3f-0ac0-4110-94bb-194c0de0af26\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d93d67e2acbb2cff41dd4d5bc98ffe7cd68f7fd393e4fab2fc974ae4de103ca3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9a91fba8751c53be54b0060bfc75906ab11b521770ca44425d8910fa13027c9d\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T06:49:35Z\\\",\\\"message\\\":\\\"2025-11-26T06:48:49+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_11d86e64-dae3-4806-bca9-064973c40277\\\\n2025-11-26T06:48:49+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_11d86e64-dae3-4806-bca9-064973c40277 to /host/opt/cni/bin/\\\\n2025-11-26T06:48:50Z [verbose] multus-daemon started\\\\n2025-11-26T06:48:50Z [verbose] Readiness Indicator file check\\\\n2025-11-26T06:49:35Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:49:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gt98z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-5bshd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:39Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:39 crc kubenswrapper[4492]: I1126 06:49:39.876287 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:39Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:39 crc kubenswrapper[4492]: I1126 06:49:39.883260 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-hjxcm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfdb68d9-168b-4d04-a6ee-b2deef54a9ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f3b93237fa8e75f6423c8f194440aebb4fffec26f63b19b00396ee567fb454f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\
\\"name\\\":\\\"kube-api-access-cpmw5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:50Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-hjxcm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:39Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:39 crc kubenswrapper[4492]: I1126 06:49:39.890956 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2gwwb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d23da2c-14b7-4671-b87e-7506855ca163\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://56a756e2798f3a758f7cc404b3c1e543389f88510f1f1bcef6bd603086b5ac96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c7zdg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://23c2c8bf0201054d839f80e0d6ee1423ef1140d1c59512cd787edbad1b611b2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-co
nfig/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c7zdg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:49:01Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2gwwb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:39Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:39 crc kubenswrapper[4492]: I1126 06:49:39.899953 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"995e57c0-8e79-4857-8451-c7f7b51a05d3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70a3d5141782b7c4dcb2ef96a026e1fb4f4ef101c967f64a20ece097bbdf007f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1684e95c263df92c92efdd2240417c071f977b58d825abd2155277d7cce1fd57\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-
11-26T06:48:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://15f4e01e3336948991062e895549de16371af485e1cc1f32591b5dc4fc62472b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef8e05b7e0643e9159acdd474c0f1ed97db182b0a7ed4f21b475ce6e4c051f52\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a6dd3695118a8c09585a7cfceb42ac5ae5898562c5f6442da6936f849a4e9f8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T06:48:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1126 06:48:41.573117 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1126 06:48:41.573321 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 06:48:41.575536 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3533228848/tls.crt::/tmp/serving-cert-3533228848/tls.key\\\\\\\"\\\\nI1126 06:48:41.958496 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 06:48:41.961105 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 06:48:41.961124 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 06:48:41.961145 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 06:48:41.961150 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 06:48:41.965068 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 06:48:41.965092 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:48:41.965097 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:48:41.965101 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 06:48:41.965103 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 06:48:41.965106 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 06:48:41.965108 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 06:48:41.965323 1 
genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 06:48:41.966098 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ca15652692a4dd6b01f7ec02b8ff37ac41ed740c4c47d451e0742aa80d0c028\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4421dd0243f7239dfa2ede847b9cb5230663e69a214dca3dfb1a4763ea76b233\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4421dd0243f7239dfa2ede847b9cb5230663e69a214dca3dfb1a4763ea76b233\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:24Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:39Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:39 crc kubenswrapper[4492]: I1126 06:49:39.908903 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:39Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:39 crc kubenswrapper[4492]: I1126 06:49:39.912431 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:39 crc kubenswrapper[4492]: I1126 06:49:39.912492 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:39 crc kubenswrapper[4492]: I1126 06:49:39.912502 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:39 crc kubenswrapper[4492]: I1126 06:49:39.912514 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:39 crc kubenswrapper[4492]: I1126 06:49:39.912523 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:39Z","lastTransitionTime":"2025-11-26T06:49:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:49:39 crc kubenswrapper[4492]: I1126 06:49:39.917427 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:39Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:39 crc kubenswrapper[4492]: I1126 06:49:39.925672 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2644f0895688786b5b70f08011457eed33cb0a7962ac6dde6b60dd3276497011\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://27d142c5328a6f659cd2cee0b6535403ccbfb07aee1ea29c928c9d80a847f4ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:39Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:39 crc kubenswrapper[4492]: I1126 06:49:39.933011 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-s4gtb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1cc59fbe-82e1-406b-95b1-a26b6b8ef467\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk29d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk29d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:49:02Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-s4gtb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:39Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:39 crc kubenswrapper[4492]: I1126 06:49:39.941549 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://78117c73db01f54f893d52844cab11a6257a1f6b6b582fb751065e1acc329620\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:39Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:39 crc kubenswrapper[4492]: I1126 06:49:39.949922 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4aa19f79274209a31db5cfe0a8ff6f71000fc4efb2d65dfab3f719d3a7f1ee9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:39Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:39 crc kubenswrapper[4492]: I1126 06:49:39.962745 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b104695-0850-4fb3-b2f8-f764435f8694\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5de69ea04f427c1f151433ee53b119dfa7e3cd603f924ebca340c5050ff7d097\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://738cf2a56bd431ea3dcd29c8a6e66881e618cf6592af8ce1852be9776c41f803\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dba16d525497282969e2ead66e1d4886793234d0f4d2ef143a8246329131b046\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://501133ebc0e93cbba7bea713d813e6f41caa28286ed59ae8d328b6f9050a1e71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0301a329531b99f93a3e91f672edc4ce17ae03cfd5562f5802362423f11670fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1519223368c78fc663c8f6674135a7c574425dccbd0151d6663836b4715e9f9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0888a57c383ad8c0a363ee29bc31841ce4175cb
3aeb1b825da7efd383d4265c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c0888a57c383ad8c0a363ee29bc31841ce4175cb3aeb1b825da7efd383d4265c\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T06:49:38Z\\\",\\\"message\\\":\\\"ts: [openshift-kube-controller-manager/kube-controller-manager-crc openshift-kube-scheduler/openshift-kube-scheduler-crc openshift-network-diagnostics/network-check-source-55646444c4-trplf openshift-network-operator/iptables-alerter-4ln5h openshift-machine-config-operator/kube-rbac-proxy-crio-crc openshift-machine-config-operator/machine-config-daemon-6blv7 openshift-multus/multus-5bshd openshift-multus/network-metrics-daemon-s4gtb openshift-network-operator/network-operator-58b4c7f79c-55gtf openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2gwwb openshift-image-registry/node-ca-hjxcm openshift-network-node-identity/network-node-identity-vrzqb openshift-ovn-kubernetes/ovnkube-node-lghgp openshift-multus/multus-additional-cni-plugins-nrzjd openshift-network-console/networking-console-plugin-85b44fc459-gdk6g openshift-network-diagnostics/network-check-target-xd92c]\\\\nI1126 06:49:38.114760 6374 services_controller.go:445] Built service openshift-marketplace/certified-operators LB template configs for network=default: []services.lbConfig(nil)\\\\nF1126 06:49:38.115033 6374 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initializatio\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:49:37Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-lghgp_openshift-ovn-kubernetes(9b104695-0850-4fb3-b2f8-f764435f8694)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dcc2ea28d7ab1193d0836b56704a623efefea918b62fde4a41c3d5466a531a39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4001ee998f435fc6249ef42a3a6a9a8cf4a5031eb8999158ba3ba6bfa41a4a43\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4001ee998f435fc6249ef42a3a6a9a8cf4a5031eb8999158ba3ba6bfa41a4a43\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lghgp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:39Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:40 crc kubenswrapper[4492]: I1126 06:49:40.014024 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:40 crc kubenswrapper[4492]: I1126 06:49:40.014049 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:40 crc kubenswrapper[4492]: I1126 06:49:40.014070 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:40 crc kubenswrapper[4492]: I1126 06:49:40.014083 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:40 crc kubenswrapper[4492]: I1126 06:49:40.014106 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:40Z","lastTransitionTime":"2025-11-26T06:49:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:49:40 crc kubenswrapper[4492]: I1126 06:49:40.116031 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:40 crc kubenswrapper[4492]: I1126 06:49:40.116073 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:40 crc kubenswrapper[4492]: I1126 06:49:40.116085 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:40 crc kubenswrapper[4492]: I1126 06:49:40.116101 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:40 crc kubenswrapper[4492]: I1126 06:49:40.116112 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:40Z","lastTransitionTime":"2025-11-26T06:49:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:40 crc kubenswrapper[4492]: I1126 06:49:40.218590 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:40 crc kubenswrapper[4492]: I1126 06:49:40.218657 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:40 crc kubenswrapper[4492]: I1126 06:49:40.218678 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:40 crc kubenswrapper[4492]: I1126 06:49:40.218708 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:40 crc kubenswrapper[4492]: I1126 06:49:40.218726 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:40Z","lastTransitionTime":"2025-11-26T06:49:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:40 crc kubenswrapper[4492]: I1126 06:49:40.320991 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:40 crc kubenswrapper[4492]: I1126 06:49:40.321032 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:40 crc kubenswrapper[4492]: I1126 06:49:40.321042 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:40 crc kubenswrapper[4492]: I1126 06:49:40.321068 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:40 crc kubenswrapper[4492]: I1126 06:49:40.321080 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:40Z","lastTransitionTime":"2025-11-26T06:49:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:49:40 crc kubenswrapper[4492]: I1126 06:49:40.423470 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:40 crc kubenswrapper[4492]: I1126 06:49:40.423505 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:40 crc kubenswrapper[4492]: I1126 06:49:40.423520 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:40 crc kubenswrapper[4492]: I1126 06:49:40.423535 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:40 crc kubenswrapper[4492]: I1126 06:49:40.423548 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:40Z","lastTransitionTime":"2025-11-26T06:49:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:40 crc kubenswrapper[4492]: I1126 06:49:40.438019 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 06:49:40 crc kubenswrapper[4492]: E1126 06:49:40.438137 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 06:49:40 crc kubenswrapper[4492]: I1126 06:49:40.438025 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-s4gtb" Nov 26 06:49:40 crc kubenswrapper[4492]: I1126 06:49:40.438141 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 06:49:40 crc kubenswrapper[4492]: E1126 06:49:40.438331 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-s4gtb" podUID="1cc59fbe-82e1-406b-95b1-a26b6b8ef467" Nov 26 06:49:40 crc kubenswrapper[4492]: E1126 06:49:40.438447 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 06:49:40 crc kubenswrapper[4492]: I1126 06:49:40.525306 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:40 crc kubenswrapper[4492]: I1126 06:49:40.525332 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:40 crc kubenswrapper[4492]: I1126 06:49:40.525340 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:40 crc kubenswrapper[4492]: I1126 06:49:40.525350 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:40 crc kubenswrapper[4492]: I1126 06:49:40.525359 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:40Z","lastTransitionTime":"2025-11-26T06:49:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:40 crc kubenswrapper[4492]: I1126 06:49:40.627553 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:40 crc kubenswrapper[4492]: I1126 06:49:40.627588 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:40 crc kubenswrapper[4492]: I1126 06:49:40.627601 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:40 crc kubenswrapper[4492]: I1126 06:49:40.627614 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:40 crc kubenswrapper[4492]: I1126 06:49:40.627623 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:40Z","lastTransitionTime":"2025-11-26T06:49:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 26 06:49:40 crc kubenswrapper[4492]: I1126 06:49:40.729128 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:40 crc kubenswrapper[4492]: I1126 06:49:40.729160 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:40 crc kubenswrapper[4492]: I1126 06:49:40.729189 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:40 crc kubenswrapper[4492]: I1126 06:49:40.729199 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:40 crc kubenswrapper[4492]: I1126 06:49:40.729207 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:40Z","lastTransitionTime":"2025-11-26T06:49:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:40 crc kubenswrapper[4492]: I1126 06:49:40.831731 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:40 crc kubenswrapper[4492]: I1126 06:49:40.831830 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:40 crc kubenswrapper[4492]: I1126 06:49:40.831907 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:40 crc kubenswrapper[4492]: I1126 06:49:40.831979 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:40 crc kubenswrapper[4492]: I1126 06:49:40.832035 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:40Z","lastTransitionTime":"2025-11-26T06:49:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:40 crc kubenswrapper[4492]: I1126 06:49:40.933756 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:40 crc kubenswrapper[4492]: I1126 06:49:40.933834 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:40 crc kubenswrapper[4492]: I1126 06:49:40.933844 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:40 crc kubenswrapper[4492]: I1126 06:49:40.933857 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:40 crc kubenswrapper[4492]: I1126 06:49:40.933884 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:40Z","lastTransitionTime":"2025-11-26T06:49:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:41 crc kubenswrapper[4492]: I1126 06:49:41.035308 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:41 crc kubenswrapper[4492]: I1126 06:49:41.035336 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:41 crc kubenswrapper[4492]: I1126 06:49:41.035346 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:41 crc kubenswrapper[4492]: I1126 06:49:41.035368 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:41 crc kubenswrapper[4492]: I1126 06:49:41.035384 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:41Z","lastTransitionTime":"2025-11-26T06:49:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:41 crc kubenswrapper[4492]: I1126 06:49:41.137400 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:41 crc kubenswrapper[4492]: I1126 06:49:41.137429 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:41 crc kubenswrapper[4492]: I1126 06:49:41.137440 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:41 crc kubenswrapper[4492]: I1126 06:49:41.137455 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:41 crc kubenswrapper[4492]: I1126 06:49:41.137464 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:41Z","lastTransitionTime":"2025-11-26T06:49:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:41 crc kubenswrapper[4492]: I1126 06:49:41.239767 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:41 crc kubenswrapper[4492]: I1126 06:49:41.239878 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:41 crc kubenswrapper[4492]: I1126 06:49:41.239942 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:41 crc kubenswrapper[4492]: I1126 06:49:41.240012 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:41 crc kubenswrapper[4492]: I1126 06:49:41.240080 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:41Z","lastTransitionTime":"2025-11-26T06:49:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:41 crc kubenswrapper[4492]: I1126 06:49:41.342319 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:41 crc kubenswrapper[4492]: I1126 06:49:41.342632 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:41 crc kubenswrapper[4492]: I1126 06:49:41.342715 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:41 crc kubenswrapper[4492]: I1126 06:49:41.342782 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:41 crc kubenswrapper[4492]: I1126 06:49:41.342912 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:41Z","lastTransitionTime":"2025-11-26T06:49:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:41 crc kubenswrapper[4492]: I1126 06:49:41.437918 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 26 06:49:41 crc kubenswrapper[4492]: E1126 06:49:41.438094 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 26 06:49:41 crc kubenswrapper[4492]: I1126 06:49:41.444918 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:41 crc kubenswrapper[4492]: I1126 06:49:41.444992 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:41 crc kubenswrapper[4492]: I1126 06:49:41.445063 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:41 crc kubenswrapper[4492]: I1126 06:49:41.445130 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:41 crc kubenswrapper[4492]: I1126 06:49:41.445209 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:41Z","lastTransitionTime":"2025-11-26T06:49:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:41 crc kubenswrapper[4492]: I1126 06:49:41.546290 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:41 crc kubenswrapper[4492]: I1126 06:49:41.546317 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:41 crc kubenswrapper[4492]: I1126 06:49:41.546326 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:41 crc kubenswrapper[4492]: I1126 06:49:41.546338 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:41 crc kubenswrapper[4492]: I1126 06:49:41.546364 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:41Z","lastTransitionTime":"2025-11-26T06:49:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:41 crc kubenswrapper[4492]: I1126 06:49:41.648012 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:41 crc kubenswrapper[4492]: I1126 06:49:41.648047 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:41 crc kubenswrapper[4492]: I1126 06:49:41.648064 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:41 crc kubenswrapper[4492]: I1126 06:49:41.648078 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:41 crc kubenswrapper[4492]: I1126 06:49:41.648087 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:41Z","lastTransitionTime":"2025-11-26T06:49:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:41 crc kubenswrapper[4492]: I1126 06:49:41.749345 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:41 crc kubenswrapper[4492]: I1126 06:49:41.749381 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:41 crc kubenswrapper[4492]: I1126 06:49:41.749391 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:41 crc kubenswrapper[4492]: I1126 06:49:41.749403 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:41 crc kubenswrapper[4492]: I1126 06:49:41.749412 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:41Z","lastTransitionTime":"2025-11-26T06:49:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:41 crc kubenswrapper[4492]: I1126 06:49:41.851070 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:41 crc kubenswrapper[4492]: I1126 06:49:41.851233 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:41 crc kubenswrapper[4492]: I1126 06:49:41.851300 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:41 crc kubenswrapper[4492]: I1126 06:49:41.851357 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:41 crc kubenswrapper[4492]: I1126 06:49:41.851413 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:41Z","lastTransitionTime":"2025-11-26T06:49:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:41 crc kubenswrapper[4492]: I1126 06:49:41.952903 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:41 crc kubenswrapper[4492]: I1126 06:49:41.952955 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:41 crc kubenswrapper[4492]: I1126 06:49:41.952965 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:41 crc kubenswrapper[4492]: I1126 06:49:41.952977 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:41 crc kubenswrapper[4492]: I1126 06:49:41.953005 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:41Z","lastTransitionTime":"2025-11-26T06:49:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:42 crc kubenswrapper[4492]: I1126 06:49:42.054971 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:42 crc kubenswrapper[4492]: I1126 06:49:42.055009 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:42 crc kubenswrapper[4492]: I1126 06:49:42.055020 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:42 crc kubenswrapper[4492]: I1126 06:49:42.055034 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:42 crc kubenswrapper[4492]: I1126 06:49:42.055044 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:42Z","lastTransitionTime":"2025-11-26T06:49:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:42 crc kubenswrapper[4492]: I1126 06:49:42.156989 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:42 crc kubenswrapper[4492]: I1126 06:49:42.157042 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:42 crc kubenswrapper[4492]: I1126 06:49:42.157066 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:42 crc kubenswrapper[4492]: I1126 06:49:42.157082 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:42 crc kubenswrapper[4492]: I1126 06:49:42.157091 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:42Z","lastTransitionTime":"2025-11-26T06:49:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:42 crc kubenswrapper[4492]: I1126 06:49:42.259492 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:42 crc kubenswrapper[4492]: I1126 06:49:42.259548 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:42 crc kubenswrapper[4492]: I1126 06:49:42.259560 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:42 crc kubenswrapper[4492]: I1126 06:49:42.259585 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:42 crc kubenswrapper[4492]: I1126 06:49:42.259596 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:42Z","lastTransitionTime":"2025-11-26T06:49:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:42 crc kubenswrapper[4492]: I1126 06:49:42.361646 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:42 crc kubenswrapper[4492]: I1126 06:49:42.361685 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:42 crc kubenswrapper[4492]: I1126 06:49:42.361700 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:42 crc kubenswrapper[4492]: I1126 06:49:42.361711 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:42 crc kubenswrapper[4492]: I1126 06:49:42.361720 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:42Z","lastTransitionTime":"2025-11-26T06:49:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:42 crc kubenswrapper[4492]: I1126 06:49:42.437470 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 26 06:49:42 crc kubenswrapper[4492]: E1126 06:49:42.437596 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 26 06:49:42 crc kubenswrapper[4492]: I1126 06:49:42.437481 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 26 06:49:42 crc kubenswrapper[4492]: I1126 06:49:42.437643 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-s4gtb"
Nov 26 06:49:42 crc kubenswrapper[4492]: E1126 06:49:42.437692 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 26 06:49:42 crc kubenswrapper[4492]: E1126 06:49:42.437759 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-s4gtb" podUID="1cc59fbe-82e1-406b-95b1-a26b6b8ef467"
Nov 26 06:49:42 crc kubenswrapper[4492]: I1126 06:49:42.463494 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:42 crc kubenswrapper[4492]: I1126 06:49:42.463527 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:42 crc kubenswrapper[4492]: I1126 06:49:42.463538 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:42 crc kubenswrapper[4492]: I1126 06:49:42.463551 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:42 crc kubenswrapper[4492]: I1126 06:49:42.463562 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:42Z","lastTransitionTime":"2025-11-26T06:49:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:42 crc kubenswrapper[4492]: I1126 06:49:42.564873 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:42 crc kubenswrapper[4492]: I1126 06:49:42.564914 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:42 crc kubenswrapper[4492]: I1126 06:49:42.564926 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:42 crc kubenswrapper[4492]: I1126 06:49:42.564943 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:42 crc kubenswrapper[4492]: I1126 06:49:42.564951 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:42Z","lastTransitionTime":"2025-11-26T06:49:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:42 crc kubenswrapper[4492]: I1126 06:49:42.667120 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:42 crc kubenswrapper[4492]: I1126 06:49:42.667160 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:42 crc kubenswrapper[4492]: I1126 06:49:42.667185 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:42 crc kubenswrapper[4492]: I1126 06:49:42.667201 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:42 crc kubenswrapper[4492]: I1126 06:49:42.667213 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:42Z","lastTransitionTime":"2025-11-26T06:49:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:42 crc kubenswrapper[4492]: I1126 06:49:42.769241 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:42 crc kubenswrapper[4492]: I1126 06:49:42.769472 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:42 crc kubenswrapper[4492]: I1126 06:49:42.769484 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:42 crc kubenswrapper[4492]: I1126 06:49:42.769499 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:42 crc kubenswrapper[4492]: I1126 06:49:42.769510 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:42Z","lastTransitionTime":"2025-11-26T06:49:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:42 crc kubenswrapper[4492]: I1126 06:49:42.871921 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:42 crc kubenswrapper[4492]: I1126 06:49:42.872202 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:42 crc kubenswrapper[4492]: I1126 06:49:42.872213 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:42 crc kubenswrapper[4492]: I1126 06:49:42.872228 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:42 crc kubenswrapper[4492]: I1126 06:49:42.872237 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:42Z","lastTransitionTime":"2025-11-26T06:49:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:42 crc kubenswrapper[4492]: I1126 06:49:42.974840 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:42 crc kubenswrapper[4492]: I1126 06:49:42.974903 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:42 crc kubenswrapper[4492]: I1126 06:49:42.974919 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:42 crc kubenswrapper[4492]: I1126 06:49:42.974947 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:42 crc kubenswrapper[4492]: I1126 06:49:42.974961 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:42Z","lastTransitionTime":"2025-11-26T06:49:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:43 crc kubenswrapper[4492]: I1126 06:49:43.077127 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:43 crc kubenswrapper[4492]: I1126 06:49:43.077204 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:43 crc kubenswrapper[4492]: I1126 06:49:43.077216 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:43 crc kubenswrapper[4492]: I1126 06:49:43.077238 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:43 crc kubenswrapper[4492]: I1126 06:49:43.077250 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:43Z","lastTransitionTime":"2025-11-26T06:49:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:43 crc kubenswrapper[4492]: I1126 06:49:43.179121 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:43 crc kubenswrapper[4492]: I1126 06:49:43.179154 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:43 crc kubenswrapper[4492]: I1126 06:49:43.179163 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:43 crc kubenswrapper[4492]: I1126 06:49:43.179192 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:43 crc kubenswrapper[4492]: I1126 06:49:43.179201 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:43Z","lastTransitionTime":"2025-11-26T06:49:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:43 crc kubenswrapper[4492]: I1126 06:49:43.281151 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:43 crc kubenswrapper[4492]: I1126 06:49:43.281277 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:43 crc kubenswrapper[4492]: I1126 06:49:43.281437 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:43 crc kubenswrapper[4492]: I1126 06:49:43.281582 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:43 crc kubenswrapper[4492]: I1126 06:49:43.281743 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:43Z","lastTransitionTime":"2025-11-26T06:49:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:43 crc kubenswrapper[4492]: I1126 06:49:43.384110 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:43 crc kubenswrapper[4492]: I1126 06:49:43.384493 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:43 crc kubenswrapper[4492]: I1126 06:49:43.384551 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:43 crc kubenswrapper[4492]: I1126 06:49:43.384608 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:43 crc kubenswrapper[4492]: I1126 06:49:43.384660 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:43Z","lastTransitionTime":"2025-11-26T06:49:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:43 crc kubenswrapper[4492]: I1126 06:49:43.437854 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 26 06:49:43 crc kubenswrapper[4492]: E1126 06:49:43.437969 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 26 06:49:43 crc kubenswrapper[4492]: I1126 06:49:43.486454 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:43 crc kubenswrapper[4492]: I1126 06:49:43.486496 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:43 crc kubenswrapper[4492]: I1126 06:49:43.486508 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:43 crc kubenswrapper[4492]: I1126 06:49:43.486527 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:43 crc kubenswrapper[4492]: I1126 06:49:43.486541 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:43Z","lastTransitionTime":"2025-11-26T06:49:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:43 crc kubenswrapper[4492]: I1126 06:49:43.588259 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:43 crc kubenswrapper[4492]: I1126 06:49:43.588294 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:43 crc kubenswrapper[4492]: I1126 06:49:43.588304 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:43 crc kubenswrapper[4492]: I1126 06:49:43.588318 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:43 crc kubenswrapper[4492]: I1126 06:49:43.588328 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:43Z","lastTransitionTime":"2025-11-26T06:49:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:43 crc kubenswrapper[4492]: I1126 06:49:43.690374 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:43 crc kubenswrapper[4492]: I1126 06:49:43.690520 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:43 crc kubenswrapper[4492]: I1126 06:49:43.690581 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:43 crc kubenswrapper[4492]: I1126 06:49:43.690639 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:43 crc kubenswrapper[4492]: I1126 06:49:43.690687 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:43Z","lastTransitionTime":"2025-11-26T06:49:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:43 crc kubenswrapper[4492]: I1126 06:49:43.793817 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:43 crc kubenswrapper[4492]: I1126 06:49:43.793850 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:43 crc kubenswrapper[4492]: I1126 06:49:43.793859 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:43 crc kubenswrapper[4492]: I1126 06:49:43.793876 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:43 crc kubenswrapper[4492]: I1126 06:49:43.793885 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:43Z","lastTransitionTime":"2025-11-26T06:49:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:43 crc kubenswrapper[4492]: I1126 06:49:43.895745 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:43 crc kubenswrapper[4492]: I1126 06:49:43.895808 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:43 crc kubenswrapper[4492]: I1126 06:49:43.895818 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:43 crc kubenswrapper[4492]: I1126 06:49:43.895834 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:43 crc kubenswrapper[4492]: I1126 06:49:43.895846 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:43Z","lastTransitionTime":"2025-11-26T06:49:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:43 crc kubenswrapper[4492]: I1126 06:49:43.997452 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:43 crc kubenswrapper[4492]: I1126 06:49:43.997485 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:43 crc kubenswrapper[4492]: I1126 06:49:43.997493 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:43 crc kubenswrapper[4492]: I1126 06:49:43.997509 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:43 crc kubenswrapper[4492]: I1126 06:49:43.997517 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:43Z","lastTransitionTime":"2025-11-26T06:49:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:44 crc kubenswrapper[4492]: I1126 06:49:44.099078 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:44 crc kubenswrapper[4492]: I1126 06:49:44.099140 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:44 crc kubenswrapper[4492]: I1126 06:49:44.099157 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:44 crc kubenswrapper[4492]: I1126 06:49:44.099209 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:44 crc kubenswrapper[4492]: I1126 06:49:44.099226 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:44Z","lastTransitionTime":"2025-11-26T06:49:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:44 crc kubenswrapper[4492]: I1126 06:49:44.201108 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:44 crc kubenswrapper[4492]: I1126 06:49:44.201140 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:44 crc kubenswrapper[4492]: I1126 06:49:44.201149 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:44 crc kubenswrapper[4492]: I1126 06:49:44.201162 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:44 crc kubenswrapper[4492]: I1126 06:49:44.201183 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:44Z","lastTransitionTime":"2025-11-26T06:49:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:44 crc kubenswrapper[4492]: I1126 06:49:44.303039 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:44 crc kubenswrapper[4492]: I1126 06:49:44.303075 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:44 crc kubenswrapper[4492]: I1126 06:49:44.303084 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:44 crc kubenswrapper[4492]: I1126 06:49:44.303097 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:44 crc kubenswrapper[4492]: I1126 06:49:44.303108 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:44Z","lastTransitionTime":"2025-11-26T06:49:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:44 crc kubenswrapper[4492]: I1126 06:49:44.405441 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:44 crc kubenswrapper[4492]: I1126 06:49:44.405784 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:44 crc kubenswrapper[4492]: I1126 06:49:44.405843 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:44 crc kubenswrapper[4492]: I1126 06:49:44.405965 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:44 crc kubenswrapper[4492]: I1126 06:49:44.406073 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:44Z","lastTransitionTime":"2025-11-26T06:49:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 06:49:44 crc kubenswrapper[4492]: I1126 06:49:44.437736 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 26 06:49:44 crc kubenswrapper[4492]: I1126 06:49:44.437754 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-s4gtb"
Nov 26 06:49:44 crc kubenswrapper[4492]: I1126 06:49:44.437765 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 26 06:49:44 crc kubenswrapper[4492]: E1126 06:49:44.437838 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 26 06:49:44 crc kubenswrapper[4492]: E1126 06:49:44.437956 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-s4gtb" podUID="1cc59fbe-82e1-406b-95b1-a26b6b8ef467"
Nov 26 06:49:44 crc kubenswrapper[4492]: E1126 06:49:44.438065 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 26 06:49:44 crc kubenswrapper[4492]: I1126 06:49:44.447486 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2gwwb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d23da2c-14b7-4671-b87e-7506855ca163\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://56a756e2798f3a758f7cc404b3c1e543389f88510f1f1bcef6bd603086b5ac96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c7zdg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://23c2c8bf0201054d839f80e0d6ee1423ef1140d1c59512cd787edbad1b611b2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c7zdg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:49:01Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2gwwb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:44Z is after 2025-08-24T17:21:41Z"
Nov 26 06:49:44 crc kubenswrapper[4492]: I1126 06:49:44.456637 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"995e57c0-8e79-4857-8451-c7f7b51a05d3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70a3d5141782b7c4dcb2ef96a026e1fb4f4ef101c967f64a20ece097bbdf007f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1684e95c263df92c92efdd2240417c071f977b58d825abd2155277d7cce1fd57\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://15f4e01e3336948991062e895549de16371af485e1cc1f32591b5dc4fc62472b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef8e05b7e0643e9159acdd474c0f1ed97db182b0a7ed4f21b475ce6e4c051f52\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a6dd3695118a8c09585a7cfceb42ac5ae5898562c5f6442da6936f849a4e9f8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T06:48:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1126 06:48:41.573117 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1126 06:48:41.573321 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 06:48:41.575536 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3533228848/tls.crt::/tmp/serving-cert-3533228848/tls.key\\\\\\\"\\\\nI1126 06:48:41.958496 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 06:48:41.961105 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 06:48:41.961124 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 06:48:41.961145 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 06:48:41.961150 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 06:48:41.965068 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 06:48:41.965092 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:48:41.965097 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:48:41.965101 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 06:48:41.965103 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 06:48:41.965106 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 06:48:41.965108 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 06:48:41.965323 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 06:48:41.966098 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ca15652692a4dd6b01f7ec02b8ff37ac41ed740c4c47d451e0742aa80d0c028\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4421dd0243f7239dfa2ede847b9cb5230663e69a214dca3dfb1a4763ea76b233\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4421dd0243f7239dfa2ede847b9cb5230663e69a214dca3dfb1a4763ea76b233\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:24Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:44Z is after 2025-08-24T17:21:41Z"
Nov 26 06:49:44 crc kubenswrapper[4492]: I1126 06:49:44.464973 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:44Z is after 2025-08-24T17:21:41Z"
Nov 26 06:49:44 crc kubenswrapper[4492]: I1126 06:49:44.473444 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:44Z is after 2025-08-24T17:21:41Z"
Nov 26 06:49:44 crc kubenswrapper[4492]: I1126 06:49:44.482831 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2644f0895688786b5b70f08011457eed33cb0a7962ac6dde6b60dd3276497011\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://27d142c5328a6f659cd2cee0b6535403ccbfb07aee1ea29c928c9d80a847f4ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imag
eID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:44Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:44 crc kubenswrapper[4492]: I1126 06:49:44.491167 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:44Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:44 crc kubenswrapper[4492]: I1126 06:49:44.503823 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-hjxcm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfdb68d9-168b-4d04-a6ee-b2deef54a9ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f3b93237fa8e75f6423c8f194440aebb4fffec26f63b19b00396ee567fb454f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cpmw5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:50Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-hjxcm\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:44Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:44 crc kubenswrapper[4492]: I1126 06:49:44.507219 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:44 crc kubenswrapper[4492]: I1126 06:49:44.507255 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:44 crc kubenswrapper[4492]: I1126 06:49:44.507265 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:44 crc kubenswrapper[4492]: I1126 06:49:44.507280 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:44 crc kubenswrapper[4492]: I1126 06:49:44.507289 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:44Z","lastTransitionTime":"2025-11-26T06:49:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:44 crc kubenswrapper[4492]: I1126 06:49:44.515329 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://78117c73db01f54f893d52844cab11a6257a1f6b6b582fb751065e1acc329620\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:44Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:44 crc kubenswrapper[4492]: I1126 06:49:44.524238 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4aa19f79274209a31db5cfe0a8ff6f71000fc4efb2d65dfab3f719d3a7f1ee9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:44Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:44 crc kubenswrapper[4492]: I1126 06:49:44.537922 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b104695-0850-4fb3-b2f8-f764435f8694\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5de69ea04f427c1f151433ee53b119dfa7e3cd603f924ebca340c5050ff7d097\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://738cf2a56bd431ea3dcd29c8a6e66881e618cf6592af8ce1852be9776c41f803\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dba16d525497282969e2ead66e1d4886793234d0f4d2ef143a8246329131b046\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://501133ebc0e93cbba7bea713d813e6f41caa28286ed59ae8d328b6f9050a1e71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0301a329531b99f93a3e91f672edc4ce17ae03cfd5562f5802362423f11670fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1519223368c78fc663c8f6674135a7c574425dccbd0151d6663836b4715e9f9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0888a57c383ad8c0a363ee29bc31841ce4175cb3aeb1b825da7efd383d4265c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c0888a57c383ad8c0a363ee29bc31841ce4175cb3aeb1b825da7efd383d4265c\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T06:49:38Z\\\",\\\"message\\\":\\\"ts: [openshift-kube-controller-manager/kube-controller-manager-crc openshift-kube-scheduler/openshift-kube-scheduler-crc openshift-network-diagnostics/network-check-source-55646444c4-trplf openshift-network-operator/iptables-alerter-4ln5h openshift-machine-config-operator/kube-rbac-proxy-crio-crc openshift-machine-config-operator/machine-config-daemon-6blv7 openshift-multus/multus-5bshd openshift-multus/network-metrics-daemon-s4gtb openshift-network-operator/network-operator-58b4c7f79c-55gtf openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2gwwb openshift-image-registry/node-ca-hjxcm openshift-network-node-identity/network-node-identity-vrzqb openshift-ovn-kubernetes/ovnkube-node-lghgp openshift-multus/multus-additional-cni-plugins-nrzjd openshift-network-console/networking-console-plugin-85b44fc459-gdk6g openshift-network-diagnostics/network-check-target-xd92c]\\\\nI1126 06:49:38.114760 6374 services_controller.go:445] Built service openshift-marketplace/certified-operators LB template configs for network=default: []services.lbConfig(nil)\\\\nF1126 06:49:38.115033 6374 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initializatio\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:49:37Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-lghgp_openshift-ovn-kubernetes(9b104695-0850-4fb3-b2f8-f764435f8694)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dcc2ea28d7ab1193d0836b56704a623efefea918b62fde4a41c3d5466a531a39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4001ee998f435fc6249ef42a3a6a9a8cf4a5031eb8999158ba3ba6bfa41a4a43\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4001ee998f435fc6249ef42a3a6a9a8cf4a5031eb8999158ba3ba6bfa41a4a43\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lghgp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:44Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:44 crc kubenswrapper[4492]: I1126 06:49:44.545112 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-s4gtb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1cc59fbe-82e1-406b-95b1-a26b6b8ef467\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk29d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk29d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:49:02Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-s4gtb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:44Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:44 crc kubenswrapper[4492]: I1126 06:49:44.554452 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-nrzjd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"026c3325-a592-4828-8e4f-08bcb790014a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c4955932b597b4b409c6c0bd2195c7918b56f1db3aca639a0d47656173b6176\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70dcd5b71e0ed41855711d244d6628b740a92d6ada3fa3114a76c280b83e7402\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70dcd5b71e0ed41855711d244d6628b740a92d6ada3fa3114a76c280b83e7402\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://46f1a12a78f284278cf74713bf7a46f4d62a7f40258ec65ae217b5fbb5729525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://46f1a12a78f284278cf74713bf7a46f4d62a7f40258ec65ae217b5fbb5729525\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://233ce2ea0f4758076db5c922254232e5cccdc2466e03c6044a0ba4077dd71e8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://233ce2ea0f4758076db5c922254232e5cccdc2466e03c6044a0ba4077dd71e8a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1f662c3ac64d38717f3c8eb17f7ab22b7f9af88661f97ddae2caeec71b793a68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f662c3ac64d38717f3c8eb17f7ab22b7f9af88661f97ddae2caeec71b793a68\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://39153e3f02feb4f9af860e81e6e426354d4113d26135aadb9bc708dcb2cb1770\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://39153e3f02feb4f9af860e81e6e426354d4113d26135aadb9bc708dcb2cb1770\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b34363c342a19d8eda1d5be4bea825a4e0f9a4281915b6ae5d0cc5b00ad7c926\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b34363c342a19d8eda1d5be4bea825a4e0f9a4281915b6ae5d0cc5b00ad7c926\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-nrzjd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:44Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:44 crc kubenswrapper[4492]: I1126 06:49:44.561804 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6lnwf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0151e6e0-df4e-4482-9309-f8cce9bc6ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f91d7ead0eaa9a8c8d4ec6372d35236fc33de1f8606616efadfee2ec6a71324\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cmsnp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6lnwf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:44Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:44 crc kubenswrapper[4492]: I1126 06:49:44.569319 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"04bf18ad-d2a1-4b30-a3fa-2b6247363c82\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fad93839d2a51dffea51b659a6dcbfe24701e00ebb88e18329f7aa4351e1b4f5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lwjt9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://695ce8a08afa726c47c6aa48ddd828cbc420a9740de6cf165351e5bd68174a89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lwjt9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6blv7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:44Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:44 crc kubenswrapper[4492]: I1126 06:49:44.576021 4492 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2cbf75e9-4b7e-454b-bcff-ebb0f537bb0e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://390b1499bd9aae353a574f1d5ca4243dda7d4576837cf40a0118c53cff23ebdb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d88bf1783acd582780b9f9208f310d28ebb08a2ca510a916a0ddc6fad0c4d88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7d88bf1783acd582780b9f9208f310d28ebb08a2ca510a916a0ddc6fad0c4d88\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:24Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:44Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:44 crc kubenswrapper[4492]: I1126 06:49:44.585117 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch 
status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"779b4f9a-92b7-4dcc-938a-e4de5decd688\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e170e91f442c9f45c7adfc9a5f8435cb51135522d5ac61f29829834c1f797e0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a8187b933b520c7a9c1c7f798f841f3892c249f52eddd13c0c7585a8bc916f20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b87661ddeafdf124a87d6bc50755b340e32d88bbc35a005ae13aa66aa3b39ff4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a8bf4249c1551f054875ff3ef146502de6c99fd3afd10d78b41274196a35a6d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller
-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:24Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:44Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:44 crc kubenswrapper[4492]: I1126 06:49:44.592863 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b32864c2-0866-4642-a872-7a5109d6f84f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3cdba573f24fecefce899a977a585bd480de506f64dcd8af6fd7f32f945b844d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b85db4fc799f48153a0e9db0b5b8316762f221aa873c03d675beb5e9939377bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev
/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8088d16b18d99a32c41b63cbd6181314e805595697cbc5f122864dff6fe7b324\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2c51611cfe8bb2b69aeaa6d69f7e94deda73d79397c8e5a4ac4f0fe330a4b586\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2c51611cfe8bb2b69aeaa6d69f7e94deda73d79397c8e5a4ac4f0fe330a4b586\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:24Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:44Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:44 crc kubenswrapper[4492]: I1126 06:49:44.602138 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-5bshd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a471ac3f-0ac0-4110-94bb-194c0de0af26\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d93d67e2acbb2cff41dd4d5bc98ffe7cd68f7fd393e4fab2fc974ae4de103ca3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9a91fba8751c53be54b0060bfc75906ab11b521770ca44425d8910fa13027c9d\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T06:49:35Z\\\",\\\"message\\\":\\\"2025-11-26T06:48:49+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_11d86e64-dae3-4806-bca9-064973c40277\\\\n2025-11-26T06:48:49+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_11d86e64-dae3-4806-bca9-064973c40277 to /host/opt/cni/bin/\\\\n2025-11-26T06:48:50Z [verbose] multus-daemon started\\\\n2025-11-26T06:48:50Z [verbose] Readiness Indicator file check\\\\n2025-11-26T06:49:35Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:49:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gt98z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-5bshd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:44Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:44 crc kubenswrapper[4492]: I1126 06:49:44.609466 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:44 crc kubenswrapper[4492]: I1126 06:49:44.609493 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:44 crc kubenswrapper[4492]: I1126 06:49:44.609505 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:44 crc kubenswrapper[4492]: I1126 06:49:44.609517 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:44 crc kubenswrapper[4492]: I1126 06:49:44.609527 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:44Z","lastTransitionTime":"2025-11-26T06:49:44Z","reason":"KubeletNotReady","message":"container 
runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:44 crc kubenswrapper[4492]: I1126 06:49:44.710849 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:44 crc kubenswrapper[4492]: I1126 06:49:44.710886 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:44 crc kubenswrapper[4492]: I1126 06:49:44.710898 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:44 crc kubenswrapper[4492]: I1126 06:49:44.710913 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:44 crc kubenswrapper[4492]: I1126 06:49:44.710925 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:44Z","lastTransitionTime":"2025-11-26T06:49:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:44 crc kubenswrapper[4492]: I1126 06:49:44.812508 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:44 crc kubenswrapper[4492]: I1126 06:49:44.812551 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:44 crc kubenswrapper[4492]: I1126 06:49:44.812564 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:44 crc kubenswrapper[4492]: I1126 06:49:44.812582 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:44 crc kubenswrapper[4492]: I1126 06:49:44.812595 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:44Z","lastTransitionTime":"2025-11-26T06:49:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:49:44 crc kubenswrapper[4492]: I1126 06:49:44.915433 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:44 crc kubenswrapper[4492]: I1126 06:49:44.915458 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:44 crc kubenswrapper[4492]: I1126 06:49:44.915466 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:44 crc kubenswrapper[4492]: I1126 06:49:44.915481 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:44 crc kubenswrapper[4492]: I1126 06:49:44.915489 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:44Z","lastTransitionTime":"2025-11-26T06:49:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:45 crc kubenswrapper[4492]: I1126 06:49:45.018084 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:45 crc kubenswrapper[4492]: I1126 06:49:45.018215 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:45 crc kubenswrapper[4492]: I1126 06:49:45.018311 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:45 crc kubenswrapper[4492]: I1126 06:49:45.018384 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:45 crc kubenswrapper[4492]: I1126 06:49:45.018440 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:45Z","lastTransitionTime":"2025-11-26T06:49:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:45 crc kubenswrapper[4492]: I1126 06:49:45.120082 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:45 crc kubenswrapper[4492]: I1126 06:49:45.120104 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:45 crc kubenswrapper[4492]: I1126 06:49:45.120114 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:45 crc kubenswrapper[4492]: I1126 06:49:45.120126 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:45 crc kubenswrapper[4492]: I1126 06:49:45.120135 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:45Z","lastTransitionTime":"2025-11-26T06:49:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:49:45 crc kubenswrapper[4492]: I1126 06:49:45.222632 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:45 crc kubenswrapper[4492]: I1126 06:49:45.222659 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:45 crc kubenswrapper[4492]: I1126 06:49:45.222669 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:45 crc kubenswrapper[4492]: I1126 06:49:45.222680 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:45 crc kubenswrapper[4492]: I1126 06:49:45.222688 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:45Z","lastTransitionTime":"2025-11-26T06:49:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:45 crc kubenswrapper[4492]: I1126 06:49:45.324327 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:45 crc kubenswrapper[4492]: I1126 06:49:45.324351 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:45 crc kubenswrapper[4492]: I1126 06:49:45.324361 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:45 crc kubenswrapper[4492]: I1126 06:49:45.324378 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:45 crc kubenswrapper[4492]: I1126 06:49:45.324388 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:45Z","lastTransitionTime":"2025-11-26T06:49:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:45 crc kubenswrapper[4492]: I1126 06:49:45.426387 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:45 crc kubenswrapper[4492]: I1126 06:49:45.426410 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:45 crc kubenswrapper[4492]: I1126 06:49:45.426421 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:45 crc kubenswrapper[4492]: I1126 06:49:45.426432 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:45 crc kubenswrapper[4492]: I1126 06:49:45.426441 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:45Z","lastTransitionTime":"2025-11-26T06:49:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:49:45 crc kubenswrapper[4492]: I1126 06:49:45.438158 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 06:49:45 crc kubenswrapper[4492]: E1126 06:49:45.438285 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 06:49:45 crc kubenswrapper[4492]: I1126 06:49:45.527859 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:45 crc kubenswrapper[4492]: I1126 06:49:45.527901 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:45 crc kubenswrapper[4492]: I1126 06:49:45.527916 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:45 crc kubenswrapper[4492]: I1126 06:49:45.527934 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:45 crc kubenswrapper[4492]: I1126 06:49:45.527948 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:45Z","lastTransitionTime":"2025-11-26T06:49:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:45 crc kubenswrapper[4492]: I1126 06:49:45.629975 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:45 crc kubenswrapper[4492]: I1126 06:49:45.630092 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:45 crc kubenswrapper[4492]: I1126 06:49:45.630154 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:45 crc kubenswrapper[4492]: I1126 06:49:45.630237 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:45 crc kubenswrapper[4492]: I1126 06:49:45.630301 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:45Z","lastTransitionTime":"2025-11-26T06:49:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:49:45 crc kubenswrapper[4492]: I1126 06:49:45.732004 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:45 crc kubenswrapper[4492]: I1126 06:49:45.732247 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:45 crc kubenswrapper[4492]: I1126 06:49:45.732313 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:45 crc kubenswrapper[4492]: I1126 06:49:45.732379 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:45 crc kubenswrapper[4492]: I1126 06:49:45.732437 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:45Z","lastTransitionTime":"2025-11-26T06:49:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:45 crc kubenswrapper[4492]: I1126 06:49:45.834000 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:45 crc kubenswrapper[4492]: I1126 06:49:45.834034 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:45 crc kubenswrapper[4492]: I1126 06:49:45.834046 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:45 crc kubenswrapper[4492]: I1126 06:49:45.834065 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:45 crc kubenswrapper[4492]: I1126 06:49:45.834074 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:45Z","lastTransitionTime":"2025-11-26T06:49:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:45 crc kubenswrapper[4492]: I1126 06:49:45.937102 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:45 crc kubenswrapper[4492]: I1126 06:49:45.937241 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:45 crc kubenswrapper[4492]: I1126 06:49:45.937300 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:45 crc kubenswrapper[4492]: I1126 06:49:45.937363 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:45 crc kubenswrapper[4492]: I1126 06:49:45.937412 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:45Z","lastTransitionTime":"2025-11-26T06:49:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:49:46 crc kubenswrapper[4492]: I1126 06:49:46.039469 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:46 crc kubenswrapper[4492]: I1126 06:49:46.039522 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:46 crc kubenswrapper[4492]: I1126 06:49:46.039535 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:46 crc kubenswrapper[4492]: I1126 06:49:46.039556 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:46 crc kubenswrapper[4492]: I1126 06:49:46.039569 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:46Z","lastTransitionTime":"2025-11-26T06:49:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:46 crc kubenswrapper[4492]: I1126 06:49:46.141217 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:46 crc kubenswrapper[4492]: I1126 06:49:46.141258 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:46 crc kubenswrapper[4492]: I1126 06:49:46.141269 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:46 crc kubenswrapper[4492]: I1126 06:49:46.141282 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:46 crc kubenswrapper[4492]: I1126 06:49:46.141292 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:46Z","lastTransitionTime":"2025-11-26T06:49:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:49:46 crc kubenswrapper[4492]: I1126 06:49:46.172818 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 06:49:46 crc kubenswrapper[4492]: I1126 06:49:46.172887 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 06:49:46 crc kubenswrapper[4492]: I1126 06:49:46.172915 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 06:49:46 crc kubenswrapper[4492]: E1126 06:49:46.172944 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 06:50:50.172917204 +0000 UTC m=+146.056805512 (durationBeforeRetry 1m4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:49:46 crc kubenswrapper[4492]: I1126 06:49:46.172984 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 06:49:46 crc kubenswrapper[4492]: I1126 06:49:46.173033 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 06:49:46 crc kubenswrapper[4492]: E1126 06:49:46.173045 4492 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 26 06:49:46 crc kubenswrapper[4492]: E1126 06:49:46.173122 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. 
No retries permitted until 2025-11-26 06:50:50.173103323 +0000 UTC m=+146.056991621 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 26 06:49:46 crc kubenswrapper[4492]: E1126 06:49:46.173140 4492 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 26 06:49:46 crc kubenswrapper[4492]: E1126 06:49:46.173192 4492 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 26 06:49:46 crc kubenswrapper[4492]: E1126 06:49:46.173208 4492 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 06:49:46 crc kubenswrapper[4492]: E1126 06:49:46.173202 4492 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 26 06:49:46 crc kubenswrapper[4492]: E1126 06:49:46.173270 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-26 06:50:50.173252343 +0000 UTC m=+146.057140642 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 06:49:46 crc kubenswrapper[4492]: E1126 06:49:46.173219 4492 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 26 06:49:46 crc kubenswrapper[4492]: E1126 06:49:46.173293 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-26 06:50:50.173283221 +0000 UTC m=+146.057171529 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 26 06:49:46 crc kubenswrapper[4492]: E1126 06:49:46.173313 4492 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 26 06:49:46 crc kubenswrapper[4492]: E1126 06:49:46.173329 4492 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 06:49:46 crc kubenswrapper[4492]: E1126 06:49:46.173376 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-26 06:50:50.173360708 +0000 UTC m=+146.057249006 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 06:49:46 crc kubenswrapper[4492]: I1126 06:49:46.243633 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:46 crc kubenswrapper[4492]: I1126 06:49:46.243662 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:46 crc kubenswrapper[4492]: I1126 06:49:46.243672 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:46 crc kubenswrapper[4492]: I1126 06:49:46.243687 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:46 crc kubenswrapper[4492]: I1126 06:49:46.243697 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:46Z","lastTransitionTime":"2025-11-26T06:49:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:49:46 crc kubenswrapper[4492]: I1126 06:49:46.345861 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:46 crc kubenswrapper[4492]: I1126 06:49:46.345888 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:46 crc kubenswrapper[4492]: I1126 06:49:46.345899 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:46 crc kubenswrapper[4492]: I1126 06:49:46.345910 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:46 crc kubenswrapper[4492]: I1126 06:49:46.345920 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:46Z","lastTransitionTime":"2025-11-26T06:49:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:46 crc kubenswrapper[4492]: I1126 06:49:46.437580 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 06:49:46 crc kubenswrapper[4492]: I1126 06:49:46.437577 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-s4gtb" Nov 26 06:49:46 crc kubenswrapper[4492]: I1126 06:49:46.437701 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 06:49:46 crc kubenswrapper[4492]: E1126 06:49:46.437832 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 06:49:46 crc kubenswrapper[4492]: E1126 06:49:46.437970 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-s4gtb" podUID="1cc59fbe-82e1-406b-95b1-a26b6b8ef467" Nov 26 06:49:46 crc kubenswrapper[4492]: E1126 06:49:46.438046 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 06:49:46 crc kubenswrapper[4492]: I1126 06:49:46.447975 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:46 crc kubenswrapper[4492]: I1126 06:49:46.448007 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:46 crc kubenswrapper[4492]: I1126 06:49:46.448017 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:46 crc kubenswrapper[4492]: I1126 06:49:46.448031 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:46 crc kubenswrapper[4492]: I1126 06:49:46.448043 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:46Z","lastTransitionTime":"2025-11-26T06:49:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:46 crc kubenswrapper[4492]: I1126 06:49:46.549673 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:46 crc kubenswrapper[4492]: I1126 06:49:46.549808 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:46 crc kubenswrapper[4492]: I1126 06:49:46.549874 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:46 crc kubenswrapper[4492]: I1126 06:49:46.549945 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:46 crc kubenswrapper[4492]: I1126 06:49:46.550011 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:46Z","lastTransitionTime":"2025-11-26T06:49:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:49:46 crc kubenswrapper[4492]: I1126 06:49:46.642343 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:46 crc kubenswrapper[4492]: I1126 06:49:46.642381 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:46 crc kubenswrapper[4492]: I1126 06:49:46.642391 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:46 crc kubenswrapper[4492]: I1126 06:49:46.642415 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:46 crc kubenswrapper[4492]: I1126 06:49:46.642424 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:46Z","lastTransitionTime":"2025-11-26T06:49:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:46 crc kubenswrapper[4492]: E1126 06:49:46.651904 4492 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148056Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608856Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:46Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:46Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"5a30a4c6-2314-4103-8c18-44e795d62516\\\",\\\"systemUUID\\\":\\\"836cf739-0185-4d24-bd92-dec4516ccf4f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:46Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:46 crc kubenswrapper[4492]: I1126 06:49:46.655549 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:46 crc kubenswrapper[4492]: I1126 06:49:46.655573 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 06:49:46 crc kubenswrapper[4492]: I1126 06:49:46.655582 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:46 crc kubenswrapper[4492]: I1126 06:49:46.655594 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:46 crc kubenswrapper[4492]: I1126 06:49:46.655603 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:46Z","lastTransitionTime":"2025-11-26T06:49:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:46 crc kubenswrapper[4492]: E1126 06:49:46.665324 4492 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148056Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608856Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:46Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:46Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"5a30a4c6-2314-4103-8c18-44e795d62516\\\",\\\"systemUUID\\\":\\\"836cf739-0185-4d24-bd92-dec4516ccf4f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:46Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:46 crc kubenswrapper[4492]: I1126 06:49:46.667849 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:46 crc kubenswrapper[4492]: I1126 06:49:46.667873 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 06:49:46 crc kubenswrapper[4492]: I1126 06:49:46.667881 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:46 crc kubenswrapper[4492]: I1126 06:49:46.667891 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:46 crc kubenswrapper[4492]: I1126 06:49:46.667898 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:46Z","lastTransitionTime":"2025-11-26T06:49:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:46 crc kubenswrapper[4492]: E1126 06:49:46.675857 4492 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148056Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608856Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:46Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:46Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"5a30a4c6-2314-4103-8c18-44e795d62516\\\",\\\"systemUUID\\\":\\\"836cf739-0185-4d24-bd92-dec4516ccf4f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:46Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:46 crc kubenswrapper[4492]: I1126 06:49:46.679134 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:46 crc kubenswrapper[4492]: I1126 06:49:46.679398 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 06:49:46 crc kubenswrapper[4492]: I1126 06:49:46.679523 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:46 crc kubenswrapper[4492]: I1126 06:49:46.679809 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:46 crc kubenswrapper[4492]: I1126 06:49:46.679872 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:46Z","lastTransitionTime":"2025-11-26T06:49:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:46 crc kubenswrapper[4492]: E1126 06:49:46.691237 4492 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148056Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608856Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:46Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:46Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"5a30a4c6-2314-4103-8c18-44e795d62516\\\",\\\"systemUUID\\\":\\\"836cf739-0185-4d24-bd92-dec4516ccf4f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:46Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:46 crc kubenswrapper[4492]: I1126 06:49:46.693829 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:46 crc kubenswrapper[4492]: I1126 06:49:46.693870 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 06:49:46 crc kubenswrapper[4492]: I1126 06:49:46.693880 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:46 crc kubenswrapper[4492]: I1126 06:49:46.693894 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:46 crc kubenswrapper[4492]: I1126 06:49:46.693903 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:46Z","lastTransitionTime":"2025-11-26T06:49:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:46 crc kubenswrapper[4492]: E1126 06:49:46.703049 4492 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148056Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608856Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:46Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:46Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"5a30a4c6-2314-4103-8c18-44e795d62516\\\",\\\"systemUUID\\\":\\\"836cf739-0185-4d24-bd92-dec4516ccf4f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:46Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:46 crc kubenswrapper[4492]: E1126 06:49:46.703188 4492 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 26 06:49:46 crc kubenswrapper[4492]: I1126 06:49:46.704470 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 26 06:49:46 crc kubenswrapper[4492]: I1126 06:49:46.704501 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:46 crc kubenswrapper[4492]: I1126 06:49:46.704511 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:46 crc kubenswrapper[4492]: I1126 06:49:46.704524 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:46 crc kubenswrapper[4492]: I1126 06:49:46.704533 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:46Z","lastTransitionTime":"2025-11-26T06:49:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:46 crc kubenswrapper[4492]: I1126 06:49:46.806507 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:46 crc kubenswrapper[4492]: I1126 06:49:46.806538 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:46 crc kubenswrapper[4492]: I1126 06:49:46.806547 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:46 crc kubenswrapper[4492]: I1126 06:49:46.806561 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:46 crc kubenswrapper[4492]: I1126 06:49:46.806570 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:46Z","lastTransitionTime":"2025-11-26T06:49:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:46 crc kubenswrapper[4492]: I1126 06:49:46.908616 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:46 crc kubenswrapper[4492]: I1126 06:49:46.908679 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:46 crc kubenswrapper[4492]: I1126 06:49:46.908689 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:46 crc kubenswrapper[4492]: I1126 06:49:46.908702 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:46 crc kubenswrapper[4492]: I1126 06:49:46.908711 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:46Z","lastTransitionTime":"2025-11-26T06:49:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:49:47 crc kubenswrapper[4492]: I1126 06:49:47.009963 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:47 crc kubenswrapper[4492]: I1126 06:49:47.009999 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:47 crc kubenswrapper[4492]: I1126 06:49:47.010009 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:47 crc kubenswrapper[4492]: I1126 06:49:47.010022 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:47 crc kubenswrapper[4492]: I1126 06:49:47.010032 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:47Z","lastTransitionTime":"2025-11-26T06:49:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:47 crc kubenswrapper[4492]: I1126 06:49:47.111873 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:47 crc kubenswrapper[4492]: I1126 06:49:47.111901 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:47 crc kubenswrapper[4492]: I1126 06:49:47.111913 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:47 crc kubenswrapper[4492]: I1126 06:49:47.111925 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:47 crc kubenswrapper[4492]: I1126 06:49:47.111934 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:47Z","lastTransitionTime":"2025-11-26T06:49:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:47 crc kubenswrapper[4492]: I1126 06:49:47.213488 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:47 crc kubenswrapper[4492]: I1126 06:49:47.213536 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:47 crc kubenswrapper[4492]: I1126 06:49:47.213545 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:47 crc kubenswrapper[4492]: I1126 06:49:47.213573 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:47 crc kubenswrapper[4492]: I1126 06:49:47.213581 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:47Z","lastTransitionTime":"2025-11-26T06:49:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:49:47 crc kubenswrapper[4492]: I1126 06:49:47.315322 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:47 crc kubenswrapper[4492]: I1126 06:49:47.315360 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:47 crc kubenswrapper[4492]: I1126 06:49:47.315375 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:47 crc kubenswrapper[4492]: I1126 06:49:47.315393 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:47 crc kubenswrapper[4492]: I1126 06:49:47.315405 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:47Z","lastTransitionTime":"2025-11-26T06:49:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:47 crc kubenswrapper[4492]: I1126 06:49:47.416904 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:47 crc kubenswrapper[4492]: I1126 06:49:47.416929 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:47 crc kubenswrapper[4492]: I1126 06:49:47.416936 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:47 crc kubenswrapper[4492]: I1126 06:49:47.416947 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:47 crc kubenswrapper[4492]: I1126 06:49:47.416956 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:47Z","lastTransitionTime":"2025-11-26T06:49:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:47 crc kubenswrapper[4492]: I1126 06:49:47.437566 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 06:49:47 crc kubenswrapper[4492]: E1126 06:49:47.437652 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 06:49:47 crc kubenswrapper[4492]: I1126 06:49:47.518792 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:47 crc kubenswrapper[4492]: I1126 06:49:47.518834 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:47 crc kubenswrapper[4492]: I1126 06:49:47.518844 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:47 crc kubenswrapper[4492]: I1126 06:49:47.518857 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:47 crc kubenswrapper[4492]: I1126 06:49:47.518870 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:47Z","lastTransitionTime":"2025-11-26T06:49:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:47 crc kubenswrapper[4492]: I1126 06:49:47.621605 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:47 crc kubenswrapper[4492]: I1126 06:49:47.621635 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:47 crc kubenswrapper[4492]: I1126 06:49:47.621644 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:47 crc kubenswrapper[4492]: I1126 06:49:47.621656 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:47 crc kubenswrapper[4492]: I1126 06:49:47.621665 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:47Z","lastTransitionTime":"2025-11-26T06:49:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:49:47 crc kubenswrapper[4492]: I1126 06:49:47.724008 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:47 crc kubenswrapper[4492]: I1126 06:49:47.724048 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:47 crc kubenswrapper[4492]: I1126 06:49:47.724067 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:47 crc kubenswrapper[4492]: I1126 06:49:47.724082 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:47 crc kubenswrapper[4492]: I1126 06:49:47.724092 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:47Z","lastTransitionTime":"2025-11-26T06:49:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:47 crc kubenswrapper[4492]: I1126 06:49:47.824979 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:47 crc kubenswrapper[4492]: I1126 06:49:47.825005 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:47 crc kubenswrapper[4492]: I1126 06:49:47.825015 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:47 crc kubenswrapper[4492]: I1126 06:49:47.825049 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:47 crc kubenswrapper[4492]: I1126 06:49:47.825069 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:47Z","lastTransitionTime":"2025-11-26T06:49:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:47 crc kubenswrapper[4492]: I1126 06:49:47.926768 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:47 crc kubenswrapper[4492]: I1126 06:49:47.926790 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:47 crc kubenswrapper[4492]: I1126 06:49:47.926799 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:47 crc kubenswrapper[4492]: I1126 06:49:47.926809 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:47 crc kubenswrapper[4492]: I1126 06:49:47.926817 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:47Z","lastTransitionTime":"2025-11-26T06:49:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:49:48 crc kubenswrapper[4492]: I1126 06:49:48.028042 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:48 crc kubenswrapper[4492]: I1126 06:49:48.028076 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:48 crc kubenswrapper[4492]: I1126 06:49:48.028084 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:48 crc kubenswrapper[4492]: I1126 06:49:48.028094 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:48 crc kubenswrapper[4492]: I1126 06:49:48.028104 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:48Z","lastTransitionTime":"2025-11-26T06:49:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:48 crc kubenswrapper[4492]: I1126 06:49:48.130407 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:48 crc kubenswrapper[4492]: I1126 06:49:48.130457 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:48 crc kubenswrapper[4492]: I1126 06:49:48.130466 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:48 crc kubenswrapper[4492]: I1126 06:49:48.130487 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:48 crc kubenswrapper[4492]: I1126 06:49:48.130498 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:48Z","lastTransitionTime":"2025-11-26T06:49:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:48 crc kubenswrapper[4492]: I1126 06:49:48.232894 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:48 crc kubenswrapper[4492]: I1126 06:49:48.232951 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:48 crc kubenswrapper[4492]: I1126 06:49:48.232966 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:48 crc kubenswrapper[4492]: I1126 06:49:48.232982 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:48 crc kubenswrapper[4492]: I1126 06:49:48.232995 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:48Z","lastTransitionTime":"2025-11-26T06:49:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:49:48 crc kubenswrapper[4492]: I1126 06:49:48.334902 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:48 crc kubenswrapper[4492]: I1126 06:49:48.334939 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:48 crc kubenswrapper[4492]: I1126 06:49:48.334952 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:48 crc kubenswrapper[4492]: I1126 06:49:48.334968 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:48 crc kubenswrapper[4492]: I1126 06:49:48.334979 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:48Z","lastTransitionTime":"2025-11-26T06:49:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:48 crc kubenswrapper[4492]: I1126 06:49:48.436925 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:48 crc kubenswrapper[4492]: I1126 06:49:48.436952 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:48 crc kubenswrapper[4492]: I1126 06:49:48.436960 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:48 crc kubenswrapper[4492]: I1126 06:49:48.436975 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:48 crc kubenswrapper[4492]: I1126 06:49:48.436984 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:48Z","lastTransitionTime":"2025-11-26T06:49:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:48 crc kubenswrapper[4492]: I1126 06:49:48.438341 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 06:49:48 crc kubenswrapper[4492]: E1126 06:49:48.438429 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 06:49:48 crc kubenswrapper[4492]: I1126 06:49:48.438542 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 06:49:48 crc kubenswrapper[4492]: I1126 06:49:48.438675 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-s4gtb" Nov 26 06:49:48 crc kubenswrapper[4492]: E1126 06:49:48.438819 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 06:49:48 crc kubenswrapper[4492]: E1126 06:49:48.438976 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-s4gtb" podUID="1cc59fbe-82e1-406b-95b1-a26b6b8ef467" Nov 26 06:49:48 crc kubenswrapper[4492]: I1126 06:49:48.538057 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:48 crc kubenswrapper[4492]: I1126 06:49:48.538090 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:48 crc kubenswrapper[4492]: I1126 06:49:48.538100 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:48 crc kubenswrapper[4492]: I1126 06:49:48.538113 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:48 crc kubenswrapper[4492]: I1126 06:49:48.538121 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:48Z","lastTransitionTime":"2025-11-26T06:49:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:48 crc kubenswrapper[4492]: I1126 06:49:48.640269 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:48 crc kubenswrapper[4492]: I1126 06:49:48.640304 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:48 crc kubenswrapper[4492]: I1126 06:49:48.640313 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:48 crc kubenswrapper[4492]: I1126 06:49:48.640325 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:48 crc kubenswrapper[4492]: I1126 06:49:48.640335 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:48Z","lastTransitionTime":"2025-11-26T06:49:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:49:48 crc kubenswrapper[4492]: I1126 06:49:48.742022 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:48 crc kubenswrapper[4492]: I1126 06:49:48.742101 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:48 crc kubenswrapper[4492]: I1126 06:49:48.742119 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:48 crc kubenswrapper[4492]: I1126 06:49:48.742142 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:48 crc kubenswrapper[4492]: I1126 06:49:48.742157 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:48Z","lastTransitionTime":"2025-11-26T06:49:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:48 crc kubenswrapper[4492]: I1126 06:49:48.843424 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:48 crc kubenswrapper[4492]: I1126 06:49:48.843456 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:48 crc kubenswrapper[4492]: I1126 06:49:48.843465 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:48 crc kubenswrapper[4492]: I1126 06:49:48.843478 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:48 crc kubenswrapper[4492]: I1126 06:49:48.843488 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:48Z","lastTransitionTime":"2025-11-26T06:49:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:48 crc kubenswrapper[4492]: I1126 06:49:48.944795 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:48 crc kubenswrapper[4492]: I1126 06:49:48.944825 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:48 crc kubenswrapper[4492]: I1126 06:49:48.944835 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:48 crc kubenswrapper[4492]: I1126 06:49:48.944846 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:48 crc kubenswrapper[4492]: I1126 06:49:48.944859 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:48Z","lastTransitionTime":"2025-11-26T06:49:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:49:49 crc kubenswrapper[4492]: I1126 06:49:49.046323 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:49 crc kubenswrapper[4492]: I1126 06:49:49.046374 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:49 crc kubenswrapper[4492]: I1126 06:49:49.046390 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:49 crc kubenswrapper[4492]: I1126 06:49:49.046409 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:49 crc kubenswrapper[4492]: I1126 06:49:49.046421 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:49Z","lastTransitionTime":"2025-11-26T06:49:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:49 crc kubenswrapper[4492]: I1126 06:49:49.148044 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:49 crc kubenswrapper[4492]: I1126 06:49:49.148072 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:49 crc kubenswrapper[4492]: I1126 06:49:49.148081 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:49 crc kubenswrapper[4492]: I1126 06:49:49.148092 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:49 crc kubenswrapper[4492]: I1126 06:49:49.148099 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:49Z","lastTransitionTime":"2025-11-26T06:49:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:49 crc kubenswrapper[4492]: I1126 06:49:49.249258 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:49 crc kubenswrapper[4492]: I1126 06:49:49.249358 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:49 crc kubenswrapper[4492]: I1126 06:49:49.249419 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:49 crc kubenswrapper[4492]: I1126 06:49:49.249479 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:49 crc kubenswrapper[4492]: I1126 06:49:49.249534 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:49Z","lastTransitionTime":"2025-11-26T06:49:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:49:49 crc kubenswrapper[4492]: I1126 06:49:49.351360 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:49 crc kubenswrapper[4492]: I1126 06:49:49.351394 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:49 crc kubenswrapper[4492]: I1126 06:49:49.351405 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:49 crc kubenswrapper[4492]: I1126 06:49:49.351419 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:49 crc kubenswrapper[4492]: I1126 06:49:49.351431 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:49Z","lastTransitionTime":"2025-11-26T06:49:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:49 crc kubenswrapper[4492]: I1126 06:49:49.438087 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 06:49:49 crc kubenswrapper[4492]: E1126 06:49:49.438201 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 06:49:49 crc kubenswrapper[4492]: I1126 06:49:49.453557 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:49 crc kubenswrapper[4492]: I1126 06:49:49.453646 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:49 crc kubenswrapper[4492]: I1126 06:49:49.453738 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:49 crc kubenswrapper[4492]: I1126 06:49:49.453809 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:49 crc kubenswrapper[4492]: I1126 06:49:49.453877 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:49Z","lastTransitionTime":"2025-11-26T06:49:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:49:49 crc kubenswrapper[4492]: I1126 06:49:49.555831 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:49 crc kubenswrapper[4492]: I1126 06:49:49.555878 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:49 crc kubenswrapper[4492]: I1126 06:49:49.555892 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:49 crc kubenswrapper[4492]: I1126 06:49:49.555909 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:49 crc kubenswrapper[4492]: I1126 06:49:49.555919 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:49Z","lastTransitionTime":"2025-11-26T06:49:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:49 crc kubenswrapper[4492]: I1126 06:49:49.657859 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:49 crc kubenswrapper[4492]: I1126 06:49:49.657889 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:49 crc kubenswrapper[4492]: I1126 06:49:49.657896 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:49 crc kubenswrapper[4492]: I1126 06:49:49.657909 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:49 crc kubenswrapper[4492]: I1126 06:49:49.657923 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:49Z","lastTransitionTime":"2025-11-26T06:49:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:49 crc kubenswrapper[4492]: I1126 06:49:49.759697 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:49 crc kubenswrapper[4492]: I1126 06:49:49.759734 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:49 crc kubenswrapper[4492]: I1126 06:49:49.759745 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:49 crc kubenswrapper[4492]: I1126 06:49:49.759758 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:49 crc kubenswrapper[4492]: I1126 06:49:49.759768 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:49Z","lastTransitionTime":"2025-11-26T06:49:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:49:49 crc kubenswrapper[4492]: I1126 06:49:49.861442 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:49 crc kubenswrapper[4492]: I1126 06:49:49.861490 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:49 crc kubenswrapper[4492]: I1126 06:49:49.861503 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:49 crc kubenswrapper[4492]: I1126 06:49:49.861519 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:49 crc kubenswrapper[4492]: I1126 06:49:49.861531 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:49Z","lastTransitionTime":"2025-11-26T06:49:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:49 crc kubenswrapper[4492]: I1126 06:49:49.963875 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:49 crc kubenswrapper[4492]: I1126 06:49:49.964032 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:49 crc kubenswrapper[4492]: I1126 06:49:49.964168 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:49 crc kubenswrapper[4492]: I1126 06:49:49.964294 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:49 crc kubenswrapper[4492]: I1126 06:49:49.964398 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:49Z","lastTransitionTime":"2025-11-26T06:49:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:50 crc kubenswrapper[4492]: I1126 06:49:50.066965 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:50 crc kubenswrapper[4492]: I1126 06:49:50.067076 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:50 crc kubenswrapper[4492]: I1126 06:49:50.067087 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:50 crc kubenswrapper[4492]: I1126 06:49:50.067101 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:50 crc kubenswrapper[4492]: I1126 06:49:50.067114 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:50Z","lastTransitionTime":"2025-11-26T06:49:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:49:50 crc kubenswrapper[4492]: I1126 06:49:50.169167 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:50 crc kubenswrapper[4492]: I1126 06:49:50.169214 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:50 crc kubenswrapper[4492]: I1126 06:49:50.169224 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:50 crc kubenswrapper[4492]: I1126 06:49:50.169235 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:50 crc kubenswrapper[4492]: I1126 06:49:50.169241 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:50Z","lastTransitionTime":"2025-11-26T06:49:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:50 crc kubenswrapper[4492]: I1126 06:49:50.270808 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:50 crc kubenswrapper[4492]: I1126 06:49:50.270957 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:50 crc kubenswrapper[4492]: I1126 06:49:50.271041 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:50 crc kubenswrapper[4492]: I1126 06:49:50.271111 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:50 crc kubenswrapper[4492]: I1126 06:49:50.271207 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:50Z","lastTransitionTime":"2025-11-26T06:49:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:50 crc kubenswrapper[4492]: I1126 06:49:50.372412 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:50 crc kubenswrapper[4492]: I1126 06:49:50.372444 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:50 crc kubenswrapper[4492]: I1126 06:49:50.372452 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:50 crc kubenswrapper[4492]: I1126 06:49:50.372463 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:50 crc kubenswrapper[4492]: I1126 06:49:50.372471 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:50Z","lastTransitionTime":"2025-11-26T06:49:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:49:50 crc kubenswrapper[4492]: I1126 06:49:50.438464 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-s4gtb" Nov 26 06:49:50 crc kubenswrapper[4492]: E1126 06:49:50.438570 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-s4gtb" podUID="1cc59fbe-82e1-406b-95b1-a26b6b8ef467" Nov 26 06:49:50 crc kubenswrapper[4492]: I1126 06:49:50.438667 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 06:49:50 crc kubenswrapper[4492]: I1126 06:49:50.438685 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 06:49:50 crc kubenswrapper[4492]: E1126 06:49:50.438862 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 06:49:50 crc kubenswrapper[4492]: E1126 06:49:50.438771 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 06:49:50 crc kubenswrapper[4492]: I1126 06:49:50.473815 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:50 crc kubenswrapper[4492]: I1126 06:49:50.473907 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:50 crc kubenswrapper[4492]: I1126 06:49:50.473965 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:50 crc kubenswrapper[4492]: I1126 06:49:50.474013 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:50 crc kubenswrapper[4492]: I1126 06:49:50.474076 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:50Z","lastTransitionTime":"2025-11-26T06:49:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:49:50 crc kubenswrapper[4492]: I1126 06:49:50.575981 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:50 crc kubenswrapper[4492]: I1126 06:49:50.576035 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:50 crc kubenswrapper[4492]: I1126 06:49:50.576046 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:50 crc kubenswrapper[4492]: I1126 06:49:50.576069 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:50 crc kubenswrapper[4492]: I1126 06:49:50.576078 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:50Z","lastTransitionTime":"2025-11-26T06:49:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:50 crc kubenswrapper[4492]: I1126 06:49:50.677739 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:50 crc kubenswrapper[4492]: I1126 06:49:50.677781 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:50 crc kubenswrapper[4492]: I1126 06:49:50.677789 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:50 crc kubenswrapper[4492]: I1126 06:49:50.677799 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:50 crc kubenswrapper[4492]: I1126 06:49:50.677807 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:50Z","lastTransitionTime":"2025-11-26T06:49:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:50 crc kubenswrapper[4492]: I1126 06:49:50.779455 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:50 crc kubenswrapper[4492]: I1126 06:49:50.779557 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:50 crc kubenswrapper[4492]: I1126 06:49:50.779630 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:50 crc kubenswrapper[4492]: I1126 06:49:50.779693 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:50 crc kubenswrapper[4492]: I1126 06:49:50.779748 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:50Z","lastTransitionTime":"2025-11-26T06:49:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:49:50 crc kubenswrapper[4492]: I1126 06:49:50.881194 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:50 crc kubenswrapper[4492]: I1126 06:49:50.881218 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:50 crc kubenswrapper[4492]: I1126 06:49:50.881226 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:50 crc kubenswrapper[4492]: I1126 06:49:50.881237 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:50 crc kubenswrapper[4492]: I1126 06:49:50.881246 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:50Z","lastTransitionTime":"2025-11-26T06:49:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:50 crc kubenswrapper[4492]: I1126 06:49:50.982757 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:50 crc kubenswrapper[4492]: I1126 06:49:50.982782 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:50 crc kubenswrapper[4492]: I1126 06:49:50.982791 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:50 crc kubenswrapper[4492]: I1126 06:49:50.982800 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:50 crc kubenswrapper[4492]: I1126 06:49:50.982807 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:50Z","lastTransitionTime":"2025-11-26T06:49:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:51 crc kubenswrapper[4492]: I1126 06:49:51.084763 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:51 crc kubenswrapper[4492]: I1126 06:49:51.084792 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:51 crc kubenswrapper[4492]: I1126 06:49:51.084801 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:51 crc kubenswrapper[4492]: I1126 06:49:51.084812 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:51 crc kubenswrapper[4492]: I1126 06:49:51.084822 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:51Z","lastTransitionTime":"2025-11-26T06:49:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:49:51 crc kubenswrapper[4492]: I1126 06:49:51.186474 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:51 crc kubenswrapper[4492]: I1126 06:49:51.186511 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:51 crc kubenswrapper[4492]: I1126 06:49:51.186524 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:51 crc kubenswrapper[4492]: I1126 06:49:51.186538 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:51 crc kubenswrapper[4492]: I1126 06:49:51.186548 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:51Z","lastTransitionTime":"2025-11-26T06:49:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:51 crc kubenswrapper[4492]: I1126 06:49:51.288512 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:51 crc kubenswrapper[4492]: I1126 06:49:51.288532 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:51 crc kubenswrapper[4492]: I1126 06:49:51.288544 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:51 crc kubenswrapper[4492]: I1126 06:49:51.288555 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:51 crc kubenswrapper[4492]: I1126 06:49:51.288563 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:51Z","lastTransitionTime":"2025-11-26T06:49:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:51 crc kubenswrapper[4492]: I1126 06:49:51.390743 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:51 crc kubenswrapper[4492]: I1126 06:49:51.390767 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:51 crc kubenswrapper[4492]: I1126 06:49:51.390776 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:51 crc kubenswrapper[4492]: I1126 06:49:51.390785 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:51 crc kubenswrapper[4492]: I1126 06:49:51.390792 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:51Z","lastTransitionTime":"2025-11-26T06:49:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:49:51 crc kubenswrapper[4492]: I1126 06:49:51.437862 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 06:49:51 crc kubenswrapper[4492]: E1126 06:49:51.438499 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 06:49:51 crc kubenswrapper[4492]: I1126 06:49:51.447374 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd/etcd-crc"] Nov 26 06:49:51 crc kubenswrapper[4492]: I1126 06:49:51.492720 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:51 crc kubenswrapper[4492]: I1126 06:49:51.492752 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:51 crc kubenswrapper[4492]: I1126 06:49:51.492793 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:51 crc kubenswrapper[4492]: I1126 06:49:51.492813 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:51 crc kubenswrapper[4492]: I1126 06:49:51.492822 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:51Z","lastTransitionTime":"2025-11-26T06:49:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:51 crc kubenswrapper[4492]: I1126 06:49:51.595253 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:51 crc kubenswrapper[4492]: I1126 06:49:51.595305 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:51 crc kubenswrapper[4492]: I1126 06:49:51.595344 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:51 crc kubenswrapper[4492]: I1126 06:49:51.595360 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:51 crc kubenswrapper[4492]: I1126 06:49:51.595370 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:51Z","lastTransitionTime":"2025-11-26T06:49:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 26 06:49:52 crc kubenswrapper[4492]: I1126 06:49:52.438198 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 26 06:49:52 crc kubenswrapper[4492]: I1126 06:49:52.438255 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-s4gtb"
Nov 26 06:49:52 crc kubenswrapper[4492]: E1126 06:49:52.438305 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 26 06:49:52 crc kubenswrapper[4492]: E1126 06:49:52.438402 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-s4gtb" podUID="1cc59fbe-82e1-406b-95b1-a26b6b8ef467"
Nov 26 06:49:52 crc kubenswrapper[4492]: I1126 06:49:52.438472 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 26 06:49:52 crc kubenswrapper[4492]: E1126 06:49:52.439396 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 26 06:49:52 crc kubenswrapper[4492]: I1126 06:49:52.440072 4492 scope.go:117] "RemoveContainer" containerID="c0888a57c383ad8c0a363ee29bc31841ce4175cb3aeb1b825da7efd383d4265c"
Nov 26 06:49:52 crc kubenswrapper[4492]: E1126 06:49:52.440267 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-lghgp_openshift-ovn-kubernetes(9b104695-0850-4fb3-b2f8-f764435f8694)\"" pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" podUID="9b104695-0850-4fb3-b2f8-f764435f8694"
Nov 26 06:49:53 crc kubenswrapper[4492]: I1126 06:49:53.437679 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 26 06:49:53 crc kubenswrapper[4492]: E1126 06:49:53.437884 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 26 06:49:54 crc kubenswrapper[4492]: I1126 06:49:54.437917 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 26 06:49:54 crc kubenswrapper[4492]: I1126 06:49:54.437941 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-s4gtb"
Nov 26 06:49:54 crc kubenswrapper[4492]: I1126 06:49:54.437977 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 26 06:49:54 crc kubenswrapper[4492]: E1126 06:49:54.438071 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 26 06:49:54 crc kubenswrapper[4492]: E1126 06:49:54.438153 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-s4gtb" podUID="1cc59fbe-82e1-406b-95b1-a26b6b8ef467"
Nov 26 06:49:54 crc kubenswrapper[4492]: E1126 06:49:54.438241 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 06:49:54 crc kubenswrapper[4492]: I1126 06:49:54.445752 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:54 crc kubenswrapper[4492]: I1126 06:49:54.445780 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:54 crc kubenswrapper[4492]: I1126 06:49:54.445790 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:54 crc kubenswrapper[4492]: I1126 06:49:54.445822 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:54 crc kubenswrapper[4492]: I1126 06:49:54.445840 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:54Z","lastTransitionTime":"2025-11-26T06:49:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:54 crc kubenswrapper[4492]: I1126 06:49:54.447430 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2gwwb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d23da2c-14b7-4671-b87e-7506855ca163\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://56a756e2798f3a758f7cc404b3c1e543389f88510f1f1bcef6bd603086b5ac96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c7zdg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://23c2c8bf0201054d839f80e0d6ee1423ef1140d1c59512cd787edbad1b611b2f
\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c7zdg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:49:01Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2gwwb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:54Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:54 crc kubenswrapper[4492]: I1126 06:49:54.456389 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"995e57c0-8e79-4857-8451-c7f7b51a05d3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70a3d5141782b7c4dcb2ef96a026e1fb4f4ef101c967f64a20ece097bbdf007f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1684e95c263df92c92efdd2240417c
071f977b58d825abd2155277d7cce1fd57\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://15f4e01e3336948991062e895549de16371af485e1cc1f32591b5dc4fc62472b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef8e05b7e0643e9159acdd474c0f1ed97db182b0a7ed4f21b475ce6e4c051f52\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a6dd3695118a8c09585a7cfceb42ac5ae5898562c5f6442da6936f849a4e9f8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T06:48:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1126 06:48:41.573117 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1126 06:48:41.573321 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 06:48:41.575536 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3533228848/tls.crt::/tmp/serving-cert-3533228848/tls.key\\\\\\\"\\\\nI1126 06:48:41.958496 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 06:48:41.961105 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 06:48:41.961124 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 06:48:41.961145 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 06:48:41.961150 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 06:48:41.965068 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 06:48:41.965092 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:48:41.965097 1 secure_serving.go:69] Use of 
insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:48:41.965101 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 06:48:41.965103 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 06:48:41.965106 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 06:48:41.965108 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 06:48:41.965323 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 06:48:41.966098 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ca15652692a4dd6b01f7ec02b8ff37ac41ed740c4c47d451e0742aa80d0c028\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4421dd0243f7239dfa2ede847b9cb5230663e69a214dca3dfb1a4763ea76b233\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4421dd0243f7239dfa2ede847b9cb5230663e69a214dca3dfb1a4763ea76b233\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:24Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:54Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:54 crc kubenswrapper[4492]: I1126 06:49:54.464082 4492 status_manager.go:875] "Failed 
to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:54Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:54 crc kubenswrapper[4492]: I1126 06:49:54.472781 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:54Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:54 crc kubenswrapper[4492]: I1126 06:49:54.480392 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2644f0895688786b5b70f08011457eed33cb0a7962ac6dde6b60dd3276497011\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://27d142c5328a6f659cd2cee0b6535403ccbfb07aee1ea29c928c9d80a847f4ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imag
eID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:54Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:54 crc kubenswrapper[4492]: I1126 06:49:54.488611 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:54Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:54 crc kubenswrapper[4492]: I1126 06:49:54.494900 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-hjxcm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfdb68d9-168b-4d04-a6ee-b2deef54a9ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f3b93237fa8e75f6423c8f194440aebb4fffec26f63b19b00396ee567fb454f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cpmw5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:50Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-hjxcm\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:54Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:54 crc kubenswrapper[4492]: I1126 06:49:54.512336 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e39bab0a-78f2-4734-b5f5-f5e382ae2523\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ed249be3ee240d3abc137ebe93a7f0595fd30e86f3d566029d2aafada08f0b3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d0d3a70003d231c58cf949db69367229e45c44813a969360fa4d22f17b676fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://711af18dbbce5875faa107da6007d9fa0fd99ba58c8e0e033ad956ada538a9c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"la
stState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bd197c99b6d8e405f2023f3985d14579f2da73008cb6219bde9ad309cf0c7b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7689d03ba1a45b28d33ecb66a7451c4e3767d076eba9751dc438b44bf692b0e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4bd6fe7d38f84cfedbd45716bbb9bc9265e332d672766c4c85c5910f4c59ee2d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4bd6fe7d38f84cfedbd45716bbb9bc9265e332d672766c4c85c5910f4c59ee2d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f51b9153796b1b926d5f2c0bdc5d55d1be2e33a0df568da392b932178973ef64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",
\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f51b9153796b1b926d5f2c0bdc5d55d1be2e33a0df568da392b932178973ef64\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://8cfc778e8208b9ac4eecdcec38b72db997cdd2a68b19cc8e30bdde590da553fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8cfc778e8208b9ac4eecdcec38b72db997cdd2a68b19cc8e30bdde590da553fd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:24Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:54Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:54 crc kubenswrapper[4492]: I1126 06:49:54.523349 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://78117c73db01f54f893d52844cab11a6257a1f6b6b582fb751065e1acc329620\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:54Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:54 crc kubenswrapper[4492]: I1126 06:49:54.530623 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4aa19f79274209a31db5cfe0a8ff6f71000fc4efb2d65dfab3f719d3a7f1ee9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:54Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:54 crc kubenswrapper[4492]: I1126 06:49:54.542668 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b104695-0850-4fb3-b2f8-f764435f8694\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5de69ea04f427c1f151433ee53b119dfa7e3cd603f924ebca340c5050ff7d097\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://738cf2a56bd431ea3dcd29c8a6e66881e618cf6592af8ce1852be9776c41f803\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dba16d525497282969e2ead66e1d4886793234d0f4d2ef143a8246329131b046\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://501133ebc0e93cbba7bea713d813e6f41caa28286ed59ae8d328b6f9050a1e71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0301a329531b99f93a3e91f672edc4ce17ae03cfd5562f5802362423f11670fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1519223368c78fc663c8f6674135a7c574425dccbd0151d6663836b4715e9f9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0888a57c383ad8c0a363ee29bc31841ce4175cb
3aeb1b825da7efd383d4265c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c0888a57c383ad8c0a363ee29bc31841ce4175cb3aeb1b825da7efd383d4265c\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T06:49:38Z\\\",\\\"message\\\":\\\"ts: [openshift-kube-controller-manager/kube-controller-manager-crc openshift-kube-scheduler/openshift-kube-scheduler-crc openshift-network-diagnostics/network-check-source-55646444c4-trplf openshift-network-operator/iptables-alerter-4ln5h openshift-machine-config-operator/kube-rbac-proxy-crio-crc openshift-machine-config-operator/machine-config-daemon-6blv7 openshift-multus/multus-5bshd openshift-multus/network-metrics-daemon-s4gtb openshift-network-operator/network-operator-58b4c7f79c-55gtf openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2gwwb openshift-image-registry/node-ca-hjxcm openshift-network-node-identity/network-node-identity-vrzqb openshift-ovn-kubernetes/ovnkube-node-lghgp openshift-multus/multus-additional-cni-plugins-nrzjd openshift-network-console/networking-console-plugin-85b44fc459-gdk6g openshift-network-diagnostics/network-check-target-xd92c]\\\\nI1126 06:49:38.114760 6374 services_controller.go:445] Built service openshift-marketplace/certified-operators LB template configs for network=default: []services.lbConfig(nil)\\\\nF1126 06:49:38.115033 6374 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initializatio\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:49:37Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-lghgp_openshift-ovn-kubernetes(9b104695-0850-4fb3-b2f8-f764435f8694)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dcc2ea28d7ab1193d0836b56704a623efefea918b62fde4a41c3d5466a531a39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4001ee998f435fc6249ef42a3a6a9a8cf4a5031eb8999158ba3ba6bfa41a4a43\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4001ee998f435fc6249ef42a3a6a9a8cf4a5031eb8999158ba3ba6bfa41a4a43\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lghgp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:54Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:54 crc kubenswrapper[4492]: I1126 06:49:54.547115 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:54 crc kubenswrapper[4492]: I1126 06:49:54.547260 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:54 crc kubenswrapper[4492]: I1126 06:49:54.547441 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:54 crc kubenswrapper[4492]: I1126 06:49:54.547612 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:54 crc kubenswrapper[4492]: I1126 06:49:54.547787 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:54Z","lastTransitionTime":"2025-11-26T06:49:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:49:54 crc kubenswrapper[4492]: I1126 06:49:54.551004 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-s4gtb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1cc59fbe-82e1-406b-95b1-a26b6b8ef467\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk29d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk29d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:49:02Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-s4gtb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:54Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:54 crc kubenswrapper[4492]: I1126 06:49:54.559875 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-nrzjd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"026c3325-a592-4828-8e4f-08bcb790014a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c4955932b597b4b409c6c0bd2195c7918b56f1db3aca639a0d47656173b6176\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70dcd5b71e0ed41855711d244d6628b740a92d6ada3fa3114a76c280b83e7402\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70dcd5b71e0ed41855711d244d6628b740a92d6ada3fa3114a76c280b83e7402\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://46f1a12a78f284278cf74713bf7a46f4d62a7f40258ec65ae217b5fbb5729525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://46f1a12a78f284278cf74713bf7a46f4d62a7f40258ec65ae217b5fbb5729525\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://233ce2ea0f4758076db5c922254232e5cccdc2466e03c6044a0ba4077dd71e8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://233ce2ea0f4758076db5c922254232e5cccdc2466e03c6044a0ba4077dd71e8a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1f662c3ac64d38717f3c8eb17f7ab22b7f9af88661f97ddae2caeec71b793a68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f662c3ac64d38717f3c8eb17f7ab22b7f9af88661f97ddae2caeec71b793a68\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://39153e3f02feb4f9af860e81e6e426354d4113d26135aadb9bc708dcb2cb1770\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://39153e3f02feb4f9af860e81e6e426354d4113d26135aadb9bc708dcb2cb1770\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b34363c342a19d8eda1d5be4bea825a4e0f9a4281915b6ae5d0cc5b00ad7c926\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b34363c342a19d8eda1d5be4bea825a4e0f9a4281915b6ae5d0cc5b00ad7c926\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-nrzjd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:54Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:54 crc kubenswrapper[4492]: I1126 06:49:54.566399 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6lnwf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0151e6e0-df4e-4482-9309-f8cce9bc6ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f91d7ead0eaa9a8c8d4ec6372d35236fc33de1f8606616efadfee2ec6a71324\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cmsnp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6lnwf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:54Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:54 crc kubenswrapper[4492]: I1126 06:49:54.573712 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"04bf18ad-d2a1-4b30-a3fa-2b6247363c82\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fad93839d2a51dffea51b659a6dcbfe24701e00ebb88e18329f7aa4351e1b4f5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lwjt9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://695ce8a08afa726c47c6aa48ddd828cbc420a9740de6cf165351e5bd68174a89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lwjt9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6blv7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:54Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:54 crc kubenswrapper[4492]: I1126 06:49:54.580377 4492 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2cbf75e9-4b7e-454b-bcff-ebb0f537bb0e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://390b1499bd9aae353a574f1d5ca4243dda7d4576837cf40a0118c53cff23ebdb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d88bf1783acd582780b9f9208f310d28ebb08a2ca510a916a0ddc6fad0c4d88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7d88bf1783acd582780b9f9208f310d28ebb08a2ca510a916a0ddc6fad0c4d88\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:24Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:54Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:54 crc kubenswrapper[4492]: I1126 06:49:54.589077 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch 
status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"779b4f9a-92b7-4dcc-938a-e4de5decd688\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e170e91f442c9f45c7adfc9a5f8435cb51135522d5ac61f29829834c1f797e0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a8187b933b520c7a9c1c7f798f841f3892c249f52eddd13c0c7585a8bc916f20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b87661ddeafdf124a87d6bc50755b340e32d88bbc35a005ae13aa66aa3b39ff4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a8bf4249c1551f054875ff3ef146502de6c99fd3afd10d78b41274196a35a6d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller
-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:24Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:54Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:54 crc kubenswrapper[4492]: I1126 06:49:54.596210 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b32864c2-0866-4642-a872-7a5109d6f84f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3cdba573f24fecefce899a977a585bd480de506f64dcd8af6fd7f32f945b844d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b85db4fc799f48153a0e9db0b5b8316762f221aa873c03d675beb5e9939377bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev
/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8088d16b18d99a32c41b63cbd6181314e805595697cbc5f122864dff6fe7b324\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2c51611cfe8bb2b69aeaa6d69f7e94deda73d79397c8e5a4ac4f0fe330a4b586\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2c51611cfe8bb2b69aeaa6d69f7e94deda73d79397c8e5a4ac4f0fe330a4b586\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:24Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:54Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:54 crc kubenswrapper[4492]: I1126 06:49:54.616264 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-5bshd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a471ac3f-0ac0-4110-94bb-194c0de0af26\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d93d67e2acbb2cff41dd4d5bc98ffe7cd68f7fd393e4fab2fc974ae4de103ca3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9a91fba8751c53be54b0060bfc75906ab11b521770ca44425d8910fa13027c9d\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T06:49:35Z\\\",\\\"message\\\":\\\"2025-11-26T06:48:49+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_11d86e64-dae3-4806-bca9-064973c40277\\\\n2025-11-26T06:48:49+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_11d86e64-dae3-4806-bca9-064973c40277 to /host/opt/cni/bin/\\\\n2025-11-26T06:48:50Z [verbose] multus-daemon started\\\\n2025-11-26T06:48:50Z [verbose] Readiness Indicator file check\\\\n2025-11-26T06:49:35Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:49:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gt98z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-5bshd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:54Z is after 2025-08-24T17:21:41Z"
Nov 26 06:49:54 crc kubenswrapper[4492]: I1126 06:49:54.650217 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:54 crc kubenswrapper[4492]: I1126 06:49:54.650651 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:54 crc kubenswrapper[4492]: I1126 06:49:54.650733 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:54 crc kubenswrapper[4492]: I1126 06:49:54.650817 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:54 crc kubenswrapper[4492]: I1126 06:49:54.650902 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:54Z","lastTransitionTime":"2025-11-26T06:49:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
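The kube-multus restart recorded above is its readiness-indicator poll timing out: the daemon started at 06:48:50, waited for the default network's CNI config (10-ovn-kubernetes.conf) to appear, and exited with an error at 06:49:35 when it never did. Below is a minimal, stdlib-only Go sketch of that kind of file poll; the path and the ~45 s budget are taken from the log messages, the one-second interval is an assumption, and this is an illustration of the pattern, not multus's actual implementation.

    package main

    import (
    	"fmt"
    	"os"
    	"time"
    )

    // waitForReadinessIndicator polls for a CNI readiness-indicator file:
    // succeed as soon as the file exists, give up once the timeout elapses.
    // Interval and timeout values here are illustrative.
    func waitForReadinessIndicator(path string, interval, timeout time.Duration) error {
    	deadline := time.Now().Add(timeout)
    	for {
    		if _, err := os.Stat(path); err == nil {
    			return nil // the default network's config has appeared
    		}
    		if time.Now().After(deadline) {
    			return fmt.Errorf("timed out waiting for readiness indicator file %s", path)
    		}
    		time.Sleep(interval)
    	}
    }

    func main() {
    	// Path taken from the multus error message in the log above;
    	// 45s roughly matches the 06:48:50 -> 06:49:35 gap it reports.
    	err := waitForReadinessIndicator("/host/run/multus/cni/net.d/10-ovn-kubernetes.conf",
    		time.Second, 45*time.Second)
    	if err != nil {
    		fmt.Fprintln(os.Stderr, err)
    		os.Exit(1)
    	}
    }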
Nov 26 06:49:54 crc kubenswrapper[4492]: I1126 06:49:54.752401 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:54 crc kubenswrapper[4492]: I1126 06:49:54.752443 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:54 crc kubenswrapper[4492]: I1126 06:49:54.752453 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:54 crc kubenswrapper[4492]: I1126 06:49:54.752466 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:54 crc kubenswrapper[4492]: I1126 06:49:54.752477 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:54Z","lastTransitionTime":"2025-11-26T06:49:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
[... the same five-entry block (NodeHasSufficientMemory, NodeHasNoDiskPressure, NodeHasSufficientPID, NodeNotReady, "Node became not ready") recurs at roughly 100 ms intervals from 06:49:54.854 through 06:49:56.989 with only the timestamps changing; the non-repeated entries from that window are kept below ...]
Nov 26 06:49:55 crc kubenswrapper[4492]: I1126 06:49:55.438138 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 26 06:49:55 crc kubenswrapper[4492]: E1126 06:49:55.438241 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 26 06:49:56 crc kubenswrapper[4492]: I1126 06:49:56.439134 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 26 06:49:56 crc kubenswrapper[4492]: I1126 06:49:56.439243 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 26 06:49:56 crc kubenswrapper[4492]: E1126 06:49:56.439240 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 26 06:49:56 crc kubenswrapper[4492]: E1126 06:49:56.439332 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 26 06:49:56 crc kubenswrapper[4492]: I1126 06:49:56.439360 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-s4gtb"
Nov 26 06:49:56 crc kubenswrapper[4492]: E1126 06:49:56.439564 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-s4gtb" podUID="1cc59fbe-82e1-406b-95b1-a26b6b8ef467"
Nov 26 06:49:57 crc kubenswrapper[4492]: I1126 06:49:57.040798 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:49:57 crc kubenswrapper[4492]: I1126 06:49:57.040823 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:49:57 crc kubenswrapper[4492]: I1126 06:49:57.040830 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:49:57 crc kubenswrapper[4492]: I1126 06:49:57.040861 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:49:57 crc kubenswrapper[4492]: I1126 06:49:57.040871 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:57Z","lastTransitionTime":"2025-11-26T06:49:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
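The NodeNotReady loop above keeps firing because the runtime's network-readiness probe finds no CNI configuration in /etc/kubernetes/cni/net.d/. A stdlib-only Go sketch of that kind of check follows; the directory comes straight from the log message, while the .conf/.conflist/.json extension filter mirrors the usual CNI loader convention and is an assumption, not the runtime's exact logic.

    package main

    import (
    	"fmt"
    	"os"
    	"path/filepath"
    )

    // hasCNIConfig reports whether dir contains at least one CNI network
    // configuration file, using the conventional extensions. The real
    // readiness check lives in the container runtime, not here.
    func hasCNIConfig(dir string) (bool, error) {
    	entries, err := os.ReadDir(dir)
    	if err != nil {
    		return false, err
    	}
    	for _, e := range entries {
    		if e.IsDir() {
    			continue
    		}
    		switch filepath.Ext(e.Name()) {
    		case ".conf", ".conflist", ".json":
    			return true, nil
    		}
    	}
    	return false, nil
    }

    func main() {
    	// Directory taken from the "no CNI configuration file" message above.
    	ok, err := hasCNIConfig("/etc/kubernetes/cni/net.d")
    	if err != nil || !ok {
    		fmt.Println("container runtime network not ready: NetworkReady=false")
    		return
    	}
    	fmt.Println("NetworkReady=true")
    }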
Nov 26 06:49:57 crc kubenswrapper[4492]: E1126 06:49:57.049685 4492 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148056Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608856Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:57Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:57Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:57Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:57Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"5a30a4c6-2314-4103-8c18-44e795d62516\\\",\\\"systemUUID\\\":\\\"836cf739-0185-4d24-bd92-dec4516ccf4f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:57Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:57 crc kubenswrapper[4492]: I1126 06:49:57.052139 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:57 crc kubenswrapper[4492]: I1126 06:49:57.052163 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 06:49:57 crc kubenswrapper[4492]: I1126 06:49:57.052187 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:57 crc kubenswrapper[4492]: I1126 06:49:57.052198 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:57 crc kubenswrapper[4492]: I1126 06:49:57.052205 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:57Z","lastTransitionTime":"2025-11-26T06:49:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:57 crc kubenswrapper[4492]: E1126 06:49:57.060044 4492 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148056Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608856Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:57Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:57Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:57Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:57Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"5a30a4c6-2314-4103-8c18-44e795d62516\\\",\\\"systemUUID\\\":\\\"836cf739-0185-4d24-bd92-dec4516ccf4f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:57Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:57 crc kubenswrapper[4492]: I1126 06:49:57.062068 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:57 crc kubenswrapper[4492]: I1126 06:49:57.062091 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 06:49:57 crc kubenswrapper[4492]: I1126 06:49:57.062098 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:57 crc kubenswrapper[4492]: I1126 06:49:57.062107 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:57 crc kubenswrapper[4492]: I1126 06:49:57.062115 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:57Z","lastTransitionTime":"2025-11-26T06:49:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:57 crc kubenswrapper[4492]: E1126 06:49:57.070189 4492 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148056Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608856Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:57Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:57Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:57Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:57Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"5a30a4c6-2314-4103-8c18-44e795d62516\\\",\\\"systemUUID\\\":\\\"836cf739-0185-4d24-bd92-dec4516ccf4f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:57Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:57 crc kubenswrapper[4492]: I1126 06:49:57.072006 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:57 crc kubenswrapper[4492]: I1126 06:49:57.072031 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 06:49:57 crc kubenswrapper[4492]: I1126 06:49:57.072039 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:57 crc kubenswrapper[4492]: I1126 06:49:57.072056 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:57 crc kubenswrapper[4492]: I1126 06:49:57.072064 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:57Z","lastTransitionTime":"2025-11-26T06:49:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:57 crc kubenswrapper[4492]: E1126 06:49:57.079941 4492 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148056Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608856Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:57Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:57Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:57Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:57Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"5a30a4c6-2314-4103-8c18-44e795d62516\\\",\\\"systemUUID\\\":\\\"836cf739-0185-4d24-bd92-dec4516ccf4f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:57Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:57 crc kubenswrapper[4492]: I1126 06:49:57.081871 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:57 crc kubenswrapper[4492]: I1126 06:49:57.081905 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 06:49:57 crc kubenswrapper[4492]: I1126 06:49:57.081916 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:57 crc kubenswrapper[4492]: I1126 06:49:57.081928 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:57 crc kubenswrapper[4492]: I1126 06:49:57.081936 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:57Z","lastTransitionTime":"2025-11-26T06:49:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:57 crc kubenswrapper[4492]: E1126 06:49:57.089966 4492 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148056Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608856Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:57Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:57Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:57Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:49:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:57Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"5a30a4c6-2314-4103-8c18-44e795d62516\\\",\\\"systemUUID\\\":\\\"836cf739-0185-4d24-bd92-dec4516ccf4f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:49:57Z is after 2025-08-24T17:21:41Z" Nov 26 06:49:57 crc kubenswrapper[4492]: E1126 06:49:57.090075 4492 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 26 06:49:57 crc kubenswrapper[4492]: I1126 06:49:57.091953 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 26 06:49:57 crc kubenswrapper[4492]: I1126 06:49:57.091978 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:57 crc kubenswrapper[4492]: I1126 06:49:57.091989 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:57 crc kubenswrapper[4492]: I1126 06:49:57.092006 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:57 crc kubenswrapper[4492]: I1126 06:49:57.092013 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:57Z","lastTransitionTime":"2025-11-26T06:49:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:57 crc kubenswrapper[4492]: I1126 06:49:57.195504 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:57 crc kubenswrapper[4492]: I1126 06:49:57.195598 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:57 crc kubenswrapper[4492]: I1126 06:49:57.195608 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:57 crc kubenswrapper[4492]: I1126 06:49:57.195617 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:57 crc kubenswrapper[4492]: I1126 06:49:57.195640 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:57Z","lastTransitionTime":"2025-11-26T06:49:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:57 crc kubenswrapper[4492]: I1126 06:49:57.297344 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:57 crc kubenswrapper[4492]: I1126 06:49:57.297374 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:57 crc kubenswrapper[4492]: I1126 06:49:57.297382 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:57 crc kubenswrapper[4492]: I1126 06:49:57.297391 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:57 crc kubenswrapper[4492]: I1126 06:49:57.297398 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:57Z","lastTransitionTime":"2025-11-26T06:49:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:49:57 crc kubenswrapper[4492]: I1126 06:49:57.398685 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:57 crc kubenswrapper[4492]: I1126 06:49:57.398707 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:57 crc kubenswrapper[4492]: I1126 06:49:57.398714 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:57 crc kubenswrapper[4492]: I1126 06:49:57.398726 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:57 crc kubenswrapper[4492]: I1126 06:49:57.398734 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:57Z","lastTransitionTime":"2025-11-26T06:49:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:49:57 crc kubenswrapper[4492]: I1126 06:49:57.437586 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 06:49:57 crc kubenswrapper[4492]: E1126 06:49:57.437679 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 06:49:57 crc kubenswrapper[4492]: I1126 06:49:57.500066 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:49:57 crc kubenswrapper[4492]: I1126 06:49:57.500089 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:49:57 crc kubenswrapper[4492]: I1126 06:49:57.500098 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:49:57 crc kubenswrapper[4492]: I1126 06:49:57.500108 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:49:57 crc kubenswrapper[4492]: I1126 06:49:57.500116 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:49:57Z","lastTransitionTime":"2025-11-26T06:49:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 26 06:49:58 crc kubenswrapper[4492]: I1126 06:49:58.437718 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 26 06:49:58 crc kubenswrapper[4492]: I1126 06:49:58.437722 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-s4gtb"
Nov 26 06:49:58 crc kubenswrapper[4492]: I1126 06:49:58.437731 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 26 06:49:58 crc kubenswrapper[4492]: E1126 06:49:58.437962 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-s4gtb" podUID="1cc59fbe-82e1-406b-95b1-a26b6b8ef467"
Nov 26 06:49:58 crc kubenswrapper[4492]: E1126 06:49:58.437912 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 26 06:49:58 crc kubenswrapper[4492]: E1126 06:49:58.438191 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 26 06:49:59 crc kubenswrapper[4492]: I1126 06:49:59.437923 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 26 06:49:59 crc kubenswrapper[4492]: E1126 06:49:59.438019 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 26 06:50:00 crc kubenswrapper[4492]: I1126 06:50:00.437965 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-s4gtb"
Nov 26 06:50:00 crc kubenswrapper[4492]: I1126 06:50:00.438016 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 26 06:50:00 crc kubenswrapper[4492]: E1126 06:50:00.438064 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-s4gtb" podUID="1cc59fbe-82e1-406b-95b1-a26b6b8ef467"
Nov 26 06:50:00 crc kubenswrapper[4492]: E1126 06:50:00.438129 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 26 06:50:00 crc kubenswrapper[4492]: I1126 06:50:00.438167 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 26 06:50:00 crc kubenswrapper[4492]: E1126 06:50:00.438299 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 26 06:50:01 crc kubenswrapper[4492]: I1126 06:50:01.437462 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 26 06:50:01 crc kubenswrapper[4492]: E1126 06:50:01.437540 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Has your network provider started?"} Nov 26 06:50:02 crc kubenswrapper[4492]: I1126 06:50:02.299939 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:50:02 crc kubenswrapper[4492]: I1126 06:50:02.299967 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:50:02 crc kubenswrapper[4492]: I1126 06:50:02.299976 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:50:02 crc kubenswrapper[4492]: I1126 06:50:02.299987 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:50:02 crc kubenswrapper[4492]: I1126 06:50:02.300003 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:02Z","lastTransitionTime":"2025-11-26T06:50:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:50:02 crc kubenswrapper[4492]: I1126 06:50:02.401855 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:50:02 crc kubenswrapper[4492]: I1126 06:50:02.401885 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:50:02 crc kubenswrapper[4492]: I1126 06:50:02.401893 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:50:02 crc kubenswrapper[4492]: I1126 06:50:02.401906 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:50:02 crc kubenswrapper[4492]: I1126 06:50:02.401914 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:02Z","lastTransitionTime":"2025-11-26T06:50:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:50:02 crc kubenswrapper[4492]: I1126 06:50:02.438114 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 06:50:02 crc kubenswrapper[4492]: I1126 06:50:02.438132 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-s4gtb" Nov 26 06:50:02 crc kubenswrapper[4492]: E1126 06:50:02.438217 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 06:50:02 crc kubenswrapper[4492]: I1126 06:50:02.438247 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 06:50:02 crc kubenswrapper[4492]: E1126 06:50:02.438528 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 06:50:02 crc kubenswrapper[4492]: E1126 06:50:02.438662 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-s4gtb" podUID="1cc59fbe-82e1-406b-95b1-a26b6b8ef467" Nov 26 06:50:02 crc kubenswrapper[4492]: I1126 06:50:02.503251 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:50:02 crc kubenswrapper[4492]: I1126 06:50:02.503281 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:50:02 crc kubenswrapper[4492]: I1126 06:50:02.503306 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:50:02 crc kubenswrapper[4492]: I1126 06:50:02.503317 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:50:02 crc kubenswrapper[4492]: I1126 06:50:02.503325 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:02Z","lastTransitionTime":"2025-11-26T06:50:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:50:02 crc kubenswrapper[4492]: I1126 06:50:02.604792 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:50:02 crc kubenswrapper[4492]: I1126 06:50:02.604820 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:50:02 crc kubenswrapper[4492]: I1126 06:50:02.604828 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:50:02 crc kubenswrapper[4492]: I1126 06:50:02.604837 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:50:02 crc kubenswrapper[4492]: I1126 06:50:02.604845 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:02Z","lastTransitionTime":"2025-11-26T06:50:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:50:02 crc kubenswrapper[4492]: I1126 06:50:02.706365 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:50:02 crc kubenswrapper[4492]: I1126 06:50:02.706392 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:50:02 crc kubenswrapper[4492]: I1126 06:50:02.706400 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:50:02 crc kubenswrapper[4492]: I1126 06:50:02.706409 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:50:02 crc kubenswrapper[4492]: I1126 06:50:02.706417 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:02Z","lastTransitionTime":"2025-11-26T06:50:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:50:02 crc kubenswrapper[4492]: I1126 06:50:02.807908 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:50:02 crc kubenswrapper[4492]: I1126 06:50:02.807946 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:50:02 crc kubenswrapper[4492]: I1126 06:50:02.807953 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:50:02 crc kubenswrapper[4492]: I1126 06:50:02.807963 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:50:02 crc kubenswrapper[4492]: I1126 06:50:02.807970 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:02Z","lastTransitionTime":"2025-11-26T06:50:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:50:02 crc kubenswrapper[4492]: I1126 06:50:02.909232 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:50:02 crc kubenswrapper[4492]: I1126 06:50:02.909257 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:50:02 crc kubenswrapper[4492]: I1126 06:50:02.909264 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:50:02 crc kubenswrapper[4492]: I1126 06:50:02.909274 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:50:02 crc kubenswrapper[4492]: I1126 06:50:02.909283 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:02Z","lastTransitionTime":"2025-11-26T06:50:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:50:03 crc kubenswrapper[4492]: I1126 06:50:03.011753 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:50:03 crc kubenswrapper[4492]: I1126 06:50:03.011792 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:50:03 crc kubenswrapper[4492]: I1126 06:50:03.011802 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:50:03 crc kubenswrapper[4492]: I1126 06:50:03.011814 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:50:03 crc kubenswrapper[4492]: I1126 06:50:03.011828 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:03Z","lastTransitionTime":"2025-11-26T06:50:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:50:03 crc kubenswrapper[4492]: I1126 06:50:03.113491 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:50:03 crc kubenswrapper[4492]: I1126 06:50:03.113524 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:50:03 crc kubenswrapper[4492]: I1126 06:50:03.113535 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:50:03 crc kubenswrapper[4492]: I1126 06:50:03.113546 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:50:03 crc kubenswrapper[4492]: I1126 06:50:03.113555 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:03Z","lastTransitionTime":"2025-11-26T06:50:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:50:03 crc kubenswrapper[4492]: I1126 06:50:03.215327 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:50:03 crc kubenswrapper[4492]: I1126 06:50:03.215364 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:50:03 crc kubenswrapper[4492]: I1126 06:50:03.215376 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:50:03 crc kubenswrapper[4492]: I1126 06:50:03.215392 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:50:03 crc kubenswrapper[4492]: I1126 06:50:03.215402 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:03Z","lastTransitionTime":"2025-11-26T06:50:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:50:03 crc kubenswrapper[4492]: I1126 06:50:03.317417 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:50:03 crc kubenswrapper[4492]: I1126 06:50:03.317441 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:50:03 crc kubenswrapper[4492]: I1126 06:50:03.317449 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:50:03 crc kubenswrapper[4492]: I1126 06:50:03.317460 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:50:03 crc kubenswrapper[4492]: I1126 06:50:03.317468 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:03Z","lastTransitionTime":"2025-11-26T06:50:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:50:03 crc kubenswrapper[4492]: I1126 06:50:03.419247 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:50:03 crc kubenswrapper[4492]: I1126 06:50:03.419289 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:50:03 crc kubenswrapper[4492]: I1126 06:50:03.419297 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:50:03 crc kubenswrapper[4492]: I1126 06:50:03.419307 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:50:03 crc kubenswrapper[4492]: I1126 06:50:03.419314 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:03Z","lastTransitionTime":"2025-11-26T06:50:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:50:03 crc kubenswrapper[4492]: I1126 06:50:03.437648 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 06:50:03 crc kubenswrapper[4492]: E1126 06:50:03.437748 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 06:50:03 crc kubenswrapper[4492]: I1126 06:50:03.520960 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:50:03 crc kubenswrapper[4492]: I1126 06:50:03.520986 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:50:03 crc kubenswrapper[4492]: I1126 06:50:03.520996 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:50:03 crc kubenswrapper[4492]: I1126 06:50:03.521007 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:50:03 crc kubenswrapper[4492]: I1126 06:50:03.521014 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:03Z","lastTransitionTime":"2025-11-26T06:50:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:50:03 crc kubenswrapper[4492]: I1126 06:50:03.622579 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:50:03 crc kubenswrapper[4492]: I1126 06:50:03.622609 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:50:03 crc kubenswrapper[4492]: I1126 06:50:03.622617 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:50:03 crc kubenswrapper[4492]: I1126 06:50:03.622629 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:50:03 crc kubenswrapper[4492]: I1126 06:50:03.622637 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:03Z","lastTransitionTime":"2025-11-26T06:50:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:50:03 crc kubenswrapper[4492]: I1126 06:50:03.724508 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:50:03 crc kubenswrapper[4492]: I1126 06:50:03.724528 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:50:03 crc kubenswrapper[4492]: I1126 06:50:03.724536 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:50:03 crc kubenswrapper[4492]: I1126 06:50:03.724545 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:50:03 crc kubenswrapper[4492]: I1126 06:50:03.724551 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:03Z","lastTransitionTime":"2025-11-26T06:50:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:50:03 crc kubenswrapper[4492]: I1126 06:50:03.826245 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:50:03 crc kubenswrapper[4492]: I1126 06:50:03.826267 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:50:03 crc kubenswrapper[4492]: I1126 06:50:03.826276 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:50:03 crc kubenswrapper[4492]: I1126 06:50:03.826286 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:50:03 crc kubenswrapper[4492]: I1126 06:50:03.826292 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:03Z","lastTransitionTime":"2025-11-26T06:50:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:50:03 crc kubenswrapper[4492]: I1126 06:50:03.928205 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:50:03 crc kubenswrapper[4492]: I1126 06:50:03.928239 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:50:03 crc kubenswrapper[4492]: I1126 06:50:03.928250 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:50:03 crc kubenswrapper[4492]: I1126 06:50:03.928262 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:50:03 crc kubenswrapper[4492]: I1126 06:50:03.928272 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:03Z","lastTransitionTime":"2025-11-26T06:50:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:50:04 crc kubenswrapper[4492]: I1126 06:50:04.030222 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:50:04 crc kubenswrapper[4492]: I1126 06:50:04.030253 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:50:04 crc kubenswrapper[4492]: I1126 06:50:04.030262 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:50:04 crc kubenswrapper[4492]: I1126 06:50:04.030271 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:50:04 crc kubenswrapper[4492]: I1126 06:50:04.030278 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:04Z","lastTransitionTime":"2025-11-26T06:50:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:50:04 crc kubenswrapper[4492]: I1126 06:50:04.132031 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:50:04 crc kubenswrapper[4492]: I1126 06:50:04.132076 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:50:04 crc kubenswrapper[4492]: I1126 06:50:04.132087 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:50:04 crc kubenswrapper[4492]: I1126 06:50:04.132097 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:50:04 crc kubenswrapper[4492]: I1126 06:50:04.132105 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:04Z","lastTransitionTime":"2025-11-26T06:50:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:50:04 crc kubenswrapper[4492]: I1126 06:50:04.233861 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:50:04 crc kubenswrapper[4492]: I1126 06:50:04.233892 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:50:04 crc kubenswrapper[4492]: I1126 06:50:04.233901 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:50:04 crc kubenswrapper[4492]: I1126 06:50:04.233910 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:50:04 crc kubenswrapper[4492]: I1126 06:50:04.233918 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:04Z","lastTransitionTime":"2025-11-26T06:50:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:50:04 crc kubenswrapper[4492]: I1126 06:50:04.334948 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:50:04 crc kubenswrapper[4492]: I1126 06:50:04.334984 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:50:04 crc kubenswrapper[4492]: I1126 06:50:04.334995 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:50:04 crc kubenswrapper[4492]: I1126 06:50:04.335007 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:50:04 crc kubenswrapper[4492]: I1126 06:50:04.335016 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:04Z","lastTransitionTime":"2025-11-26T06:50:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:50:04 crc kubenswrapper[4492]: I1126 06:50:04.437708 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-s4gtb" Nov 26 06:50:04 crc kubenswrapper[4492]: E1126 06:50:04.437777 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-s4gtb" podUID="1cc59fbe-82e1-406b-95b1-a26b6b8ef467" Nov 26 06:50:04 crc kubenswrapper[4492]: I1126 06:50:04.437877 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 06:50:04 crc kubenswrapper[4492]: E1126 06:50:04.437926 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 06:50:04 crc kubenswrapper[4492]: I1126 06:50:04.437983 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:50:04 crc kubenswrapper[4492]: I1126 06:50:04.438001 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:50:04 crc kubenswrapper[4492]: I1126 06:50:04.438008 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:50:04 crc kubenswrapper[4492]: I1126 06:50:04.438017 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:50:04 crc kubenswrapper[4492]: I1126 06:50:04.438024 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:04Z","lastTransitionTime":"2025-11-26T06:50:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:50:04 crc kubenswrapper[4492]: I1126 06:50:04.438360 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 06:50:04 crc kubenswrapper[4492]: E1126 06:50:04.438424 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 06:50:04 crc kubenswrapper[4492]: I1126 06:50:04.456740 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-nrzjd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"026c3325-a592-4828-8e4f-08bcb790014a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c4955932b597b4b409c6c0bd2195c7918b56f1db3aca639a0d47656173b6176\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70dcd5b71e0ed41855711d244d6628b740a92d6ada3fa3114a76c280b83e7402\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://70dcd5b71e0ed41855711d244d6628b740a92d6ada3fa3114a76c280b83e7402\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"c
ri-o://46f1a12a78f284278cf74713bf7a46f4d62a7f40258ec65ae217b5fbb5729525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://46f1a12a78f284278cf74713bf7a46f4d62a7f40258ec65ae217b5fbb5729525\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://233ce2ea0f4758076db5c922254232e5cccdc2466e03c6044a0ba4077dd71e8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://233ce2ea0f4758076db5c922254232e5cccdc2466e03c6044a0ba4077dd71e8a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1f662c3ac64d38717f3c8eb17f7ab22b7f9af88661f97ddae2caeec71b793a68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f662c3ac64d38717f3c8eb17f7ab22b7f9af88661f97ddae2caeec71b793a68\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mount
Path\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://39153e3f02feb4f9af860e81e6e426354d4113d26135aadb9bc708dcb2cb1770\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://39153e3f02feb4f9af860e81e6e426354d4113d26135aadb9bc708dcb2cb1770\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b34363c342a19d8eda1d5be4bea825a4e0f9a4281915b6ae5d0cc5b00ad7c926\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b34363c342a19d8eda1d5be4bea825a4e0f9a4281915b6ae5d0cc5b00ad7c926\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x29l5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-nrzjd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:50:04Z is after 2025-08-24T17:21:41Z" Nov 26 06:50:04 crc kubenswrapper[4492]: I1126 06:50:04.464951 
4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6lnwf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0151e6e0-df4e-4482-9309-f8cce9bc6ccd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f91d7ead0eaa9a8c8d4ec6372d35236fc33de1f8606616efadfee2ec6a71324\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cmsnp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6lnwf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:50:04Z is after 2025-08-24T17:21:41Z" Nov 26 06:50:04 crc kubenswrapper[4492]: I1126 06:50:04.471794 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"04bf18ad-d2a1-4b30-a3fa-2b6247363c82\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fad93839d2a51dffea51b659a6dcbfe24701e00ebb88e18329f7aa4351e1b4f5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lwjt9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://695ce8a08afa726c47c6aa48ddd828cbc420a9740de6cf165351e5bd68174a89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lwjt9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6blv7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:50:04Z is after 2025-08-24T17:21:41Z" Nov 26 06:50:04 crc kubenswrapper[4492]: I1126 06:50:04.478340 4492 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2cbf75e9-4b7e-454b-bcff-ebb0f537bb0e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://390b1499bd9aae353a574f1d5ca4243dda7d4576837cf40a0118c53cff23ebdb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d88bf1783acd582780b9f9208f310d28ebb08a2ca510a916a0ddc6fad0c4d88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7d88bf1783acd582780b9f9208f310d28ebb08a2ca510a916a0ddc6fad0c4d88\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:24Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:50:04Z is after 2025-08-24T17:21:41Z" Nov 26 06:50:04 crc kubenswrapper[4492]: I1126 06:50:04.486697 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch 
status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"779b4f9a-92b7-4dcc-938a-e4de5decd688\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e170e91f442c9f45c7adfc9a5f8435cb51135522d5ac61f29829834c1f797e0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a8187b933b520c7a9c1c7f798f841f3892c249f52eddd13c0c7585a8bc916f20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b87661ddeafdf124a87d6bc50755b340e32d88bbc35a005ae13aa66aa3b39ff4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a8bf4249c1551f054875ff3ef146502de6c99fd3afd10d78b41274196a35a6d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller
-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:24Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:50:04Z is after 2025-08-24T17:21:41Z" Nov 26 06:50:04 crc kubenswrapper[4492]: I1126 06:50:04.493943 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b32864c2-0866-4642-a872-7a5109d6f84f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3cdba573f24fecefce899a977a585bd480de506f64dcd8af6fd7f32f945b844d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b85db4fc799f48153a0e9db0b5b8316762f221aa873c03d675beb5e9939377bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev
/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8088d16b18d99a32c41b63cbd6181314e805595697cbc5f122864dff6fe7b324\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2c51611cfe8bb2b69aeaa6d69f7e94deda73d79397c8e5a4ac4f0fe330a4b586\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2c51611cfe8bb2b69aeaa6d69f7e94deda73d79397c8e5a4ac4f0fe330a4b586\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:24Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:50:04Z is after 2025-08-24T17:21:41Z" Nov 26 06:50:04 crc kubenswrapper[4492]: I1126 06:50:04.501792 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-5bshd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a471ac3f-0ac0-4110-94bb-194c0de0af26\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d93d67e2acbb2cff41dd4d5bc98ffe7cd68f7fd393e4fab2fc974ae4de103ca3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9a91fba8751c53be54b0060bfc75906ab11b521770ca44425d8910fa13027c9d\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T06:49:35Z\\\",\\\"message\\\":\\\"2025-11-26T06:48:49+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_11d86e64-dae3-4806-bca9-064973c40277\\\\n2025-11-26T06:48:49+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_11d86e64-dae3-4806-bca9-064973c40277 to /host/opt/cni/bin/\\\\n2025-11-26T06:48:50Z [verbose] multus-daemon started\\\\n2025-11-26T06:48:50Z [verbose] Readiness Indicator file check\\\\n2025-11-26T06:49:35Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:49:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gt98z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-5bshd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:50:04Z is after 2025-08-24T17:21:41Z" Nov 26 06:50:04 crc kubenswrapper[4492]: I1126 06:50:04.510332 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"995e57c0-8e79-4857-8451-c7f7b51a05d3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70a3d5141782b7c4dcb2ef96a026e1fb4f4ef101c967f64a20ece097bbdf007f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1684e95c263df92c92efdd2240417c071f977b58d825abd2155277d7cce1fd57\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://15f4e01e3336948991062e895549de16371af485e1cc1f32591b5dc4fc62472b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef8e05b7e0643e9159acdd474c0f1ed97db182b0a7ed4f21b475ce6e4c051f52\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a6dd3695118a8c09585a7cfceb42ac5ae5898562c5f6442da6936f849a4e9f8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T06:48:41Z\\\",\\\"message\\\":\\\"le observer\\\\nW1126 06:48:41.573117 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1126 06:48:41.573321 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 06:48:41.575536 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3533228848/tls.crt::/tmp/serving-cert-3533228848/tls.key\\\\\\\"\\\\nI1126 06:48:41.958496 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 06:48:41.961105 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 06:48:41.961124 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 06:48:41.961145 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 06:48:41.961150 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 06:48:41.965068 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 06:48:41.965092 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:48:41.965097 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 06:48:41.965101 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 06:48:41.965103 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 06:48:41.965106 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 06:48:41.965108 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 06:48:41.965323 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 06:48:41.966098 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ca15652692a4dd6b01f7ec02b8ff37ac41ed740c4c47d451e0742aa80d0c028\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4421dd0243f7239dfa2ede847b9cb5230663e69a214dca3dfb1a4763ea76b233\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4421dd0243f7239dfa2ede847b9cb5230663e69a214dca3dfb1a4763ea76b233\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:24Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:50:04Z is after 2025-08-24T17:21:41Z" Nov 26 06:50:04 crc kubenswrapper[4492]: I1126 06:50:04.517966 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:50:04Z is after 2025-08-24T17:21:41Z" Nov 26 06:50:04 crc kubenswrapper[4492]: I1126 06:50:04.525342 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:50:04Z is after 2025-08-24T17:21:41Z" Nov 26 06:50:04 crc kubenswrapper[4492]: I1126 06:50:04.532517 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2644f0895688786b5b70f08011457eed33cb0a7962ac6dde6b60dd3276497011\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://27d142c5328a6f659cd2cee0b6535403ccbfb07aee1ea29c928c9d80a847f4ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:50:04Z is after 2025-08-24T17:21:41Z" Nov 26 06:50:04 crc kubenswrapper[4492]: I1126 06:50:04.539941 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:50:04Z is after 2025-08-24T17:21:41Z" Nov 26 06:50:04 crc kubenswrapper[4492]: I1126 06:50:04.539963 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:50:04 crc kubenswrapper[4492]: I1126 06:50:04.540094 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:50:04 crc kubenswrapper[4492]: I1126 06:50:04.540104 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:50:04 crc kubenswrapper[4492]: I1126 06:50:04.540114 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:50:04 crc kubenswrapper[4492]: I1126 06:50:04.540122 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:04Z","lastTransitionTime":"2025-11-26T06:50:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:50:04 crc kubenswrapper[4492]: I1126 06:50:04.546015 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-hjxcm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cfdb68d9-168b-4d04-a6ee-b2deef54a9ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f3b93237fa8e75f6423c8f194440aebb4fffec26f63b19b00396ee567fb454f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cpmw5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:50Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-hjxcm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:50:04Z is after 2025-08-24T17:21:41Z" Nov 26 06:50:04 crc kubenswrapper[4492]: I1126 06:50:04.552877 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2gwwb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d23da2c-14b7-4671-b87e-7506855ca163\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://56a756e2798f3a758f7cc404b3c1e543389f88510f1f1bcef6bd603086b5ac96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c7zdg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://23c2c8bf0201054d839f80e0d6ee1423ef1140d1c59512cd787edbad1b611b2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c7zdg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:49:01Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2gwwb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:50:04Z is after 2025-08-24T17:21:41Z" Nov 26 
06:50:04 crc kubenswrapper[4492]: I1126 06:50:04.564873 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e39bab0a-78f2-4734-b5f5-f5e382ae2523\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ed249be3ee240d3abc137ebe93a7f0595fd30e86f3d566029d2aafada08f0b3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d0d3a70003d231c58cf949db69367229e45c44813a969360fa4d22f17b676fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://711af18dbbce5875faa107da6007d9fa0fd99ba58c8e0e033ad956ada538a9c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"lo
g-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bd197c99b6d8e405f2023f3985d14579f2da73008cb6219bde9ad309cf0c7b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7689d03ba1a45b28d33ecb66a7451c4e3767d076eba9751dc438b44bf692b0e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4bd6fe7d38f84cfedbd45716bbb9bc9265e332d672766c4c85c5910f4c59ee2d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4bd6fe7d38f84cfedbd45716bbb9bc9265e332d672766c4c85c5910f4c59ee2d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f51b9153796b1b926d5f2c0bdc5d55d1be2e33a0df568da392b932178973ef64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f51b9153796b1b926d5f2c0bdc5d55d1be2e33a0df568da392b932178973ef64\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:25Z\\\",\\\"reas
on\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:25Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://8cfc778e8208b9ac4eecdcec38b72db997cdd2a68b19cc8e30bdde590da553fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8cfc778e8208b9ac4eecdcec38b72db997cdd2a68b19cc8e30bdde590da553fd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:24Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:50:04Z is after 2025-08-24T17:21:41Z" Nov 26 06:50:04 crc kubenswrapper[4492]: I1126 06:50:04.572933 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://78117c73db01f54f893d52844cab11a6257a1f6b6b582fb751065e1acc329620\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:50:04Z is after 2025-08-24T17:21:41Z" Nov 26 06:50:04 crc kubenswrapper[4492]: I1126 06:50:04.579718 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4aa19f79274209a31db5cfe0a8ff6f71000fc4efb2d65dfab3f719d3a7f1ee9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:50:04Z is after 2025-08-24T17:21:41Z" Nov 26 06:50:04 crc kubenswrapper[4492]: I1126 06:50:04.592578 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b104695-0850-4fb3-b2f8-f764435f8694\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:48:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5de69ea04f427c1f151433ee53b119dfa7e3cd603f924ebca340c5050ff7d097\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://738cf2a56bd431ea3dcd29c8a6e66881e618cf6592af8ce1852be9776c41f803\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dba16d525497282969e2ead66e1d4886793234d0f4d2ef143a8246329131b046\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://501133ebc0e93cbba7bea713d813e6f41caa28286ed59ae8d328b6f9050a1e71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0301a329531b99f93a3e91f672edc4ce17ae03cfd5562f5802362423f11670fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1519223368c78fc663c8f6674135a7c574425dccbd0151d6663836b4715e9f9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c0888a57c383ad8c0a363ee29bc31841ce4175cb
3aeb1b825da7efd383d4265c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c0888a57c383ad8c0a363ee29bc31841ce4175cb3aeb1b825da7efd383d4265c\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T06:49:38Z\\\",\\\"message\\\":\\\"ts: [openshift-kube-controller-manager/kube-controller-manager-crc openshift-kube-scheduler/openshift-kube-scheduler-crc openshift-network-diagnostics/network-check-source-55646444c4-trplf openshift-network-operator/iptables-alerter-4ln5h openshift-machine-config-operator/kube-rbac-proxy-crio-crc openshift-machine-config-operator/machine-config-daemon-6blv7 openshift-multus/multus-5bshd openshift-multus/network-metrics-daemon-s4gtb openshift-network-operator/network-operator-58b4c7f79c-55gtf openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2gwwb openshift-image-registry/node-ca-hjxcm openshift-network-node-identity/network-node-identity-vrzqb openshift-ovn-kubernetes/ovnkube-node-lghgp openshift-multus/multus-additional-cni-plugins-nrzjd openshift-network-console/networking-console-plugin-85b44fc459-gdk6g openshift-network-diagnostics/network-check-target-xd92c]\\\\nI1126 06:49:38.114760 6374 services_controller.go:445] Built service openshift-marketplace/certified-operators LB template configs for network=default: []services.lbConfig(nil)\\\\nF1126 06:49:38.115033 6374 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initializatio\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T06:49:37Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-lghgp_openshift-ovn-kubernetes(9b104695-0850-4fb3-b2f8-f764435f8694)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dcc2ea28d7ab1193d0836b56704a623efefea918b62fde4a41c3d5466a531a39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T06:48:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4001ee998f435fc6249ef42a3a6a9a8cf4a5031eb8999158ba3ba6bfa41a4a43\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4001ee998f435fc6249ef42a3a6a9a8cf4a5031eb8999158ba3ba6bfa41a4a43\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T06:48:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T06:48:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mc5q6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:48:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-lghgp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:50:04Z is after 2025-08-24T17:21:41Z" Nov 26 06:50:04 crc kubenswrapper[4492]: I1126 06:50:04.599539 4492 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-s4gtb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1cc59fbe-82e1-406b-95b1-a26b6b8ef467\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T06:49:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk29d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hk29d\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T06:49:02Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-s4gtb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:50:04Z is after 2025-08-24T17:21:41Z"
Nov 26 06:50:04 crc kubenswrapper[4492]: I1126 06:50:04.642240 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:50:04 crc kubenswrapper[4492]: I1126 06:50:04.642270 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:50:04 crc kubenswrapper[4492]: I1126 06:50:04.642279 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:50:04 crc kubenswrapper[4492]: I1126 06:50:04.642292 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:50:04 crc kubenswrapper[4492]: I1126 06:50:04.642303 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:04Z","lastTransitionTime":"2025-11-26T06:50:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
[... 24 further near-identical node-status heartbeat blocks (120 entries) omitted: kubelet_node_status.go:724 "Recording event message for node" node="crc" for events NodeHasSufficientMemory/NodeHasNoDiskPressure/NodeHasSufficientPID/NodeNotReady, each followed by setters.go:603 "Node became not ready" with reason KubeletNotReady, logged roughly every 100 ms between 06:50:04.744 and 06:50:07.084 and differing only in timestamps; the non-repeated entries interleaved with them are kept below ...]
Nov 26 06:50:05 crc kubenswrapper[4492]: I1126 06:50:05.438453 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 26 06:50:05 crc kubenswrapper[4492]: E1126 06:50:05.438750 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 26 06:50:06 crc kubenswrapper[4492]: I1126 06:50:06.129692 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/1cc59fbe-82e1-406b-95b1-a26b6b8ef467-metrics-certs\") pod \"network-metrics-daemon-s4gtb\" (UID: \"1cc59fbe-82e1-406b-95b1-a26b6b8ef467\") " pod="openshift-multus/network-metrics-daemon-s4gtb"
Nov 26 06:50:06 crc kubenswrapper[4492]: E1126 06:50:06.129811 4492 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 26 06:50:06 crc kubenswrapper[4492]: E1126 06:50:06.129851 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/1cc59fbe-82e1-406b-95b1-a26b6b8ef467-metrics-certs podName:1cc59fbe-82e1-406b-95b1-a26b6b8ef467 nodeName:}" failed. No retries permitted until 2025-11-26 06:51:10.129839714 +0000 UTC m=+166.013728012 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/1cc59fbe-82e1-406b-95b1-a26b6b8ef467-metrics-certs") pod "network-metrics-daemon-s4gtb" (UID: "1cc59fbe-82e1-406b-95b1-a26b6b8ef467") : object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 26 06:50:06 crc kubenswrapper[4492]: I1126 06:50:06.438297 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 26 06:50:06 crc kubenswrapper[4492]: E1126 06:50:06.438383 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 26 06:50:06 crc kubenswrapper[4492]: I1126 06:50:06.438506 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-s4gtb"
Nov 26 06:50:06 crc kubenswrapper[4492]: E1126 06:50:06.438590 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-s4gtb" podUID="1cc59fbe-82e1-406b-95b1-a26b6b8ef467"
Nov 26 06:50:06 crc kubenswrapper[4492]: I1126 06:50:06.438720 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 26 06:50:06 crc kubenswrapper[4492]: E1126 06:50:06.438951 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 26 06:50:07 crc kubenswrapper[4492]: I1126 06:50:07.113533 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 06:50:07 crc kubenswrapper[4492]: I1126 06:50:07.113560 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 06:50:07 crc kubenswrapper[4492]: I1126 06:50:07.113569 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 06:50:07 crc kubenswrapper[4492]: I1126 06:50:07.113577 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 06:50:07 crc kubenswrapper[4492]: I1126 06:50:07.113584 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:07Z","lastTransitionTime":"2025-11-26T06:50:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:50:07 crc kubenswrapper[4492]: E1126 06:50:07.122515 4492 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148056Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608856Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:50:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:50:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:50:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:50:07Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:50:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:50:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:50:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:50:07Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"5a30a4c6-2314-4103-8c18-44e795d62516\\\",\\\"systemUUID\\\":\\\"836cf739-0185-4d24-bd92-dec4516ccf4f\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:50:07Z is after 
2025-08-24T17:21:41Z" Nov 26 06:50:07 crc kubenswrapper[4492]: I1126 06:50:07.124549 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:50:07 crc kubenswrapper[4492]: I1126 06:50:07.124576 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:50:07 crc kubenswrapper[4492]: I1126 06:50:07.124585 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:50:07 crc kubenswrapper[4492]: I1126 06:50:07.124595 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:50:07 crc kubenswrapper[4492]: I1126 06:50:07.124601 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:07Z","lastTransitionTime":"2025-11-26T06:50:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:50:07 crc kubenswrapper[4492]: E1126 06:50:07.132192 4492 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148056Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608856Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:50:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:50:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:50:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:50:07Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:50:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:50:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:50:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:50:07Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"5a30a4c6-2314-4103-8c18-44e795d62516\\\",\\\"systemUUID\\\":\\\"836cf739-0185-4d24-bd92-dec4516ccf4f\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:50:07Z is after 
2025-08-24T17:21:41Z" Nov 26 06:50:07 crc kubenswrapper[4492]: I1126 06:50:07.134211 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:50:07 crc kubenswrapper[4492]: I1126 06:50:07.134240 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:50:07 crc kubenswrapper[4492]: I1126 06:50:07.134249 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:50:07 crc kubenswrapper[4492]: I1126 06:50:07.134258 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:50:07 crc kubenswrapper[4492]: I1126 06:50:07.134265 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:07Z","lastTransitionTime":"2025-11-26T06:50:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:50:07 crc kubenswrapper[4492]: E1126 06:50:07.142061 4492 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148056Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608856Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:50:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:50:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:50:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:50:07Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:50:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:50:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:50:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:50:07Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"5a30a4c6-2314-4103-8c18-44e795d62516\\\",\\\"systemUUID\\\":\\\"836cf739-0185-4d24-bd92-dec4516ccf4f\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:50:07Z is after 
2025-08-24T17:21:41Z" Nov 26 06:50:07 crc kubenswrapper[4492]: I1126 06:50:07.143919 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:50:07 crc kubenswrapper[4492]: I1126 06:50:07.143961 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:50:07 crc kubenswrapper[4492]: I1126 06:50:07.143969 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:50:07 crc kubenswrapper[4492]: I1126 06:50:07.143978 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:50:07 crc kubenswrapper[4492]: I1126 06:50:07.143986 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:07Z","lastTransitionTime":"2025-11-26T06:50:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:50:07 crc kubenswrapper[4492]: E1126 06:50:07.151496 4492 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148056Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608856Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:50:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:50:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:50:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:50:07Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:50:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:50:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:50:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:50:07Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"5a30a4c6-2314-4103-8c18-44e795d62516\\\",\\\"systemUUID\\\":\\\"836cf739-0185-4d24-bd92-dec4516ccf4f\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:50:07Z is after 
2025-08-24T17:21:41Z" Nov 26 06:50:07 crc kubenswrapper[4492]: I1126 06:50:07.153770 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:50:07 crc kubenswrapper[4492]: I1126 06:50:07.153796 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:50:07 crc kubenswrapper[4492]: I1126 06:50:07.153805 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:50:07 crc kubenswrapper[4492]: I1126 06:50:07.153816 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:50:07 crc kubenswrapper[4492]: I1126 06:50:07.153823 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:07Z","lastTransitionTime":"2025-11-26T06:50:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:50:07 crc kubenswrapper[4492]: E1126 06:50:07.161059 4492 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148056Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608856Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:50:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:50:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:50:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:50:07Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:50:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:50:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T06:50:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T06:50:07Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"5a30a4c6-2314-4103-8c18-44e795d62516\\\",\\\"systemUUID\\\":\\\"836cf739-0185-4d24-bd92-dec4516ccf4f\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T06:50:07Z is after 
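Every retry in the block above fails the same way: the kubelet's node-status PATCH is gated by the validating webhook node.network-node-identity.openshift.io at https://127.0.0.1:9743, and that endpoint's serving certificate expired on 2025-08-24T17:21:41Z while the node clock reads 2025-11-26. A minimal Go sketch for confirming this from the node (assumptions: the address and port are taken from the log lines above; chain verification is deliberately skipped so the expired certificate can still be read):

    package main

    import (
    	"crypto/tls"
    	"fmt"
    	"log"
    )

    func main() {
    	// Dial the webhook endpoint from the log. InsecureSkipVerify because
    	// the point is to inspect the expired certificate, not to trust it.
    	conn, err := tls.Dial("tcp", "127.0.0.1:9743", &tls.Config{InsecureSkipVerify: true})
    	if err != nil {
    		log.Fatal(err)
    	}
    	defer conn.Close()

    	cert := conn.ConnectionState().PeerCertificates[0]
    	fmt.Println("subject:  ", cert.Subject)
    	fmt.Println("notBefore:", cert.NotBefore.UTC())
    	fmt.Println("notAfter: ", cert.NotAfter.UTC()) // the log implies 2025-08-24T17:21:41Z
    }

On a healthy node notAfter lies in the future; here it should print the 2025-08-24 expiry that every webhook call is tripping over.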
2025-08-24T17:21:41Z" Nov 26 06:50:07 crc kubenswrapper[4492]: E1126 06:50:07.161159 4492 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 26 06:50:07 crc kubenswrapper[4492]: I1126 06:50:07.186000 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:50:07 crc kubenswrapper[4492]: I1126 06:50:07.186023 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:50:07 crc kubenswrapper[4492]: I1126 06:50:07.186032 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:50:07 crc kubenswrapper[4492]: I1126 06:50:07.186041 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:50:07 crc kubenswrapper[4492]: I1126 06:50:07.186056 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:07Z","lastTransitionTime":"2025-11-26T06:50:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:50:07 crc kubenswrapper[4492]: I1126 06:50:07.287289 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:50:07 crc kubenswrapper[4492]: I1126 06:50:07.287393 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:50:07 crc kubenswrapper[4492]: I1126 06:50:07.287467 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:50:07 crc kubenswrapper[4492]: I1126 06:50:07.287530 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:50:07 crc kubenswrapper[4492]: I1126 06:50:07.287596 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:07Z","lastTransitionTime":"2025-11-26T06:50:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:50:07 crc kubenswrapper[4492]: I1126 06:50:07.388823 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:50:07 crc kubenswrapper[4492]: I1126 06:50:07.388852 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:50:07 crc kubenswrapper[4492]: I1126 06:50:07.388860 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:50:07 crc kubenswrapper[4492]: I1126 06:50:07.388869 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:50:07 crc kubenswrapper[4492]: I1126 06:50:07.388876 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:07Z","lastTransitionTime":"2025-11-26T06:50:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:50:07 crc kubenswrapper[4492]: I1126 06:50:07.437420 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 06:50:07 crc kubenswrapper[4492]: E1126 06:50:07.437491 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 06:50:07 crc kubenswrapper[4492]: I1126 06:50:07.437939 4492 scope.go:117] "RemoveContainer" containerID="c0888a57c383ad8c0a363ee29bc31841ce4175cb3aeb1b825da7efd383d4265c" Nov 26 06:50:07 crc kubenswrapper[4492]: E1126 06:50:07.438060 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-lghgp_openshift-ovn-kubernetes(9b104695-0850-4fb3-b2f8-f764435f8694)\"" pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" podUID="9b104695-0850-4fb3-b2f8-f764435f8694" Nov 26 06:50:07 crc kubenswrapper[4492]: I1126 06:50:07.489763 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:50:07 crc kubenswrapper[4492]: I1126 06:50:07.489790 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:50:07 crc kubenswrapper[4492]: I1126 06:50:07.489799 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:50:07 crc kubenswrapper[4492]: I1126 06:50:07.489809 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:50:07 crc kubenswrapper[4492]: I1126 06:50:07.489816 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:07Z","lastTransitionTime":"2025-11-26T06:50:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:50:07 crc kubenswrapper[4492]: I1126 06:50:07.591522 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:50:07 crc kubenswrapper[4492]: I1126 06:50:07.591547 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:50:07 crc kubenswrapper[4492]: I1126 06:50:07.591556 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:50:07 crc kubenswrapper[4492]: I1126 06:50:07.591565 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:50:07 crc kubenswrapper[4492]: I1126 06:50:07.591572 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:07Z","lastTransitionTime":"2025-11-26T06:50:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:50:07 crc kubenswrapper[4492]: I1126 06:50:07.693220 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:50:07 crc kubenswrapper[4492]: I1126 06:50:07.693249 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:50:07 crc kubenswrapper[4492]: I1126 06:50:07.693257 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:50:07 crc kubenswrapper[4492]: I1126 06:50:07.693269 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:50:07 crc kubenswrapper[4492]: I1126 06:50:07.693276 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:07Z","lastTransitionTime":"2025-11-26T06:50:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:50:07 crc kubenswrapper[4492]: I1126 06:50:07.794622 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:50:07 crc kubenswrapper[4492]: I1126 06:50:07.794647 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:50:07 crc kubenswrapper[4492]: I1126 06:50:07.794654 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:50:07 crc kubenswrapper[4492]: I1126 06:50:07.794663 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:50:07 crc kubenswrapper[4492]: I1126 06:50:07.794669 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:07Z","lastTransitionTime":"2025-11-26T06:50:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:50:07 crc kubenswrapper[4492]: I1126 06:50:07.895664 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:50:07 crc kubenswrapper[4492]: I1126 06:50:07.895694 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:50:07 crc kubenswrapper[4492]: I1126 06:50:07.895704 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:50:07 crc kubenswrapper[4492]: I1126 06:50:07.895714 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:50:07 crc kubenswrapper[4492]: I1126 06:50:07.895721 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:07Z","lastTransitionTime":"2025-11-26T06:50:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:50:07 crc kubenswrapper[4492]: I1126 06:50:07.997301 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:50:07 crc kubenswrapper[4492]: I1126 06:50:07.997327 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:50:07 crc kubenswrapper[4492]: I1126 06:50:07.997335 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:50:07 crc kubenswrapper[4492]: I1126 06:50:07.997346 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:50:07 crc kubenswrapper[4492]: I1126 06:50:07.997354 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:07Z","lastTransitionTime":"2025-11-26T06:50:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:50:08 crc kubenswrapper[4492]: I1126 06:50:08.099548 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:50:08 crc kubenswrapper[4492]: I1126 06:50:08.099607 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:50:08 crc kubenswrapper[4492]: I1126 06:50:08.099620 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:50:08 crc kubenswrapper[4492]: I1126 06:50:08.099635 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:50:08 crc kubenswrapper[4492]: I1126 06:50:08.099645 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:08Z","lastTransitionTime":"2025-11-26T06:50:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:50:08 crc kubenswrapper[4492]: I1126 06:50:08.201368 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:50:08 crc kubenswrapper[4492]: I1126 06:50:08.201409 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:50:08 crc kubenswrapper[4492]: I1126 06:50:08.201422 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:50:08 crc kubenswrapper[4492]: I1126 06:50:08.201433 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:50:08 crc kubenswrapper[4492]: I1126 06:50:08.201446 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:08Z","lastTransitionTime":"2025-11-26T06:50:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:50:08 crc kubenswrapper[4492]: I1126 06:50:08.303265 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:50:08 crc kubenswrapper[4492]: I1126 06:50:08.303296 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:50:08 crc kubenswrapper[4492]: I1126 06:50:08.303305 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:50:08 crc kubenswrapper[4492]: I1126 06:50:08.303316 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:50:08 crc kubenswrapper[4492]: I1126 06:50:08.303324 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:08Z","lastTransitionTime":"2025-11-26T06:50:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:50:08 crc kubenswrapper[4492]: I1126 06:50:08.405268 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:50:08 crc kubenswrapper[4492]: I1126 06:50:08.405300 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:50:08 crc kubenswrapper[4492]: I1126 06:50:08.405308 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:50:08 crc kubenswrapper[4492]: I1126 06:50:08.405320 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:50:08 crc kubenswrapper[4492]: I1126 06:50:08.405330 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:08Z","lastTransitionTime":"2025-11-26T06:50:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:50:08 crc kubenswrapper[4492]: I1126 06:50:08.437811 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 06:50:08 crc kubenswrapper[4492]: I1126 06:50:08.437865 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-s4gtb" Nov 26 06:50:08 crc kubenswrapper[4492]: E1126 06:50:08.437913 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 06:50:08 crc kubenswrapper[4492]: I1126 06:50:08.437922 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 06:50:08 crc kubenswrapper[4492]: E1126 06:50:08.437986 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-s4gtb" podUID="1cc59fbe-82e1-406b-95b1-a26b6b8ef467" Nov 26 06:50:08 crc kubenswrapper[4492]: E1126 06:50:08.438232 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 06:50:08 crc kubenswrapper[4492]: I1126 06:50:08.506441 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:50:08 crc kubenswrapper[4492]: I1126 06:50:08.506464 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:50:08 crc kubenswrapper[4492]: I1126 06:50:08.506472 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:50:08 crc kubenswrapper[4492]: I1126 06:50:08.506482 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:50:08 crc kubenswrapper[4492]: I1126 06:50:08.506490 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:08Z","lastTransitionTime":"2025-11-26T06:50:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:50:08 crc kubenswrapper[4492]: I1126 06:50:08.607967 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:50:08 crc kubenswrapper[4492]: I1126 06:50:08.608002 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:50:08 crc kubenswrapper[4492]: I1126 06:50:08.608010 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:50:08 crc kubenswrapper[4492]: I1126 06:50:08.608019 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:50:08 crc kubenswrapper[4492]: I1126 06:50:08.608026 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:08Z","lastTransitionTime":"2025-11-26T06:50:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:50:08 crc kubenswrapper[4492]: I1126 06:50:08.709947 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:50:08 crc kubenswrapper[4492]: I1126 06:50:08.709970 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:50:08 crc kubenswrapper[4492]: I1126 06:50:08.709978 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:50:08 crc kubenswrapper[4492]: I1126 06:50:08.709987 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:50:08 crc kubenswrapper[4492]: I1126 06:50:08.709994 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:08Z","lastTransitionTime":"2025-11-26T06:50:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:50:08 crc kubenswrapper[4492]: I1126 06:50:08.811454 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:50:08 crc kubenswrapper[4492]: I1126 06:50:08.811493 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:50:08 crc kubenswrapper[4492]: I1126 06:50:08.811502 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:50:08 crc kubenswrapper[4492]: I1126 06:50:08.811511 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:50:08 crc kubenswrapper[4492]: I1126 06:50:08.811519 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:08Z","lastTransitionTime":"2025-11-26T06:50:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:50:08 crc kubenswrapper[4492]: I1126 06:50:08.912646 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:50:08 crc kubenswrapper[4492]: I1126 06:50:08.912692 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:50:08 crc kubenswrapper[4492]: I1126 06:50:08.912701 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:50:08 crc kubenswrapper[4492]: I1126 06:50:08.912710 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:50:08 crc kubenswrapper[4492]: I1126 06:50:08.912717 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:08Z","lastTransitionTime":"2025-11-26T06:50:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:50:09 crc kubenswrapper[4492]: I1126 06:50:09.014458 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:50:09 crc kubenswrapper[4492]: I1126 06:50:09.014487 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:50:09 crc kubenswrapper[4492]: I1126 06:50:09.014496 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:50:09 crc kubenswrapper[4492]: I1126 06:50:09.014507 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:50:09 crc kubenswrapper[4492]: I1126 06:50:09.014515 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:09Z","lastTransitionTime":"2025-11-26T06:50:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:50:09 crc kubenswrapper[4492]: I1126 06:50:09.116080 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:50:09 crc kubenswrapper[4492]: I1126 06:50:09.116113 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:50:09 crc kubenswrapper[4492]: I1126 06:50:09.116124 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:50:09 crc kubenswrapper[4492]: I1126 06:50:09.116134 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:50:09 crc kubenswrapper[4492]: I1126 06:50:09.116144 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:09Z","lastTransitionTime":"2025-11-26T06:50:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:50:09 crc kubenswrapper[4492]: I1126 06:50:09.217154 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:50:09 crc kubenswrapper[4492]: I1126 06:50:09.217193 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:50:09 crc kubenswrapper[4492]: I1126 06:50:09.217201 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:50:09 crc kubenswrapper[4492]: I1126 06:50:09.217211 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:50:09 crc kubenswrapper[4492]: I1126 06:50:09.217219 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:09Z","lastTransitionTime":"2025-11-26T06:50:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:50:09 crc kubenswrapper[4492]: I1126 06:50:09.318839 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:50:09 crc kubenswrapper[4492]: I1126 06:50:09.318857 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:50:09 crc kubenswrapper[4492]: I1126 06:50:09.318865 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:50:09 crc kubenswrapper[4492]: I1126 06:50:09.318875 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:50:09 crc kubenswrapper[4492]: I1126 06:50:09.318883 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:09Z","lastTransitionTime":"2025-11-26T06:50:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:50:09 crc kubenswrapper[4492]: I1126 06:50:09.420581 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:50:09 crc kubenswrapper[4492]: I1126 06:50:09.420601 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:50:09 crc kubenswrapper[4492]: I1126 06:50:09.420608 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:50:09 crc kubenswrapper[4492]: I1126 06:50:09.420616 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:50:09 crc kubenswrapper[4492]: I1126 06:50:09.420623 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:09Z","lastTransitionTime":"2025-11-26T06:50:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:50:09 crc kubenswrapper[4492]: I1126 06:50:09.438154 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 06:50:09 crc kubenswrapper[4492]: E1126 06:50:09.438259 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 06:50:09 crc kubenswrapper[4492]: I1126 06:50:09.522090 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:50:09 crc kubenswrapper[4492]: I1126 06:50:09.522116 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:50:09 crc kubenswrapper[4492]: I1126 06:50:09.522128 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:50:09 crc kubenswrapper[4492]: I1126 06:50:09.522141 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:50:09 crc kubenswrapper[4492]: I1126 06:50:09.522150 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:09Z","lastTransitionTime":"2025-11-26T06:50:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:50:09 crc kubenswrapper[4492]: I1126 06:50:09.624106 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:50:09 crc kubenswrapper[4492]: I1126 06:50:09.624141 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:50:09 crc kubenswrapper[4492]: I1126 06:50:09.624151 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:50:09 crc kubenswrapper[4492]: I1126 06:50:09.624165 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:50:09 crc kubenswrapper[4492]: I1126 06:50:09.624195 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:09Z","lastTransitionTime":"2025-11-26T06:50:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:50:09 crc kubenswrapper[4492]: I1126 06:50:09.726078 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:50:09 crc kubenswrapper[4492]: I1126 06:50:09.726112 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:50:09 crc kubenswrapper[4492]: I1126 06:50:09.726122 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:50:09 crc kubenswrapper[4492]: I1126 06:50:09.726134 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:50:09 crc kubenswrapper[4492]: I1126 06:50:09.726142 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:09Z","lastTransitionTime":"2025-11-26T06:50:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:50:09 crc kubenswrapper[4492]: I1126 06:50:09.827898 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:50:09 crc kubenswrapper[4492]: I1126 06:50:09.827955 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:50:09 crc kubenswrapper[4492]: I1126 06:50:09.827966 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:50:09 crc kubenswrapper[4492]: I1126 06:50:09.827977 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:50:09 crc kubenswrapper[4492]: I1126 06:50:09.827985 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:09Z","lastTransitionTime":"2025-11-26T06:50:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:50:09 crc kubenswrapper[4492]: I1126 06:50:09.929314 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:50:09 crc kubenswrapper[4492]: I1126 06:50:09.929425 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:50:09 crc kubenswrapper[4492]: I1126 06:50:09.929437 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:50:09 crc kubenswrapper[4492]: I1126 06:50:09.929447 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:50:09 crc kubenswrapper[4492]: I1126 06:50:09.929455 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:09Z","lastTransitionTime":"2025-11-26T06:50:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:50:10 crc kubenswrapper[4492]: I1126 06:50:10.031323 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:50:10 crc kubenswrapper[4492]: I1126 06:50:10.031353 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:50:10 crc kubenswrapper[4492]: I1126 06:50:10.031362 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:50:10 crc kubenswrapper[4492]: I1126 06:50:10.031371 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:50:10 crc kubenswrapper[4492]: I1126 06:50:10.031379 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:10Z","lastTransitionTime":"2025-11-26T06:50:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:50:10 crc kubenswrapper[4492]: I1126 06:50:10.133061 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:50:10 crc kubenswrapper[4492]: I1126 06:50:10.133086 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:50:10 crc kubenswrapper[4492]: I1126 06:50:10.133094 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:50:10 crc kubenswrapper[4492]: I1126 06:50:10.133104 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:50:10 crc kubenswrapper[4492]: I1126 06:50:10.133112 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:10Z","lastTransitionTime":"2025-11-26T06:50:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:50:10 crc kubenswrapper[4492]: I1126 06:50:10.234213 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:50:10 crc kubenswrapper[4492]: I1126 06:50:10.234245 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:50:10 crc kubenswrapper[4492]: I1126 06:50:10.234254 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:50:10 crc kubenswrapper[4492]: I1126 06:50:10.234266 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:50:10 crc kubenswrapper[4492]: I1126 06:50:10.234275 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:10Z","lastTransitionTime":"2025-11-26T06:50:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:50:10 crc kubenswrapper[4492]: I1126 06:50:10.336167 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:50:10 crc kubenswrapper[4492]: I1126 06:50:10.336207 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:50:10 crc kubenswrapper[4492]: I1126 06:50:10.336215 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:50:10 crc kubenswrapper[4492]: I1126 06:50:10.336224 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:50:10 crc kubenswrapper[4492]: I1126 06:50:10.336230 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:10Z","lastTransitionTime":"2025-11-26T06:50:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:50:10 crc kubenswrapper[4492]: I1126 06:50:10.437678 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 06:50:10 crc kubenswrapper[4492]: I1126 06:50:10.437812 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 06:50:10 crc kubenswrapper[4492]: I1126 06:50:10.437829 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:50:10 crc kubenswrapper[4492]: I1126 06:50:10.437871 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:50:10 crc kubenswrapper[4492]: I1126 06:50:10.437882 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:50:10 crc kubenswrapper[4492]: I1126 06:50:10.437895 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:50:10 crc kubenswrapper[4492]: I1126 06:50:10.437902 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:10Z","lastTransitionTime":"2025-11-26T06:50:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:50:10 crc kubenswrapper[4492]: E1126 06:50:10.437868 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 06:50:10 crc kubenswrapper[4492]: E1126 06:50:10.437976 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 06:50:10 crc kubenswrapper[4492]: I1126 06:50:10.438027 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-s4gtb" Nov 26 06:50:10 crc kubenswrapper[4492]: E1126 06:50:10.438116 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-s4gtb" podUID="1cc59fbe-82e1-406b-95b1-a26b6b8ef467" Nov 26 06:50:10 crc kubenswrapper[4492]: I1126 06:50:10.539566 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:50:10 crc kubenswrapper[4492]: I1126 06:50:10.539589 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:50:10 crc kubenswrapper[4492]: I1126 06:50:10.539600 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:50:10 crc kubenswrapper[4492]: I1126 06:50:10.539615 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:50:10 crc kubenswrapper[4492]: I1126 06:50:10.539623 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:10Z","lastTransitionTime":"2025-11-26T06:50:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:50:10 crc kubenswrapper[4492]: I1126 06:50:10.641007 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:50:10 crc kubenswrapper[4492]: I1126 06:50:10.641057 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:50:10 crc kubenswrapper[4492]: I1126 06:50:10.641067 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:50:10 crc kubenswrapper[4492]: I1126 06:50:10.641080 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:50:10 crc kubenswrapper[4492]: I1126 06:50:10.641089 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:10Z","lastTransitionTime":"2025-11-26T06:50:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:50:10 crc kubenswrapper[4492]: I1126 06:50:10.742914 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:50:10 crc kubenswrapper[4492]: I1126 06:50:10.742946 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:50:10 crc kubenswrapper[4492]: I1126 06:50:10.742954 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:50:10 crc kubenswrapper[4492]: I1126 06:50:10.742964 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:50:10 crc kubenswrapper[4492]: I1126 06:50:10.742971 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:10Z","lastTransitionTime":"2025-11-26T06:50:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:50:10 crc kubenswrapper[4492]: I1126 06:50:10.844162 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:50:10 crc kubenswrapper[4492]: I1126 06:50:10.844206 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:50:10 crc kubenswrapper[4492]: I1126 06:50:10.844215 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:50:10 crc kubenswrapper[4492]: I1126 06:50:10.844226 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:50:10 crc kubenswrapper[4492]: I1126 06:50:10.844235 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:10Z","lastTransitionTime":"2025-11-26T06:50:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:50:10 crc kubenswrapper[4492]: I1126 06:50:10.946109 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:50:10 crc kubenswrapper[4492]: I1126 06:50:10.946156 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:50:10 crc kubenswrapper[4492]: I1126 06:50:10.946227 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:50:10 crc kubenswrapper[4492]: I1126 06:50:10.946265 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:50:10 crc kubenswrapper[4492]: I1126 06:50:10.946274 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:10Z","lastTransitionTime":"2025-11-26T06:50:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:50:11 crc kubenswrapper[4492]: I1126 06:50:11.047975 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:50:11 crc kubenswrapper[4492]: I1126 06:50:11.048009 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:50:11 crc kubenswrapper[4492]: I1126 06:50:11.048021 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:50:11 crc kubenswrapper[4492]: I1126 06:50:11.048033 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:50:11 crc kubenswrapper[4492]: I1126 06:50:11.048053 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:11Z","lastTransitionTime":"2025-11-26T06:50:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:50:11 crc kubenswrapper[4492]: I1126 06:50:11.149478 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:50:11 crc kubenswrapper[4492]: I1126 06:50:11.149501 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:50:11 crc kubenswrapper[4492]: I1126 06:50:11.149509 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:50:11 crc kubenswrapper[4492]: I1126 06:50:11.149518 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:50:11 crc kubenswrapper[4492]: I1126 06:50:11.149526 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:11Z","lastTransitionTime":"2025-11-26T06:50:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:50:11 crc kubenswrapper[4492]: I1126 06:50:11.251192 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:50:11 crc kubenswrapper[4492]: I1126 06:50:11.251226 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:50:11 crc kubenswrapper[4492]: I1126 06:50:11.251235 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:50:11 crc kubenswrapper[4492]: I1126 06:50:11.251247 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:50:11 crc kubenswrapper[4492]: I1126 06:50:11.251257 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:11Z","lastTransitionTime":"2025-11-26T06:50:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:50:11 crc kubenswrapper[4492]: I1126 06:50:11.353118 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:50:11 crc kubenswrapper[4492]: I1126 06:50:11.353212 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:50:11 crc kubenswrapper[4492]: I1126 06:50:11.353223 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:50:11 crc kubenswrapper[4492]: I1126 06:50:11.353233 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:50:11 crc kubenswrapper[4492]: I1126 06:50:11.353240 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:11Z","lastTransitionTime":"2025-11-26T06:50:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:50:11 crc kubenswrapper[4492]: I1126 06:50:11.438101 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 06:50:11 crc kubenswrapper[4492]: E1126 06:50:11.438212 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 06:50:11 crc kubenswrapper[4492]: I1126 06:50:11.454300 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:50:11 crc kubenswrapper[4492]: I1126 06:50:11.454330 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:50:11 crc kubenswrapper[4492]: I1126 06:50:11.454355 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:50:11 crc kubenswrapper[4492]: I1126 06:50:11.454369 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:50:11 crc kubenswrapper[4492]: I1126 06:50:11.454377 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:11Z","lastTransitionTime":"2025-11-26T06:50:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:50:11 crc kubenswrapper[4492]: I1126 06:50:11.555786 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:50:11 crc kubenswrapper[4492]: I1126 06:50:11.555834 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:50:11 crc kubenswrapper[4492]: I1126 06:50:11.555842 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:50:11 crc kubenswrapper[4492]: I1126 06:50:11.555851 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:50:11 crc kubenswrapper[4492]: I1126 06:50:11.555859 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:11Z","lastTransitionTime":"2025-11-26T06:50:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:50:11 crc kubenswrapper[4492]: I1126 06:50:11.657832 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:50:11 crc kubenswrapper[4492]: I1126 06:50:11.657945 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:50:11 crc kubenswrapper[4492]: I1126 06:50:11.658011 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:50:11 crc kubenswrapper[4492]: I1126 06:50:11.658086 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:50:11 crc kubenswrapper[4492]: I1126 06:50:11.658102 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:11Z","lastTransitionTime":"2025-11-26T06:50:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:50:11 crc kubenswrapper[4492]: I1126 06:50:11.759396 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:50:11 crc kubenswrapper[4492]: I1126 06:50:11.759418 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:50:11 crc kubenswrapper[4492]: I1126 06:50:11.759425 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:50:11 crc kubenswrapper[4492]: I1126 06:50:11.759434 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:50:11 crc kubenswrapper[4492]: I1126 06:50:11.759443 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:11Z","lastTransitionTime":"2025-11-26T06:50:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:50:11 crc kubenswrapper[4492]: I1126 06:50:11.861199 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:50:11 crc kubenswrapper[4492]: I1126 06:50:11.861233 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:50:11 crc kubenswrapper[4492]: I1126 06:50:11.861243 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:50:11 crc kubenswrapper[4492]: I1126 06:50:11.861258 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:50:11 crc kubenswrapper[4492]: I1126 06:50:11.861268 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:11Z","lastTransitionTime":"2025-11-26T06:50:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:50:11 crc kubenswrapper[4492]: I1126 06:50:11.963346 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:50:11 crc kubenswrapper[4492]: I1126 06:50:11.963650 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:50:11 crc kubenswrapper[4492]: I1126 06:50:11.963727 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:50:11 crc kubenswrapper[4492]: I1126 06:50:11.963797 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:50:11 crc kubenswrapper[4492]: I1126 06:50:11.963861 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:11Z","lastTransitionTime":"2025-11-26T06:50:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:50:12 crc kubenswrapper[4492]: I1126 06:50:12.065024 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:50:12 crc kubenswrapper[4492]: I1126 06:50:12.065080 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:50:12 crc kubenswrapper[4492]: I1126 06:50:12.065089 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:50:12 crc kubenswrapper[4492]: I1126 06:50:12.065100 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:50:12 crc kubenswrapper[4492]: I1126 06:50:12.065109 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:12Z","lastTransitionTime":"2025-11-26T06:50:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:50:12 crc kubenswrapper[4492]: I1126 06:50:12.166544 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:50:12 crc kubenswrapper[4492]: I1126 06:50:12.166641 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:50:12 crc kubenswrapper[4492]: I1126 06:50:12.166705 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:50:12 crc kubenswrapper[4492]: I1126 06:50:12.166780 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:50:12 crc kubenswrapper[4492]: I1126 06:50:12.166836 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:12Z","lastTransitionTime":"2025-11-26T06:50:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:50:12 crc kubenswrapper[4492]: I1126 06:50:12.268284 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:50:12 crc kubenswrapper[4492]: I1126 06:50:12.268330 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:50:12 crc kubenswrapper[4492]: I1126 06:50:12.268340 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:50:12 crc kubenswrapper[4492]: I1126 06:50:12.268352 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:50:12 crc kubenswrapper[4492]: I1126 06:50:12.268361 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:12Z","lastTransitionTime":"2025-11-26T06:50:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:50:12 crc kubenswrapper[4492]: I1126 06:50:12.370445 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:50:12 crc kubenswrapper[4492]: I1126 06:50:12.370498 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:50:12 crc kubenswrapper[4492]: I1126 06:50:12.370509 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:50:12 crc kubenswrapper[4492]: I1126 06:50:12.370521 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:50:12 crc kubenswrapper[4492]: I1126 06:50:12.370530 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:12Z","lastTransitionTime":"2025-11-26T06:50:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:50:12 crc kubenswrapper[4492]: I1126 06:50:12.438106 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 06:50:12 crc kubenswrapper[4492]: I1126 06:50:12.438230 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-s4gtb" Nov 26 06:50:12 crc kubenswrapper[4492]: E1126 06:50:12.438379 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-s4gtb" podUID="1cc59fbe-82e1-406b-95b1-a26b6b8ef467" Nov 26 06:50:12 crc kubenswrapper[4492]: I1126 06:50:12.438247 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 06:50:12 crc kubenswrapper[4492]: E1126 06:50:12.438543 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 06:50:12 crc kubenswrapper[4492]: E1126 06:50:12.438230 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 06:50:12 crc kubenswrapper[4492]: I1126 06:50:12.471868 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:50:12 crc kubenswrapper[4492]: I1126 06:50:12.471909 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:50:12 crc kubenswrapper[4492]: I1126 06:50:12.471917 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:50:12 crc kubenswrapper[4492]: I1126 06:50:12.471927 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:50:12 crc kubenswrapper[4492]: I1126 06:50:12.471937 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:12Z","lastTransitionTime":"2025-11-26T06:50:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:50:12 crc kubenswrapper[4492]: I1126 06:50:12.573686 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:50:12 crc kubenswrapper[4492]: I1126 06:50:12.573792 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:50:12 crc kubenswrapper[4492]: I1126 06:50:12.573897 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:50:12 crc kubenswrapper[4492]: I1126 06:50:12.573961 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:50:12 crc kubenswrapper[4492]: I1126 06:50:12.574026 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:12Z","lastTransitionTime":"2025-11-26T06:50:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:50:12 crc kubenswrapper[4492]: I1126 06:50:12.675354 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:50:12 crc kubenswrapper[4492]: I1126 06:50:12.675445 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:50:12 crc kubenswrapper[4492]: I1126 06:50:12.675510 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:50:12 crc kubenswrapper[4492]: I1126 06:50:12.675572 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:50:12 crc kubenswrapper[4492]: I1126 06:50:12.675622 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:12Z","lastTransitionTime":"2025-11-26T06:50:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:50:12 crc kubenswrapper[4492]: I1126 06:50:12.777016 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:50:12 crc kubenswrapper[4492]: I1126 06:50:12.777059 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:50:12 crc kubenswrapper[4492]: I1126 06:50:12.777068 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:50:12 crc kubenswrapper[4492]: I1126 06:50:12.777099 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:50:12 crc kubenswrapper[4492]: I1126 06:50:12.777108 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:12Z","lastTransitionTime":"2025-11-26T06:50:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:50:12 crc kubenswrapper[4492]: I1126 06:50:12.878251 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:50:12 crc kubenswrapper[4492]: I1126 06:50:12.878281 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:50:12 crc kubenswrapper[4492]: I1126 06:50:12.878289 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:50:12 crc kubenswrapper[4492]: I1126 06:50:12.878298 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:50:12 crc kubenswrapper[4492]: I1126 06:50:12.878306 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:12Z","lastTransitionTime":"2025-11-26T06:50:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:50:12 crc kubenswrapper[4492]: I1126 06:50:12.979595 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:50:12 crc kubenswrapper[4492]: I1126 06:50:12.979621 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:50:12 crc kubenswrapper[4492]: I1126 06:50:12.979630 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:50:12 crc kubenswrapper[4492]: I1126 06:50:12.979641 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:50:12 crc kubenswrapper[4492]: I1126 06:50:12.979650 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:12Z","lastTransitionTime":"2025-11-26T06:50:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:50:13 crc kubenswrapper[4492]: I1126 06:50:13.081260 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:50:13 crc kubenswrapper[4492]: I1126 06:50:13.081285 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:50:13 crc kubenswrapper[4492]: I1126 06:50:13.081294 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:50:13 crc kubenswrapper[4492]: I1126 06:50:13.081303 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:50:13 crc kubenswrapper[4492]: I1126 06:50:13.081331 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:13Z","lastTransitionTime":"2025-11-26T06:50:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:50:13 crc kubenswrapper[4492]: I1126 06:50:13.182392 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:50:13 crc kubenswrapper[4492]: I1126 06:50:13.182430 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:50:13 crc kubenswrapper[4492]: I1126 06:50:13.182440 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:50:13 crc kubenswrapper[4492]: I1126 06:50:13.182449 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:50:13 crc kubenswrapper[4492]: I1126 06:50:13.182456 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:13Z","lastTransitionTime":"2025-11-26T06:50:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:50:13 crc kubenswrapper[4492]: I1126 06:50:13.283868 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:50:13 crc kubenswrapper[4492]: I1126 06:50:13.283903 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:50:13 crc kubenswrapper[4492]: I1126 06:50:13.283912 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:50:13 crc kubenswrapper[4492]: I1126 06:50:13.283925 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:50:13 crc kubenswrapper[4492]: I1126 06:50:13.283934 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:13Z","lastTransitionTime":"2025-11-26T06:50:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:50:13 crc kubenswrapper[4492]: I1126 06:50:13.385640 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:50:13 crc kubenswrapper[4492]: I1126 06:50:13.385669 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:50:13 crc kubenswrapper[4492]: I1126 06:50:13.385677 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:50:13 crc kubenswrapper[4492]: I1126 06:50:13.385687 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:50:13 crc kubenswrapper[4492]: I1126 06:50:13.385696 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:13Z","lastTransitionTime":"2025-11-26T06:50:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:50:13 crc kubenswrapper[4492]: I1126 06:50:13.437987 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 06:50:13 crc kubenswrapper[4492]: E1126 06:50:13.438113 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 06:50:13 crc kubenswrapper[4492]: I1126 06:50:13.487842 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:50:13 crc kubenswrapper[4492]: I1126 06:50:13.487991 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:50:13 crc kubenswrapper[4492]: I1126 06:50:13.488062 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:50:13 crc kubenswrapper[4492]: I1126 06:50:13.488121 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:50:13 crc kubenswrapper[4492]: I1126 06:50:13.488195 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:13Z","lastTransitionTime":"2025-11-26T06:50:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:50:13 crc kubenswrapper[4492]: I1126 06:50:13.589702 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:50:13 crc kubenswrapper[4492]: I1126 06:50:13.589746 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:50:13 crc kubenswrapper[4492]: I1126 06:50:13.589755 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:50:13 crc kubenswrapper[4492]: I1126 06:50:13.589766 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:50:13 crc kubenswrapper[4492]: I1126 06:50:13.589774 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:13Z","lastTransitionTime":"2025-11-26T06:50:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:50:13 crc kubenswrapper[4492]: I1126 06:50:13.691626 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:50:13 crc kubenswrapper[4492]: I1126 06:50:13.691765 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:50:13 crc kubenswrapper[4492]: I1126 06:50:13.691836 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:50:13 crc kubenswrapper[4492]: I1126 06:50:13.691901 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:50:13 crc kubenswrapper[4492]: I1126 06:50:13.691958 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:13Z","lastTransitionTime":"2025-11-26T06:50:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:50:13 crc kubenswrapper[4492]: I1126 06:50:13.793416 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:50:13 crc kubenswrapper[4492]: I1126 06:50:13.793457 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:50:13 crc kubenswrapper[4492]: I1126 06:50:13.793466 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:50:13 crc kubenswrapper[4492]: I1126 06:50:13.793481 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:50:13 crc kubenswrapper[4492]: I1126 06:50:13.793489 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:13Z","lastTransitionTime":"2025-11-26T06:50:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:50:13 crc kubenswrapper[4492]: I1126 06:50:13.895411 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:50:13 crc kubenswrapper[4492]: I1126 06:50:13.895455 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:50:13 crc kubenswrapper[4492]: I1126 06:50:13.895464 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:50:13 crc kubenswrapper[4492]: I1126 06:50:13.895475 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:50:13 crc kubenswrapper[4492]: I1126 06:50:13.895481 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:13Z","lastTransitionTime":"2025-11-26T06:50:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:50:13 crc kubenswrapper[4492]: I1126 06:50:13.997335 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:50:13 crc kubenswrapper[4492]: I1126 06:50:13.997373 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:50:13 crc kubenswrapper[4492]: I1126 06:50:13.997381 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:50:13 crc kubenswrapper[4492]: I1126 06:50:13.997395 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:50:13 crc kubenswrapper[4492]: I1126 06:50:13.997403 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:13Z","lastTransitionTime":"2025-11-26T06:50:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:50:14 crc kubenswrapper[4492]: I1126 06:50:14.099351 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:50:14 crc kubenswrapper[4492]: I1126 06:50:14.099398 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:50:14 crc kubenswrapper[4492]: I1126 06:50:14.099426 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:50:14 crc kubenswrapper[4492]: I1126 06:50:14.099438 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:50:14 crc kubenswrapper[4492]: I1126 06:50:14.099446 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:14Z","lastTransitionTime":"2025-11-26T06:50:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:50:14 crc kubenswrapper[4492]: I1126 06:50:14.201276 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:50:14 crc kubenswrapper[4492]: I1126 06:50:14.201305 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:50:14 crc kubenswrapper[4492]: I1126 06:50:14.201321 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:50:14 crc kubenswrapper[4492]: I1126 06:50:14.201331 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:50:14 crc kubenswrapper[4492]: I1126 06:50:14.201338 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:14Z","lastTransitionTime":"2025-11-26T06:50:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:50:14 crc kubenswrapper[4492]: I1126 06:50:14.303270 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:50:14 crc kubenswrapper[4492]: I1126 06:50:14.303299 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:50:14 crc kubenswrapper[4492]: I1126 06:50:14.303309 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:50:14 crc kubenswrapper[4492]: I1126 06:50:14.303318 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:50:14 crc kubenswrapper[4492]: I1126 06:50:14.303326 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:14Z","lastTransitionTime":"2025-11-26T06:50:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:50:14 crc kubenswrapper[4492]: I1126 06:50:14.404214 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:50:14 crc kubenswrapper[4492]: I1126 06:50:14.404249 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:50:14 crc kubenswrapper[4492]: I1126 06:50:14.404259 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:50:14 crc kubenswrapper[4492]: I1126 06:50:14.404270 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:50:14 crc kubenswrapper[4492]: I1126 06:50:14.404279 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:14Z","lastTransitionTime":"2025-11-26T06:50:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:50:14 crc kubenswrapper[4492]: I1126 06:50:14.437628 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 06:50:14 crc kubenswrapper[4492]: E1126 06:50:14.437703 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 06:50:14 crc kubenswrapper[4492]: I1126 06:50:14.437737 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-s4gtb" Nov 26 06:50:14 crc kubenswrapper[4492]: I1126 06:50:14.437789 4492 util.go:30] "No sandbox for pod can be found. 
Nov 26 06:50:14 crc kubenswrapper[4492]: I1126 06:50:14.465529 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2gwwb" podStartSLOduration=86.46551786 podStartE2EDuration="1m26.46551786s" podCreationTimestamp="2025-11-26 06:48:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:50:14.453464993 +0000 UTC m=+110.337353290" watchObservedRunningTime="2025-11-26 06:50:14.46551786 +0000 UTC m=+110.349406158"
Nov 26 06:50:14 crc kubenswrapper[4492]: I1126 06:50:14.465669 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=92.465665317 podStartE2EDuration="1m32.465665317s" podCreationTimestamp="2025-11-26 06:48:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:50:14.465120623 +0000 UTC m=+110.349008921" watchObservedRunningTime="2025-11-26 06:50:14.465665317 +0000 UTC m=+110.349553615"
Nov 26 06:50:14 crc kubenswrapper[4492]: I1126 06:50:14.528337 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd/etcd-crc" podStartSLOduration=23.528324903 podStartE2EDuration="23.528324903s" podCreationTimestamp="2025-11-26 06:49:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:50:14.527840764 +0000 UTC m=+110.411729062" watchObservedRunningTime="2025-11-26 06:50:14.528324903 +0000 UTC m=+110.412213202"
Nov 26 06:50:14 crc kubenswrapper[4492]: I1126 06:50:14.528594 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/node-ca-hjxcm" podStartSLOduration=86.52858869 podStartE2EDuration="1m26.52858869s" podCreationTimestamp="2025-11-26 06:48:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:50:14.511671249 +0000 UTC m=+110.395559546" watchObservedRunningTime="2025-11-26 06:50:14.52858869 +0000 UTC m=+110.412476987"
Nov 26 06:50:14 crc kubenswrapper[4492]: I1126 06:50:14.582771 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-additional-cni-plugins-nrzjd" podStartSLOduration=86.582760452 podStartE2EDuration="1m26.582760452s" podCreationTimestamp="2025-11-26 06:48:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:50:14.58241938 +0000 UTC m=+110.466307678" watchObservedRunningTime="2025-11-26 06:50:14.582760452 +0000 UTC m=+110.466648750"
Nov 26 06:50:14 crc kubenswrapper[4492]: I1126 06:50:14.597941 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/node-resolver-6lnwf" podStartSLOduration=86.597931169 podStartE2EDuration="1m26.597931169s" podCreationTimestamp="2025-11-26 06:48:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:50:14.59022852 +0000 UTC m=+110.474116848" watchObservedRunningTime="2025-11-26 06:50:14.597931169 +0000 UTC m=+110.481819467"
Nov 26 06:50:14 crc kubenswrapper[4492]: I1126 06:50:14.610550 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" podStartSLOduration=38.610534971999996 podStartE2EDuration="38.610534972s" podCreationTimestamp="2025-11-26 06:49:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:50:14.610105103 +0000 UTC m=+110.493993401" watchObservedRunningTime="2025-11-26 06:50:14.610534972 +0000 UTC m=+110.494423269"
Nov 26 06:50:14 crc kubenswrapper[4492]: I1126 06:50:14.610880 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podStartSLOduration=86.610875682 podStartE2EDuration="1m26.610875682s" podCreationTimestamp="2025-11-26 06:48:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:50:14.597701708 +0000 UTC m=+110.481590006" watchObservedRunningTime="2025-11-26 06:50:14.610875682 +0000 UTC m=+110.494763980"
Nov 26 06:50:14 crc kubenswrapper[4492]: I1126 06:50:14.628298 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podStartSLOduration=87.628283645 podStartE2EDuration="1m27.628283645s" podCreationTimestamp="2025-11-26 06:48:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:50:14.619826969 +0000 UTC m=+110.503715267" watchObservedRunningTime="2025-11-26 06:50:14.628283645 +0000 UTC m=+110.512171944"
Nov 26 06:50:14 crc kubenswrapper[4492]: I1126 06:50:14.628864 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podStartSLOduration=63.628859778 podStartE2EDuration="1m3.628859778s" podCreationTimestamp="2025-11-26 06:49:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:50:14.627684279 +0000 UTC m=+110.511572577" watchObservedRunningTime="2025-11-26 06:50:14.628859778 +0000 UTC m=+110.512748076"
Nov 26 06:50:16 crc kubenswrapper[4492]: I1126 06:50:16.438622 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 26 06:50:16 crc kubenswrapper[4492]: I1126 06:50:16.438650 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-s4gtb"
Nov 26 06:50:16 crc kubenswrapper[4492]: E1126 06:50:16.438708 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 26 06:50:16 crc kubenswrapper[4492]: E1126 06:50:16.438886 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-s4gtb" podUID="1cc59fbe-82e1-406b-95b1-a26b6b8ef467"
Nov 26 06:50:16 crc kubenswrapper[4492]: I1126 06:50:16.438890 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 26 06:50:16 crc kubenswrapper[4492]: E1126 06:50:16.438971 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 06:50:16 crc kubenswrapper[4492]: I1126 06:50:16.439419 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:50:16 crc kubenswrapper[4492]: I1126 06:50:16.439446 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:50:16 crc kubenswrapper[4492]: I1126 06:50:16.439455 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:50:16 crc kubenswrapper[4492]: I1126 06:50:16.439464 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:50:16 crc kubenswrapper[4492]: I1126 06:50:16.439471 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:16Z","lastTransitionTime":"2025-11-26T06:50:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:50:16 crc kubenswrapper[4492]: I1126 06:50:16.541131 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:50:16 crc kubenswrapper[4492]: I1126 06:50:16.541157 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:50:16 crc kubenswrapper[4492]: I1126 06:50:16.541166 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:50:16 crc kubenswrapper[4492]: I1126 06:50:16.541196 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:50:16 crc kubenswrapper[4492]: I1126 06:50:16.541204 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:16Z","lastTransitionTime":"2025-11-26T06:50:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:50:16 crc kubenswrapper[4492]: I1126 06:50:16.642960 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:50:16 crc kubenswrapper[4492]: I1126 06:50:16.643009 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:50:16 crc kubenswrapper[4492]: I1126 06:50:16.643028 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:50:16 crc kubenswrapper[4492]: I1126 06:50:16.643049 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:50:16 crc kubenswrapper[4492]: I1126 06:50:16.643058 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:16Z","lastTransitionTime":"2025-11-26T06:50:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:50:16 crc kubenswrapper[4492]: I1126 06:50:16.744463 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:50:16 crc kubenswrapper[4492]: I1126 06:50:16.744484 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:50:16 crc kubenswrapper[4492]: I1126 06:50:16.744492 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:50:16 crc kubenswrapper[4492]: I1126 06:50:16.744505 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:50:16 crc kubenswrapper[4492]: I1126 06:50:16.744512 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:16Z","lastTransitionTime":"2025-11-26T06:50:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:50:16 crc kubenswrapper[4492]: I1126 06:50:16.846397 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:50:16 crc kubenswrapper[4492]: I1126 06:50:16.846420 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:50:16 crc kubenswrapper[4492]: I1126 06:50:16.846428 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:50:16 crc kubenswrapper[4492]: I1126 06:50:16.846436 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:50:16 crc kubenswrapper[4492]: I1126 06:50:16.846443 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:16Z","lastTransitionTime":"2025-11-26T06:50:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:50:16 crc kubenswrapper[4492]: I1126 06:50:16.947932 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:50:16 crc kubenswrapper[4492]: I1126 06:50:16.947964 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:50:16 crc kubenswrapper[4492]: I1126 06:50:16.947972 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:50:16 crc kubenswrapper[4492]: I1126 06:50:16.947982 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:50:16 crc kubenswrapper[4492]: I1126 06:50:16.947990 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:16Z","lastTransitionTime":"2025-11-26T06:50:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:50:17 crc kubenswrapper[4492]: I1126 06:50:17.049770 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:50:17 crc kubenswrapper[4492]: I1126 06:50:17.049804 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:50:17 crc kubenswrapper[4492]: I1126 06:50:17.049813 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:50:17 crc kubenswrapper[4492]: I1126 06:50:17.049824 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:50:17 crc kubenswrapper[4492]: I1126 06:50:17.049832 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:17Z","lastTransitionTime":"2025-11-26T06:50:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:50:17 crc kubenswrapper[4492]: I1126 06:50:17.151365 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:50:17 crc kubenswrapper[4492]: I1126 06:50:17.151392 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:50:17 crc kubenswrapper[4492]: I1126 06:50:17.151402 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:50:17 crc kubenswrapper[4492]: I1126 06:50:17.151431 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:50:17 crc kubenswrapper[4492]: I1126 06:50:17.151440 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:17Z","lastTransitionTime":"2025-11-26T06:50:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 06:50:17 crc kubenswrapper[4492]: I1126 06:50:17.253402 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:50:17 crc kubenswrapper[4492]: I1126 06:50:17.253439 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:50:17 crc kubenswrapper[4492]: I1126 06:50:17.253448 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:50:17 crc kubenswrapper[4492]: I1126 06:50:17.253458 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:50:17 crc kubenswrapper[4492]: I1126 06:50:17.253465 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:17Z","lastTransitionTime":"2025-11-26T06:50:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:50:17 crc kubenswrapper[4492]: I1126 06:50:17.355281 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:50:17 crc kubenswrapper[4492]: I1126 06:50:17.355308 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:50:17 crc kubenswrapper[4492]: I1126 06:50:17.355316 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:50:17 crc kubenswrapper[4492]: I1126 06:50:17.355326 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:50:17 crc kubenswrapper[4492]: I1126 06:50:17.355334 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:17Z","lastTransitionTime":"2025-11-26T06:50:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:50:17 crc kubenswrapper[4492]: I1126 06:50:17.437937 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 06:50:17 crc kubenswrapper[4492]: E1126 06:50:17.438014 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 06:50:17 crc kubenswrapper[4492]: I1126 06:50:17.457488 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:50:17 crc kubenswrapper[4492]: I1126 06:50:17.457514 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:50:17 crc kubenswrapper[4492]: I1126 06:50:17.457522 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:50:17 crc kubenswrapper[4492]: I1126 06:50:17.457533 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:50:17 crc kubenswrapper[4492]: I1126 06:50:17.457542 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:17Z","lastTransitionTime":"2025-11-26T06:50:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:50:17 crc kubenswrapper[4492]: I1126 06:50:17.484523 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 06:50:17 crc kubenswrapper[4492]: I1126 06:50:17.484551 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 06:50:17 crc kubenswrapper[4492]: I1126 06:50:17.484560 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 06:50:17 crc kubenswrapper[4492]: I1126 06:50:17.484571 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 06:50:17 crc kubenswrapper[4492]: I1126 06:50:17.484577 4492 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T06:50:17Z","lastTransitionTime":"2025-11-26T06:50:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 06:50:17 crc kubenswrapper[4492]: I1126 06:50:17.512236 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-5bshd" podStartSLOduration=89.512224172 podStartE2EDuration="1m29.512224172s" podCreationTimestamp="2025-11-26 06:48:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:50:14.641581152 +0000 UTC m=+110.525469450" watchObservedRunningTime="2025-11-26 06:50:17.512224172 +0000 UTC m=+113.396112470" Nov 26 06:50:17 crc kubenswrapper[4492]: I1126 06:50:17.512868 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-version/cluster-version-operator-5c965bbfc6-p65pg"] Nov 26 06:50:17 crc kubenswrapper[4492]: I1126 06:50:17.513164 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-p65pg" Nov 26 06:50:17 crc kubenswrapper[4492]: I1126 06:50:17.514399 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Nov 26 06:50:17 crc kubenswrapper[4492]: I1126 06:50:17.514515 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Nov 26 06:50:17 crc kubenswrapper[4492]: I1126 06:50:17.515071 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Nov 26 06:50:17 crc kubenswrapper[4492]: I1126 06:50:17.515464 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Nov 26 06:50:17 crc kubenswrapper[4492]: I1126 06:50:17.613459 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/8ef43aea-8071-4d8a-b77b-773e1958742f-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-p65pg\" (UID: \"8ef43aea-8071-4d8a-b77b-773e1958742f\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-p65pg" Nov 26 06:50:17 crc kubenswrapper[4492]: I1126 06:50:17.613498 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/8ef43aea-8071-4d8a-b77b-773e1958742f-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-p65pg\" (UID: \"8ef43aea-8071-4d8a-b77b-773e1958742f\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-p65pg" Nov 26 06:50:17 crc kubenswrapper[4492]: I1126 06:50:17.613516 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/8ef43aea-8071-4d8a-b77b-773e1958742f-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-p65pg\" (UID: \"8ef43aea-8071-4d8a-b77b-773e1958742f\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-p65pg" Nov 26 06:50:17 crc kubenswrapper[4492]: I1126 06:50:17.613571 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8ef43aea-8071-4d8a-b77b-773e1958742f-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-p65pg\" (UID: \"8ef43aea-8071-4d8a-b77b-773e1958742f\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-p65pg" Nov 26 06:50:17 crc kubenswrapper[4492]: I1126 06:50:17.613588 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/8ef43aea-8071-4d8a-b77b-773e1958742f-service-ca\") pod \"cluster-version-operator-5c965bbfc6-p65pg\" (UID: \"8ef43aea-8071-4d8a-b77b-773e1958742f\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-p65pg" Nov 26 06:50:17 crc kubenswrapper[4492]: I1126 06:50:17.714776 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/8ef43aea-8071-4d8a-b77b-773e1958742f-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-p65pg\" (UID: \"8ef43aea-8071-4d8a-b77b-773e1958742f\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-p65pg" Nov 26 06:50:17 crc 
kubenswrapper[4492]: I1126 06:50:17.714816 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/8ef43aea-8071-4d8a-b77b-773e1958742f-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-p65pg\" (UID: \"8ef43aea-8071-4d8a-b77b-773e1958742f\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-p65pg" Nov 26 06:50:17 crc kubenswrapper[4492]: I1126 06:50:17.714842 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/8ef43aea-8071-4d8a-b77b-773e1958742f-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-p65pg\" (UID: \"8ef43aea-8071-4d8a-b77b-773e1958742f\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-p65pg" Nov 26 06:50:17 crc kubenswrapper[4492]: I1126 06:50:17.714880 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8ef43aea-8071-4d8a-b77b-773e1958742f-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-p65pg\" (UID: \"8ef43aea-8071-4d8a-b77b-773e1958742f\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-p65pg" Nov 26 06:50:17 crc kubenswrapper[4492]: I1126 06:50:17.714897 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/8ef43aea-8071-4d8a-b77b-773e1958742f-service-ca\") pod \"cluster-version-operator-5c965bbfc6-p65pg\" (UID: \"8ef43aea-8071-4d8a-b77b-773e1958742f\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-p65pg" Nov 26 06:50:17 crc kubenswrapper[4492]: I1126 06:50:17.714928 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/8ef43aea-8071-4d8a-b77b-773e1958742f-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-p65pg\" (UID: \"8ef43aea-8071-4d8a-b77b-773e1958742f\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-p65pg" Nov 26 06:50:17 crc kubenswrapper[4492]: I1126 06:50:17.715034 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/8ef43aea-8071-4d8a-b77b-773e1958742f-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-p65pg\" (UID: \"8ef43aea-8071-4d8a-b77b-773e1958742f\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-p65pg" Nov 26 06:50:17 crc kubenswrapper[4492]: I1126 06:50:17.715587 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/8ef43aea-8071-4d8a-b77b-773e1958742f-service-ca\") pod \"cluster-version-operator-5c965bbfc6-p65pg\" (UID: \"8ef43aea-8071-4d8a-b77b-773e1958742f\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-p65pg" Nov 26 06:50:17 crc kubenswrapper[4492]: I1126 06:50:17.719278 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8ef43aea-8071-4d8a-b77b-773e1958742f-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-p65pg\" (UID: \"8ef43aea-8071-4d8a-b77b-773e1958742f\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-p65pg" Nov 26 06:50:17 crc kubenswrapper[4492]: I1126 06:50:17.726996 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" 
(UniqueName: \"kubernetes.io/projected/8ef43aea-8071-4d8a-b77b-773e1958742f-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-p65pg\" (UID: \"8ef43aea-8071-4d8a-b77b-773e1958742f\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-p65pg" Nov 26 06:50:17 crc kubenswrapper[4492]: I1126 06:50:17.824645 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-p65pg" Nov 26 06:50:17 crc kubenswrapper[4492]: I1126 06:50:17.886294 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-p65pg" event={"ID":"8ef43aea-8071-4d8a-b77b-773e1958742f","Type":"ContainerStarted","Data":"3e0cc735585eca3715dcd353b9859d070627cf66defc81a118d464879552944e"} Nov 26 06:50:18 crc kubenswrapper[4492]: I1126 06:50:18.437913 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 06:50:18 crc kubenswrapper[4492]: I1126 06:50:18.437965 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-s4gtb" Nov 26 06:50:18 crc kubenswrapper[4492]: E1126 06:50:18.438031 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 06:50:18 crc kubenswrapper[4492]: I1126 06:50:18.438094 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 06:50:18 crc kubenswrapper[4492]: E1126 06:50:18.438136 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-s4gtb" podUID="1cc59fbe-82e1-406b-95b1-a26b6b8ef467" Nov 26 06:50:18 crc kubenswrapper[4492]: E1126 06:50:18.438201 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 06:50:18 crc kubenswrapper[4492]: I1126 06:50:18.438747 4492 scope.go:117] "RemoveContainer" containerID="c0888a57c383ad8c0a363ee29bc31841ce4175cb3aeb1b825da7efd383d4265c" Nov 26 06:50:18 crc kubenswrapper[4492]: I1126 06:50:18.890061 4492 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-lghgp_9b104695-0850-4fb3-b2f8-f764435f8694/ovnkube-controller/3.log" Nov 26 06:50:18 crc kubenswrapper[4492]: I1126 06:50:18.892289 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" event={"ID":"9b104695-0850-4fb3-b2f8-f764435f8694","Type":"ContainerStarted","Data":"4ea0c5b21e02525e24489f438a88ad9cea344b6d4d46a7887baf8dc07c82a334"} Nov 26 06:50:18 crc kubenswrapper[4492]: I1126 06:50:18.892616 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" Nov 26 06:50:18 crc kubenswrapper[4492]: I1126 06:50:18.893137 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-p65pg" event={"ID":"8ef43aea-8071-4d8a-b77b-773e1958742f","Type":"ContainerStarted","Data":"beaf34ff3b0dfa87ab24f279e4fb25f9273d3bba5da55cd0de9e87ac89a93b31"} Nov 26 06:50:18 crc kubenswrapper[4492]: I1126 06:50:18.915941 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" podStartSLOduration=90.915931048 podStartE2EDuration="1m30.915931048s" podCreationTimestamp="2025-11-26 06:48:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:50:18.915161842 +0000 UTC m=+114.799050140" watchObservedRunningTime="2025-11-26 06:50:18.915931048 +0000 UTC m=+114.799819346" Nov 26 06:50:18 crc kubenswrapper[4492]: I1126 06:50:18.925195 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-p65pg" podStartSLOduration=90.925166259 podStartE2EDuration="1m30.925166259s" podCreationTimestamp="2025-11-26 06:48:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:50:18.924490037 +0000 UTC m=+114.808378346" watchObservedRunningTime="2025-11-26 06:50:18.925166259 +0000 UTC m=+114.809054556" Nov 26 06:50:19 crc kubenswrapper[4492]: I1126 06:50:19.048602 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-s4gtb"] Nov 26 06:50:19 crc kubenswrapper[4492]: I1126 06:50:19.048695 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-s4gtb" Nov 26 06:50:19 crc kubenswrapper[4492]: E1126 06:50:19.048771 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-s4gtb" podUID="1cc59fbe-82e1-406b-95b1-a26b6b8ef467" Nov 26 06:50:19 crc kubenswrapper[4492]: I1126 06:50:19.438450 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 06:50:19 crc kubenswrapper[4492]: E1126 06:50:19.438670 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 06:50:20 crc kubenswrapper[4492]: I1126 06:50:20.437792 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 06:50:20 crc kubenswrapper[4492]: I1126 06:50:20.437884 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 06:50:20 crc kubenswrapper[4492]: E1126 06:50:20.438027 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 06:50:20 crc kubenswrapper[4492]: E1126 06:50:20.438136 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.437671 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-s4gtb" Nov 26 06:50:21 crc kubenswrapper[4492]: E1126 06:50:21.437782 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-s4gtb" podUID="1cc59fbe-82e1-406b-95b1-a26b6b8ef467" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.438286 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 06:50:21 crc kubenswrapper[4492]: E1126 06:50:21.438427 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.638556 4492 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeReady" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.638651 4492 kubelet_node_status.go:538] "Fast updating node status as it just became ready" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.664933 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-kv6rz"] Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.665401 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-kv6rz" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.665669 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console-operator/console-operator-58897d9998-7nxg8"] Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.666018 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-7nxg8" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.666210 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-6cm4x"] Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.666541 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-6cm4x" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.667192 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-f9d7485db-v92zj"] Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.667494 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-v92zj" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.668653 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-8q29r"] Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.670581 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-8q29r" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.671685 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-vl9xd"] Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.672034 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-vl9xd" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.672111 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.673450 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-z6czh"] Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.673833 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-z6czh" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.674713 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.674881 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.675120 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.675142 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.675535 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.677324 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-vvl6g"] Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.677715 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-vvl6g" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.677916 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-b65ps"] Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.678284 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-b65ps" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.679205 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/downloads-7954f5f757-zvw72"] Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.679479 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-zvw72" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.679705 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-machine-approver/machine-approver-56656f9798-g2gzv"] Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.680053 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-g2gzv" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.681508 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.681763 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.682201 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.682649 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.682965 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.685151 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.685605 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-txj8k"] Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.685927 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-mstsb"] Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.686113 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-2d74v"] Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.686363 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-2d74v" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.686644 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-txj8k" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.686846 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-mstsb" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.689146 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.689287 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.689403 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.689517 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.689644 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.690280 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.690420 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.690508 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.690607 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.690698 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.690803 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.690882 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.690960 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.691052 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.691150 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.691534 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.691739 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.691833 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.691916 4492 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-machine-api"/"machine-api-operator-tls" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.692005 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.692510 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.695848 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-284jz"] Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.705957 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.706001 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.706427 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.706535 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.706543 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-kkhsz"] Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.706625 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-284jz" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.706792 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.707540 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-62jrz"] Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.709314 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-f4fjj"] Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.709843 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-kkhsz" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.714201 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-62jrz" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.730410 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-f4fjj" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.731222 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-q68m6"] Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.731604 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-q68m6" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.741909 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.741910 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.742244 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.743295 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-c6mkp"] Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.743754 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-c6mkp" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.748477 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-sb6qj"] Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.748835 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-sb6qj" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.749415 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.749559 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.749693 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.749786 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.749920 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.749929 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.750253 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.750369 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.750374 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1fade707-6af5-462b-bc3b-421465649292-config\") pod \"machine-approver-56656f9798-g2gzv\" (UID: \"1fade707-6af5-462b-bc3b-421465649292\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-g2gzv" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.750473 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/a0421328-5a0e-4e84-ba97-1926057962e6-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-2d74v\" (UID: \"a0421328-5a0e-4e84-ba97-1926057962e6\") " pod="openshift-authentication/oauth-openshift-558db77b4-2d74v" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.750498 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8fe63b27-ce8d-40a9-96db-fd485ede156a-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-kkhsz\" (UID: \"8fe63b27-ce8d-40a9-96db-fd485ede156a\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-kkhsz" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.750518 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ee15157b-cc93-408f-9520-421d06b48f34-serving-cert\") pod \"controller-manager-879f6c89f-kv6rz\" (UID: \"ee15157b-cc93-408f-9520-421d06b48f34\") " pod="openshift-controller-manager/controller-manager-879f6c89f-kv6rz" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.750534 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/ab14021b-87d7-43d0-9357-e8739e2d7dd1-oauth-serving-cert\") pod \"console-f9d7485db-v92zj\" (UID: \"ab14021b-87d7-43d0-9357-e8739e2d7dd1\") " pod="openshift-console/console-f9d7485db-v92zj" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.750551 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kfckt\" (UniqueName: \"kubernetes.io/projected/a0421328-5a0e-4e84-ba97-1926057962e6-kube-api-access-kfckt\") pod \"oauth-openshift-558db77b4-2d74v\" (UID: \"a0421328-5a0e-4e84-ba97-1926057962e6\") " pod="openshift-authentication/oauth-openshift-558db77b4-2d74v" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.750578 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/85b4355d-1d0f-4cf3-9902-4b68bd36704a-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-8q29r\" (UID: \"85b4355d-1d0f-4cf3-9902-4b68bd36704a\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-8q29r" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.750596 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/af74f854-fe31-479b-b8c7-83ec85b6d279-images\") pod \"machine-api-operator-5694c8668f-vl9xd\" (UID: \"af74f854-fe31-479b-b8c7-83ec85b6d279\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-vl9xd" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.750612 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/abfcd3ff-81af-4eff-91d1-3329b3b437af-audit\") pod \"apiserver-76f77b778f-284jz\" (UID: \"abfcd3ff-81af-4eff-91d1-3329b3b437af\") " pod="openshift-apiserver/apiserver-76f77b778f-284jz" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.750631 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-import-ca\" (UniqueName: 
\"kubernetes.io/configmap/abfcd3ff-81af-4eff-91d1-3329b3b437af-image-import-ca\") pod \"apiserver-76f77b778f-284jz\" (UID: \"abfcd3ff-81af-4eff-91d1-3329b3b437af\") " pod="openshift-apiserver/apiserver-76f77b778f-284jz" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.750646 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/85b4355d-1d0f-4cf3-9902-4b68bd36704a-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-8q29r\" (UID: \"85b4355d-1d0f-4cf3-9902-4b68bd36704a\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-8q29r" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.750662 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/a0421328-5a0e-4e84-ba97-1926057962e6-audit-dir\") pod \"oauth-openshift-558db77b4-2d74v\" (UID: \"a0421328-5a0e-4e84-ba97-1926057962e6\") " pod="openshift-authentication/oauth-openshift-558db77b4-2d74v" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.750692 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/ab14021b-87d7-43d0-9357-e8739e2d7dd1-console-config\") pod \"console-f9d7485db-v92zj\" (UID: \"ab14021b-87d7-43d0-9357-e8739e2d7dd1\") " pod="openshift-console/console-f9d7485db-v92zj" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.750708 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/61b9d2cc-8f4e-41b9-a186-c6f1613da80d-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-6cm4x\" (UID: \"61b9d2cc-8f4e-41b9-a186-c6f1613da80d\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-6cm4x" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.750724 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/185556de-8388-45e5-b20e-f5f7fca74dd4-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-62jrz\" (UID: \"185556de-8388-45e5-b20e-f5f7fca74dd4\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-62jrz" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.750750 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m8d9f\" (UniqueName: \"kubernetes.io/projected/9b62bab1-bcc8-45a1-8b79-e6fadfce9e3c-kube-api-access-m8d9f\") pod \"ingress-operator-5b745b69d9-txj8k\" (UID: \"9b62bab1-bcc8-45a1-8b79-e6fadfce9e3c\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-txj8k" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.750767 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/a0421328-5a0e-4e84-ba97-1926057962e6-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-2d74v\" (UID: \"a0421328-5a0e-4e84-ba97-1926057962e6\") " pod="openshift-authentication/oauth-openshift-558db77b4-2d74v" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.750784 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a0421328-5a0e-4e84-ba97-1926057962e6-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-2d74v\" (UID: \"a0421328-5a0e-4e84-ba97-1926057962e6\") " pod="openshift-authentication/oauth-openshift-558db77b4-2d74v" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.750800 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/85b4355d-1d0f-4cf3-9902-4b68bd36704a-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-8q29r\" (UID: \"85b4355d-1d0f-4cf3-9902-4b68bd36704a\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-8q29r" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.750817 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8qq4r\" (UniqueName: \"kubernetes.io/projected/587ccafa-460d-41b6-bced-9a82822fa43c-kube-api-access-8qq4r\") pod \"downloads-7954f5f757-zvw72\" (UID: \"587ccafa-460d-41b6-bced-9a82822fa43c\") " pod="openshift-console/downloads-7954f5f757-zvw72" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.750833 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/abfcd3ff-81af-4eff-91d1-3329b3b437af-node-pullsecrets\") pod \"apiserver-76f77b778f-284jz\" (UID: \"abfcd3ff-81af-4eff-91d1-3329b3b437af\") " pod="openshift-apiserver/apiserver-76f77b778f-284jz" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.750848 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/a0421328-5a0e-4e84-ba97-1926057962e6-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-2d74v\" (UID: \"a0421328-5a0e-4e84-ba97-1926057962e6\") " pod="openshift-authentication/oauth-openshift-558db77b4-2d74v" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.750886 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zh69p\" (UniqueName: \"kubernetes.io/projected/ab14021b-87d7-43d0-9357-e8739e2d7dd1-kube-api-access-zh69p\") pod \"console-f9d7485db-v92zj\" (UID: \"ab14021b-87d7-43d0-9357-e8739e2d7dd1\") " pod="openshift-console/console-f9d7485db-v92zj" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.750950 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1394fe3d-57d3-4340-9c97-1d75ac5e8ce4-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-mstsb\" (UID: \"1394fe3d-57d3-4340-9c97-1d75ac5e8ce4\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-mstsb" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.750970 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/8fe63b27-ce8d-40a9-96db-fd485ede156a-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-kkhsz\" (UID: \"8fe63b27-ce8d-40a9-96db-fd485ede156a\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-kkhsz" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.751071 4492 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fkmck\" (UniqueName: \"kubernetes.io/projected/774cf983-095e-498f-9d55-6b7c5be37265-kube-api-access-fkmck\") pod \"openshift-config-operator-7777fb866f-vvl6g\" (UID: \"774cf983-095e-498f-9d55-6b7c5be37265\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-vvl6g" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.751095 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/af74f854-fe31-479b-b8c7-83ec85b6d279-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-vl9xd\" (UID: \"af74f854-fe31-479b-b8c7-83ec85b6d279\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-vl9xd" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.751214 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/ab14021b-87d7-43d0-9357-e8739e2d7dd1-service-ca\") pod \"console-f9d7485db-v92zj\" (UID: \"ab14021b-87d7-43d0-9357-e8739e2d7dd1\") " pod="openshift-console/console-f9d7485db-v92zj" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.751240 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/aa815e7b-9b9b-4dd9-bd08-0104024e227e-serving-cert\") pod \"route-controller-manager-6576b87f9c-z6czh\" (UID: \"aa815e7b-9b9b-4dd9-bd08-0104024e227e\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-z6czh" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.751283 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/abfcd3ff-81af-4eff-91d1-3329b3b437af-audit-dir\") pod \"apiserver-76f77b778f-284jz\" (UID: \"abfcd3ff-81af-4eff-91d1-3329b3b437af\") " pod="openshift-apiserver/apiserver-76f77b778f-284jz" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.751356 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/a0421328-5a0e-4e84-ba97-1926057962e6-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-2d74v\" (UID: \"a0421328-5a0e-4e84-ba97-1926057962e6\") " pod="openshift-authentication/oauth-openshift-558db77b4-2d74v" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.751388 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/a0421328-5a0e-4e84-ba97-1926057962e6-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-2d74v\" (UID: \"a0421328-5a0e-4e84-ba97-1926057962e6\") " pod="openshift-authentication/oauth-openshift-558db77b4-2d74v" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.751433 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/774cf983-095e-498f-9d55-6b7c5be37265-available-featuregates\") pod \"openshift-config-operator-7777fb866f-vvl6g\" (UID: \"774cf983-095e-498f-9d55-6b7c5be37265\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-vvl6g" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.751452 4492 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5xc2n\" (UniqueName: \"kubernetes.io/projected/1fade707-6af5-462b-bc3b-421465649292-kube-api-access-5xc2n\") pod \"machine-approver-56656f9798-g2gzv\" (UID: \"1fade707-6af5-462b-bc3b-421465649292\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-g2gzv" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.751468 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/815ac909-aa66-4d5c-bbaf-1ef88810cb22-serving-cert\") pod \"console-operator-58897d9998-7nxg8\" (UID: \"815ac909-aa66-4d5c-bbaf-1ef88810cb22\") " pod="openshift-console-operator/console-operator-58897d9998-7nxg8" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.751530 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/a0421328-5a0e-4e84-ba97-1926057962e6-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-2d74v\" (UID: \"a0421328-5a0e-4e84-ba97-1926057962e6\") " pod="openshift-authentication/oauth-openshift-558db77b4-2d74v" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.751548 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/ab14021b-87d7-43d0-9357-e8739e2d7dd1-console-serving-cert\") pod \"console-f9d7485db-v92zj\" (UID: \"ab14021b-87d7-43d0-9357-e8739e2d7dd1\") " pod="openshift-console/console-f9d7485db-v92zj" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.751604 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/815ac909-aa66-4d5c-bbaf-1ef88810cb22-config\") pod \"console-operator-58897d9998-7nxg8\" (UID: \"815ac909-aa66-4d5c-bbaf-1ef88810cb22\") " pod="openshift-console-operator/console-operator-58897d9998-7nxg8" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.751623 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9b62bab1-bcc8-45a1-8b79-e6fadfce9e3c-trusted-ca\") pod \"ingress-operator-5b745b69d9-txj8k\" (UID: \"9b62bab1-bcc8-45a1-8b79-e6fadfce9e3c\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-txj8k" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.751730 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6z7s6\" (UniqueName: \"kubernetes.io/projected/61b9d2cc-8f4e-41b9-a186-c6f1613da80d-kube-api-access-6z7s6\") pod \"cluster-samples-operator-665b6dd947-6cm4x\" (UID: \"61b9d2cc-8f4e-41b9-a186-c6f1613da80d\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-6cm4x" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.751749 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5ncvn\" (UniqueName: \"kubernetes.io/projected/abfcd3ff-81af-4eff-91d1-3329b3b437af-kube-api-access-5ncvn\") pod \"apiserver-76f77b778f-284jz\" (UID: \"abfcd3ff-81af-4eff-91d1-3329b3b437af\") " pod="openshift-apiserver/apiserver-76f77b778f-284jz" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.751804 4492 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/1fade707-6af5-462b-bc3b-421465649292-auth-proxy-config\") pod \"machine-approver-56656f9798-g2gzv\" (UID: \"1fade707-6af5-462b-bc3b-421465649292\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-g2gzv" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.751827 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2dsh7\" (UniqueName: \"kubernetes.io/projected/85b4355d-1d0f-4cf3-9902-4b68bd36704a-kube-api-access-2dsh7\") pod \"cluster-image-registry-operator-dc59b4c8b-8q29r\" (UID: \"85b4355d-1d0f-4cf3-9902-4b68bd36704a\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-8q29r" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.751842 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/9b62bab1-bcc8-45a1-8b79-e6fadfce9e3c-bound-sa-token\") pod \"ingress-operator-5b745b69d9-txj8k\" (UID: \"9b62bab1-bcc8-45a1-8b79-e6fadfce9e3c\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-txj8k" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.751894 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/abfcd3ff-81af-4eff-91d1-3329b3b437af-trusted-ca-bundle\") pod \"apiserver-76f77b778f-284jz\" (UID: \"abfcd3ff-81af-4eff-91d1-3329b3b437af\") " pod="openshift-apiserver/apiserver-76f77b778f-284jz" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.751916 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/abfcd3ff-81af-4eff-91d1-3329b3b437af-etcd-client\") pod \"apiserver-76f77b778f-284jz\" (UID: \"abfcd3ff-81af-4eff-91d1-3329b3b437af\") " pod="openshift-apiserver/apiserver-76f77b778f-284jz" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.751958 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/6498f1f9-685a-4a0f-a108-ad49ed7b7576-metrics-tls\") pod \"dns-operator-744455d44c-b65ps\" (UID: \"6498f1f9-685a-4a0f-a108-ad49ed7b7576\") " pod="openshift-dns-operator/dns-operator-744455d44c-b65ps" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.751975 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/a0421328-5a0e-4e84-ba97-1926057962e6-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-2d74v\" (UID: \"a0421328-5a0e-4e84-ba97-1926057962e6\") " pod="openshift-authentication/oauth-openshift-558db77b4-2d74v" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.752012 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/185556de-8388-45e5-b20e-f5f7fca74dd4-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-62jrz\" (UID: \"185556de-8388-45e5-b20e-f5f7fca74dd4\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-62jrz" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.752048 4492 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/185556de-8388-45e5-b20e-f5f7fca74dd4-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-62jrz\" (UID: \"185556de-8388-45e5-b20e-f5f7fca74dd4\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-62jrz" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.752069 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p4vg4\" (UniqueName: \"kubernetes.io/projected/815ac909-aa66-4d5c-bbaf-1ef88810cb22-kube-api-access-p4vg4\") pod \"console-operator-58897d9998-7nxg8\" (UID: \"815ac909-aa66-4d5c-bbaf-1ef88810cb22\") " pod="openshift-console-operator/console-operator-58897d9998-7nxg8" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.752085 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/ee15157b-cc93-408f-9520-421d06b48f34-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-kv6rz\" (UID: \"ee15157b-cc93-408f-9520-421d06b48f34\") " pod="openshift-controller-manager/controller-manager-879f6c89f-kv6rz" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.752101 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aa815e7b-9b9b-4dd9-bd08-0104024e227e-config\") pod \"route-controller-manager-6576b87f9c-z6czh\" (UID: \"aa815e7b-9b9b-4dd9-bd08-0104024e227e\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-z6czh" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.752119 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/abfcd3ff-81af-4eff-91d1-3329b3b437af-encryption-config\") pod \"apiserver-76f77b778f-284jz\" (UID: \"abfcd3ff-81af-4eff-91d1-3329b3b437af\") " pod="openshift-apiserver/apiserver-76f77b778f-284jz" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.752135 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1394fe3d-57d3-4340-9c97-1d75ac5e8ce4-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-mstsb\" (UID: \"1394fe3d-57d3-4340-9c97-1d75ac5e8ce4\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-mstsb" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.752151 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ab14021b-87d7-43d0-9357-e8739e2d7dd1-trusted-ca-bundle\") pod \"console-f9d7485db-v92zj\" (UID: \"ab14021b-87d7-43d0-9357-e8739e2d7dd1\") " pod="openshift-console/console-f9d7485db-v92zj" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.752185 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/ee15157b-cc93-408f-9520-421d06b48f34-client-ca\") pod \"controller-manager-879f6c89f-kv6rz\" (UID: \"ee15157b-cc93-408f-9520-421d06b48f34\") " pod="openshift-controller-manager/controller-manager-879f6c89f-kv6rz" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.752208 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"kube-api-access-2c429\" (UniqueName: \"kubernetes.io/projected/af74f854-fe31-479b-b8c7-83ec85b6d279-kube-api-access-2c429\") pod \"machine-api-operator-5694c8668f-vl9xd\" (UID: \"af74f854-fe31-479b-b8c7-83ec85b6d279\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-vl9xd" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.752224 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/a0421328-5a0e-4e84-ba97-1926057962e6-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-2d74v\" (UID: \"a0421328-5a0e-4e84-ba97-1926057962e6\") " pod="openshift-authentication/oauth-openshift-558db77b4-2d74v" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.752240 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/815ac909-aa66-4d5c-bbaf-1ef88810cb22-trusted-ca\") pod \"console-operator-58897d9998-7nxg8\" (UID: \"815ac909-aa66-4d5c-bbaf-1ef88810cb22\") " pod="openshift-console-operator/console-operator-58897d9998-7nxg8" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.752253 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/abfcd3ff-81af-4eff-91d1-3329b3b437af-serving-cert\") pod \"apiserver-76f77b778f-284jz\" (UID: \"abfcd3ff-81af-4eff-91d1-3329b3b437af\") " pod="openshift-apiserver/apiserver-76f77b778f-284jz" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.752268 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ee15157b-cc93-408f-9520-421d06b48f34-config\") pod \"controller-manager-879f6c89f-kv6rz\" (UID: \"ee15157b-cc93-408f-9520-421d06b48f34\") " pod="openshift-controller-manager/controller-manager-879f6c89f-kv6rz" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.752284 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/9b62bab1-bcc8-45a1-8b79-e6fadfce9e3c-metrics-tls\") pod \"ingress-operator-5b745b69d9-txj8k\" (UID: \"9b62bab1-bcc8-45a1-8b79-e6fadfce9e3c\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-txj8k" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.752309 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/abfcd3ff-81af-4eff-91d1-3329b3b437af-etcd-serving-ca\") pod \"apiserver-76f77b778f-284jz\" (UID: \"abfcd3ff-81af-4eff-91d1-3329b3b437af\") " pod="openshift-apiserver/apiserver-76f77b778f-284jz" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.752327 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-67rfq\" (UniqueName: \"kubernetes.io/projected/6498f1f9-685a-4a0f-a108-ad49ed7b7576-kube-api-access-67rfq\") pod \"dns-operator-744455d44c-b65ps\" (UID: \"6498f1f9-685a-4a0f-a108-ad49ed7b7576\") " pod="openshift-dns-operator/dns-operator-744455d44c-b65ps" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.752342 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: 
\"kubernetes.io/secret/ab14021b-87d7-43d0-9357-e8739e2d7dd1-console-oauth-config\") pod \"console-f9d7485db-v92zj\" (UID: \"ab14021b-87d7-43d0-9357-e8739e2d7dd1\") " pod="openshift-console/console-f9d7485db-v92zj" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.752359 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1394fe3d-57d3-4340-9c97-1d75ac5e8ce4-config\") pod \"kube-apiserver-operator-766d6c64bb-mstsb\" (UID: \"1394fe3d-57d3-4340-9c97-1d75ac5e8ce4\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-mstsb" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.752377 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/1fade707-6af5-462b-bc3b-421465649292-machine-approver-tls\") pod \"machine-approver-56656f9798-g2gzv\" (UID: \"1fade707-6af5-462b-bc3b-421465649292\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-g2gzv" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.752393 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/a0421328-5a0e-4e84-ba97-1926057962e6-audit-policies\") pod \"oauth-openshift-558db77b4-2d74v\" (UID: \"a0421328-5a0e-4e84-ba97-1926057962e6\") " pod="openshift-authentication/oauth-openshift-558db77b4-2d74v" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.752411 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5b7x6\" (UniqueName: \"kubernetes.io/projected/ee15157b-cc93-408f-9520-421d06b48f34-kube-api-access-5b7x6\") pod \"controller-manager-879f6c89f-kv6rz\" (UID: \"ee15157b-cc93-408f-9520-421d06b48f34\") " pod="openshift-controller-manager/controller-manager-879f6c89f-kv6rz" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.752426 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/774cf983-095e-498f-9d55-6b7c5be37265-serving-cert\") pod \"openshift-config-operator-7777fb866f-vvl6g\" (UID: \"774cf983-095e-498f-9d55-6b7c5be37265\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-vvl6g" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.752441 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/abfcd3ff-81af-4eff-91d1-3329b3b437af-config\") pod \"apiserver-76f77b778f-284jz\" (UID: \"abfcd3ff-81af-4eff-91d1-3329b3b437af\") " pod="openshift-apiserver/apiserver-76f77b778f-284jz" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.752459 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/a0421328-5a0e-4e84-ba97-1926057962e6-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-2d74v\" (UID: \"a0421328-5a0e-4e84-ba97-1926057962e6\") " pod="openshift-authentication/oauth-openshift-558db77b4-2d74v" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.752474 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: 
\"kubernetes.io/secret/a0421328-5a0e-4e84-ba97-1926057962e6-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-2d74v\" (UID: \"a0421328-5a0e-4e84-ba97-1926057962e6\") " pod="openshift-authentication/oauth-openshift-558db77b4-2d74v" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.752500 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8fe63b27-ce8d-40a9-96db-fd485ede156a-config\") pod \"kube-controller-manager-operator-78b949d7b-kkhsz\" (UID: \"8fe63b27-ce8d-40a9-96db-fd485ede156a\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-kkhsz" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.752577 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/aa815e7b-9b9b-4dd9-bd08-0104024e227e-client-ca\") pod \"route-controller-manager-6576b87f9c-z6czh\" (UID: \"aa815e7b-9b9b-4dd9-bd08-0104024e227e\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-z6czh" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.752598 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wx5tz\" (UniqueName: \"kubernetes.io/projected/aa815e7b-9b9b-4dd9-bd08-0104024e227e-kube-api-access-wx5tz\") pod \"route-controller-manager-6576b87f9c-z6czh\" (UID: \"aa815e7b-9b9b-4dd9-bd08-0104024e227e\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-z6czh" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.752614 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/af74f854-fe31-479b-b8c7-83ec85b6d279-config\") pod \"machine-api-operator-5694c8668f-vl9xd\" (UID: \"af74f854-fe31-479b-b8c7-83ec85b6d279\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-vl9xd" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.811640 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.818246 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-dtlxh"] Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.818791 4492 util.go:30] "No sandbox for pod can be found. 
Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.818791 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-dtlxh"
Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.827064 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4"
Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.827106 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config"
Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.827243 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client"
Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.827283 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt"
Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.827406 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1"
Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.827704 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt"
Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.827841 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1"
Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.828361 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config"
Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.828562 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt"
Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.828941 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session"
Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.829367 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit"
Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.829476 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt"
Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.829573 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig"
Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.829637 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection"
Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.829655 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca"
Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.829720 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error"
Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.829735 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca"
Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.829801 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data"
Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.829868 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-hlq25"]
Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.830247 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt"
Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.830338 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config"
Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.830390 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt"
Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.830432 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config"
Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.830769 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt"
Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.830904 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc"
Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.831000 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt"
Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.831100 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk"
Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.831090 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs"
Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.831155 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca"
Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.831273 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-kfm7d"]
Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.831320 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls"
Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.831388 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk"
Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.831738 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt"
Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.831812 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-hlq25"
Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.831816 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt"
Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.831846 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert"
Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.832014 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-kfm7d"
Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.831882 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt"
Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.832053 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert"
Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.832133 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff"
Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.832147 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert"
Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.832258 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr"
Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.832308 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert"
Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.832309 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt"
Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.832490 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg"
Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.832746 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw"
Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.832882 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert"
Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.833159 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r"
Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.840354 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt"
Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.840505 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert"
Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.846532 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-xrjz8"]
Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.851903 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress/router-default-5444994796-r6lm8"]
Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.852314 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-c2wr2"]
Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.852452 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-r6lm8"
Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.852650 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-xrjz8"
Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.853289 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-czsvt"]
Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.853578 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-j8pvq"]
Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.869706 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/abfcd3ff-81af-4eff-91d1-3329b3b437af-etcd-client\") pod \"apiserver-76f77b778f-284jz\" (UID: \"abfcd3ff-81af-4eff-91d1-3329b3b437af\") " pod="openshift-apiserver/apiserver-76f77b778f-284jz"
Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.876556 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt"
Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.880509 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-j8pvq"
Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.882492 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-c2wr2"
Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.853297 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/abfcd3ff-81af-4eff-91d1-3329b3b437af-etcd-client\") pod \"apiserver-76f77b778f-284jz\" (UID: \"abfcd3ff-81af-4eff-91d1-3329b3b437af\") " pod="openshift-apiserver/apiserver-76f77b778f-284jz"
Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.882910 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h4thv\" (UniqueName: \"kubernetes.io/projected/05a0ee0a-7b86-490a-8638-8d74ad1446ea-kube-api-access-h4thv\") pod \"marketplace-operator-79b997595-sb6qj\" (UID: \"05a0ee0a-7b86-490a-8638-8d74ad1446ea\") " pod="openshift-marketplace/marketplace-operator-79b997595-sb6qj"
Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.883027 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/a0421328-5a0e-4e84-ba97-1926057962e6-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-2d74v\" (UID: \"a0421328-5a0e-4e84-ba97-1926057962e6\") " pod="openshift-authentication/oauth-openshift-558db77b4-2d74v"
Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.883152 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/185556de-8388-45e5-b20e-f5f7fca74dd4-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-62jrz\" (UID: \"185556de-8388-45e5-b20e-f5f7fca74dd4\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-62jrz"
Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.883270 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/185556de-8388-45e5-b20e-f5f7fca74dd4-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-62jrz\" (UID: \"185556de-8388-45e5-b20e-f5f7fca74dd4\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-62jrz"
Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.883436 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/6498f1f9-685a-4a0f-a108-ad49ed7b7576-metrics-tls\") pod \"dns-operator-744455d44c-b65ps\" (UID: \"6498f1f9-685a-4a0f-a108-ad49ed7b7576\") " pod="openshift-dns-operator/dns-operator-744455d44c-b65ps"
Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.883719 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p4vg4\" (UniqueName: \"kubernetes.io/projected/815ac909-aa66-4d5c-bbaf-1ef88810cb22-kube-api-access-p4vg4\") pod \"console-operator-58897d9998-7nxg8\" (UID: \"815ac909-aa66-4d5c-bbaf-1ef88810cb22\") " pod="openshift-console-operator/console-operator-58897d9998-7nxg8"
Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.883812 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/ee15157b-cc93-408f-9520-421d06b48f34-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-kv6rz\" (UID: \"ee15157b-cc93-408f-9520-421d06b48f34\") " pod="openshift-controller-manager/controller-manager-879f6c89f-kv6rz"
Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.883882 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aa815e7b-9b9b-4dd9-bd08-0104024e227e-config\") pod \"route-controller-manager-6576b87f9c-z6czh\" (UID: \"aa815e7b-9b9b-4dd9-bd08-0104024e227e\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-z6czh"
Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.883961 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/abfcd3ff-81af-4eff-91d1-3329b3b437af-encryption-config\") pod \"apiserver-76f77b778f-284jz\" (UID: \"abfcd3ff-81af-4eff-91d1-3329b3b437af\") " pod="openshift-apiserver/apiserver-76f77b778f-284jz"
Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.884060 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q9pfq\" (UniqueName: \"kubernetes.io/projected/17d0a1a5-425a-424e-bbb7-b8a91d7fb3c4-kube-api-access-q9pfq\") pod \"kube-storage-version-migrator-operator-b67b599dd-dtlxh\" (UID: \"17d0a1a5-425a-424e-bbb7-b8a91d7fb3c4\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-dtlxh"
Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.884153 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/bbde335f-1472-40de-b5ab-9867bc9b44cd-srv-cert\") pod \"olm-operator-6b444d44fb-q68m6\" (UID: \"bbde335f-1472-40de-b5ab-9867bc9b44cd\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-q68m6"
Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.884258 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c9b86c64-f8b8-470e-ad5b-723c1122418f-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-c6mkp\" (UID: \"c9b86c64-f8b8-470e-ad5b-723c1122418f\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-c6mkp"
Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.884347 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1394fe3d-57d3-4340-9c97-1d75ac5e8ce4-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-mstsb\" (UID: \"1394fe3d-57d3-4340-9c97-1d75ac5e8ce4\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-mstsb"
Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.884444 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ab14021b-87d7-43d0-9357-e8739e2d7dd1-trusted-ca-bundle\") pod \"console-f9d7485db-v92zj\" (UID: \"ab14021b-87d7-43d0-9357-e8739e2d7dd1\") " pod="openshift-console/console-f9d7485db-v92zj"
Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.884535 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/ee15157b-cc93-408f-9520-421d06b48f34-client-ca\") pod \"controller-manager-879f6c89f-kv6rz\" (UID: \"ee15157b-cc93-408f-9520-421d06b48f34\") " pod="openshift-controller-manager/controller-manager-879f6c89f-kv6rz"
Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.884684 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2c429\" (UniqueName: \"kubernetes.io/projected/af74f854-fe31-479b-b8c7-83ec85b6d279-kube-api-access-2c429\") pod \"machine-api-operator-5694c8668f-vl9xd\" (UID: \"af74f854-fe31-479b-b8c7-83ec85b6d279\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-vl9xd"
Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.884783 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/a0421328-5a0e-4e84-ba97-1926057962e6-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-2d74v\" (UID: \"a0421328-5a0e-4e84-ba97-1926057962e6\") " pod="openshift-authentication/oauth-openshift-558db77b4-2d74v"
Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.885060 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/815ac909-aa66-4d5c-bbaf-1ef88810cb22-trusted-ca\") pod \"console-operator-58897d9998-7nxg8\" (UID: \"815ac909-aa66-4d5c-bbaf-1ef88810cb22\") " pod="openshift-console-operator/console-operator-58897d9998-7nxg8"
Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.885197 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/abfcd3ff-81af-4eff-91d1-3329b3b437af-serving-cert\") pod \"apiserver-76f77b778f-284jz\" (UID: \"abfcd3ff-81af-4eff-91d1-3329b3b437af\") " pod="openshift-apiserver/apiserver-76f77b778f-284jz"
Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.885318 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n5z5f\" (UniqueName: \"kubernetes.io/projected/c9b86c64-f8b8-470e-ad5b-723c1122418f-kube-api-access-n5z5f\") pod \"openshift-controller-manager-operator-756b6f6bc6-c6mkp\" (UID: \"c9b86c64-f8b8-470e-ad5b-723c1122418f\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-c6mkp"
Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.885403 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/05a0ee0a-7b86-490a-8638-8d74ad1446ea-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-sb6qj\" (UID: \"05a0ee0a-7b86-490a-8638-8d74ad1446ea\") " pod="openshift-marketplace/marketplace-operator-79b997595-sb6qj"
Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.885524 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/9b62bab1-bcc8-45a1-8b79-e6fadfce9e3c-metrics-tls\") pod \"ingress-operator-5b745b69d9-txj8k\" (UID: \"9b62bab1-bcc8-45a1-8b79-e6fadfce9e3c\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-txj8k"
Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.885676 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ee15157b-cc93-408f-9520-421d06b48f34-config\") pod \"controller-manager-879f6c89f-kv6rz\" (UID: \"ee15157b-cc93-408f-9520-421d06b48f34\") " pod="openshift-controller-manager/controller-manager-879f6c89f-kv6rz"
Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.885852 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/abfcd3ff-81af-4eff-91d1-3329b3b437af-etcd-serving-ca\") pod \"apiserver-76f77b778f-284jz\" (UID: \"abfcd3ff-81af-4eff-91d1-3329b3b437af\") " pod="openshift-apiserver/apiserver-76f77b778f-284jz"
Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.885970 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-67rfq\" (UniqueName: \"kubernetes.io/projected/6498f1f9-685a-4a0f-a108-ad49ed7b7576-kube-api-access-67rfq\") pod \"dns-operator-744455d44c-b65ps\" (UID: \"6498f1f9-685a-4a0f-a108-ad49ed7b7576\") " pod="openshift-dns-operator/dns-operator-744455d44c-b65ps"
Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.886084 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/ab14021b-87d7-43d0-9357-e8739e2d7dd1-console-oauth-config\") pod \"console-f9d7485db-v92zj\" (UID: \"ab14021b-87d7-43d0-9357-e8739e2d7dd1\") " pod="openshift-console/console-f9d7485db-v92zj"
Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.886160 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mhdwg\" (UniqueName: \"kubernetes.io/projected/53620b74-a029-497b-99e5-ee35b5f45b7d-kube-api-access-mhdwg\") pod \"migrator-59844c95c7-f4fjj\" (UID: \"53620b74-a029-497b-99e5-ee35b5f45b7d\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-f4fjj"
Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.887918 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c9b86c64-f8b8-470e-ad5b-723c1122418f-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-c6mkp\" (UID: \"c9b86c64-f8b8-470e-ad5b-723c1122418f\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-c6mkp"
Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.888155 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1394fe3d-57d3-4340-9c97-1d75ac5e8ce4-config\") pod \"kube-apiserver-operator-766d6c64bb-mstsb\" (UID: \"1394fe3d-57d3-4340-9c97-1d75ac5e8ce4\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-mstsb" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.888429 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/1fade707-6af5-462b-bc3b-421465649292-machine-approver-tls\") pod \"machine-approver-56656f9798-g2gzv\" (UID: \"1fade707-6af5-462b-bc3b-421465649292\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-g2gzv" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.888508 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/a0421328-5a0e-4e84-ba97-1926057962e6-audit-policies\") pod \"oauth-openshift-558db77b4-2d74v\" (UID: \"a0421328-5a0e-4e84-ba97-1926057962e6\") " pod="openshift-authentication/oauth-openshift-558db77b4-2d74v" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.888638 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5b7x6\" (UniqueName: \"kubernetes.io/projected/ee15157b-cc93-408f-9520-421d06b48f34-kube-api-access-5b7x6\") pod \"controller-manager-879f6c89f-kv6rz\" (UID: \"ee15157b-cc93-408f-9520-421d06b48f34\") " pod="openshift-controller-manager/controller-manager-879f6c89f-kv6rz" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.888733 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/774cf983-095e-498f-9d55-6b7c5be37265-serving-cert\") pod \"openshift-config-operator-7777fb866f-vvl6g\" (UID: \"774cf983-095e-498f-9d55-6b7c5be37265\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-vvl6g" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.888798 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/abfcd3ff-81af-4eff-91d1-3329b3b437af-config\") pod \"apiserver-76f77b778f-284jz\" (UID: \"abfcd3ff-81af-4eff-91d1-3329b3b437af\") " pod="openshift-apiserver/apiserver-76f77b778f-284jz" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.888869 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/a0421328-5a0e-4e84-ba97-1926057962e6-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-2d74v\" (UID: \"a0421328-5a0e-4e84-ba97-1926057962e6\") " pod="openshift-authentication/oauth-openshift-558db77b4-2d74v" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.888933 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/a0421328-5a0e-4e84-ba97-1926057962e6-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-2d74v\" (UID: \"a0421328-5a0e-4e84-ba97-1926057962e6\") " pod="openshift-authentication/oauth-openshift-558db77b4-2d74v" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.889005 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" 
(UniqueName: \"kubernetes.io/configmap/8fe63b27-ce8d-40a9-96db-fd485ede156a-config\") pod \"kube-controller-manager-operator-78b949d7b-kkhsz\" (UID: \"8fe63b27-ce8d-40a9-96db-fd485ede156a\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-kkhsz" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.889108 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/aa815e7b-9b9b-4dd9-bd08-0104024e227e-client-ca\") pod \"route-controller-manager-6576b87f9c-z6czh\" (UID: \"aa815e7b-9b9b-4dd9-bd08-0104024e227e\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-z6czh" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.889197 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wx5tz\" (UniqueName: \"kubernetes.io/projected/aa815e7b-9b9b-4dd9-bd08-0104024e227e-kube-api-access-wx5tz\") pod \"route-controller-manager-6576b87f9c-z6czh\" (UID: \"aa815e7b-9b9b-4dd9-bd08-0104024e227e\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-z6czh" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.889271 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/af74f854-fe31-479b-b8c7-83ec85b6d279-config\") pod \"machine-api-operator-5694c8668f-vl9xd\" (UID: \"af74f854-fe31-479b-b8c7-83ec85b6d279\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-vl9xd" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.889338 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1fade707-6af5-462b-bc3b-421465649292-config\") pod \"machine-approver-56656f9798-g2gzv\" (UID: \"1fade707-6af5-462b-bc3b-421465649292\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-g2gzv" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.889405 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/a0421328-5a0e-4e84-ba97-1926057962e6-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-2d74v\" (UID: \"a0421328-5a0e-4e84-ba97-1926057962e6\") " pod="openshift-authentication/oauth-openshift-558db77b4-2d74v" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.889474 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8fe63b27-ce8d-40a9-96db-fd485ede156a-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-kkhsz\" (UID: \"8fe63b27-ce8d-40a9-96db-fd485ede156a\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-kkhsz" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.889760 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ee15157b-cc93-408f-9520-421d06b48f34-serving-cert\") pod \"controller-manager-879f6c89f-kv6rz\" (UID: \"ee15157b-cc93-408f-9520-421d06b48f34\") " pod="openshift-controller-manager/controller-manager-879f6c89f-kv6rz" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.889887 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: 
\"kubernetes.io/configmap/05a0ee0a-7b86-490a-8638-8d74ad1446ea-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-sb6qj\" (UID: \"05a0ee0a-7b86-490a-8638-8d74ad1446ea\") " pod="openshift-marketplace/marketplace-operator-79b997595-sb6qj" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.889977 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kfckt\" (UniqueName: \"kubernetes.io/projected/a0421328-5a0e-4e84-ba97-1926057962e6-kube-api-access-kfckt\") pod \"oauth-openshift-558db77b4-2d74v\" (UID: \"a0421328-5a0e-4e84-ba97-1926057962e6\") " pod="openshift-authentication/oauth-openshift-558db77b4-2d74v" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.890073 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/ab14021b-87d7-43d0-9357-e8739e2d7dd1-oauth-serving-cert\") pod \"console-f9d7485db-v92zj\" (UID: \"ab14021b-87d7-43d0-9357-e8739e2d7dd1\") " pod="openshift-console/console-f9d7485db-v92zj" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.890152 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/85b4355d-1d0f-4cf3-9902-4b68bd36704a-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-8q29r\" (UID: \"85b4355d-1d0f-4cf3-9902-4b68bd36704a\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-8q29r" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.891363 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/af74f854-fe31-479b-b8c7-83ec85b6d279-images\") pod \"machine-api-operator-5694c8668f-vl9xd\" (UID: \"af74f854-fe31-479b-b8c7-83ec85b6d279\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-vl9xd" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.891463 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/abfcd3ff-81af-4eff-91d1-3329b3b437af-audit\") pod \"apiserver-76f77b778f-284jz\" (UID: \"abfcd3ff-81af-4eff-91d1-3329b3b437af\") " pod="openshift-apiserver/apiserver-76f77b778f-284jz" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.891534 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/abfcd3ff-81af-4eff-91d1-3329b3b437af-image-import-ca\") pod \"apiserver-76f77b778f-284jz\" (UID: \"abfcd3ff-81af-4eff-91d1-3329b3b437af\") " pod="openshift-apiserver/apiserver-76f77b778f-284jz" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.891620 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/85b4355d-1d0f-4cf3-9902-4b68bd36704a-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-8q29r\" (UID: \"85b4355d-1d0f-4cf3-9902-4b68bd36704a\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-8q29r" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.891689 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/a0421328-5a0e-4e84-ba97-1926057962e6-audit-dir\") pod \"oauth-openshift-558db77b4-2d74v\" (UID: \"a0421328-5a0e-4e84-ba97-1926057962e6\") " pod="openshift-authentication/oauth-openshift-558db77b4-2d74v" Nov 26 06:50:21 crc 
kubenswrapper[4492]: I1126 06:50:21.891758 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/ab14021b-87d7-43d0-9357-e8739e2d7dd1-console-config\") pod \"console-f9d7485db-v92zj\" (UID: \"ab14021b-87d7-43d0-9357-e8739e2d7dd1\") " pod="openshift-console/console-f9d7485db-v92zj" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.891829 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/61b9d2cc-8f4e-41b9-a186-c6f1613da80d-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-6cm4x\" (UID: \"61b9d2cc-8f4e-41b9-a186-c6f1613da80d\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-6cm4x" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.891898 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/185556de-8388-45e5-b20e-f5f7fca74dd4-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-62jrz\" (UID: \"185556de-8388-45e5-b20e-f5f7fca74dd4\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-62jrz" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.891981 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m8d9f\" (UniqueName: \"kubernetes.io/projected/9b62bab1-bcc8-45a1-8b79-e6fadfce9e3c-kube-api-access-m8d9f\") pod \"ingress-operator-5b745b69d9-txj8k\" (UID: \"9b62bab1-bcc8-45a1-8b79-e6fadfce9e3c\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-txj8k" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.892061 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/a0421328-5a0e-4e84-ba97-1926057962e6-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-2d74v\" (UID: \"a0421328-5a0e-4e84-ba97-1926057962e6\") " pod="openshift-authentication/oauth-openshift-558db77b4-2d74v" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.892140 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a0421328-5a0e-4e84-ba97-1926057962e6-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-2d74v\" (UID: \"a0421328-5a0e-4e84-ba97-1926057962e6\") " pod="openshift-authentication/oauth-openshift-558db77b4-2d74v" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.892236 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/17d0a1a5-425a-424e-bbb7-b8a91d7fb3c4-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-dtlxh\" (UID: \"17d0a1a5-425a-424e-bbb7-b8a91d7fb3c4\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-dtlxh" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.892309 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/85b4355d-1d0f-4cf3-9902-4b68bd36704a-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-8q29r\" (UID: \"85b4355d-1d0f-4cf3-9902-4b68bd36704a\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-8q29r" Nov 26 
06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.892377 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8qq4r\" (UniqueName: \"kubernetes.io/projected/587ccafa-460d-41b6-bced-9a82822fa43c-kube-api-access-8qq4r\") pod \"downloads-7954f5f757-zvw72\" (UID: \"587ccafa-460d-41b6-bced-9a82822fa43c\") " pod="openshift-console/downloads-7954f5f757-zvw72" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.892448 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/abfcd3ff-81af-4eff-91d1-3329b3b437af-node-pullsecrets\") pod \"apiserver-76f77b778f-284jz\" (UID: \"abfcd3ff-81af-4eff-91d1-3329b3b437af\") " pod="openshift-apiserver/apiserver-76f77b778f-284jz" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.892519 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/a0421328-5a0e-4e84-ba97-1926057962e6-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-2d74v\" (UID: \"a0421328-5a0e-4e84-ba97-1926057962e6\") " pod="openshift-authentication/oauth-openshift-558db77b4-2d74v" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.892581 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zh69p\" (UniqueName: \"kubernetes.io/projected/ab14021b-87d7-43d0-9357-e8739e2d7dd1-kube-api-access-zh69p\") pod \"console-f9d7485db-v92zj\" (UID: \"ab14021b-87d7-43d0-9357-e8739e2d7dd1\") " pod="openshift-console/console-f9d7485db-v92zj" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.892649 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1394fe3d-57d3-4340-9c97-1d75ac5e8ce4-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-mstsb\" (UID: \"1394fe3d-57d3-4340-9c97-1d75ac5e8ce4\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-mstsb" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.892716 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fkmck\" (UniqueName: \"kubernetes.io/projected/774cf983-095e-498f-9d55-6b7c5be37265-kube-api-access-fkmck\") pod \"openshift-config-operator-7777fb866f-vvl6g\" (UID: \"774cf983-095e-498f-9d55-6b7c5be37265\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-vvl6g" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.892782 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2ws2x\" (UniqueName: \"kubernetes.io/projected/bbde335f-1472-40de-b5ab-9867bc9b44cd-kube-api-access-2ws2x\") pod \"olm-operator-6b444d44fb-q68m6\" (UID: \"bbde335f-1472-40de-b5ab-9867bc9b44cd\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-q68m6" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.892844 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/8fe63b27-ce8d-40a9-96db-fd485ede156a-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-kkhsz\" (UID: \"8fe63b27-ce8d-40a9-96db-fd485ede156a\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-kkhsz" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.892902 4492 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/af74f854-fe31-479b-b8c7-83ec85b6d279-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-vl9xd\" (UID: \"af74f854-fe31-479b-b8c7-83ec85b6d279\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-vl9xd" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.892968 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/ab14021b-87d7-43d0-9357-e8739e2d7dd1-service-ca\") pod \"console-f9d7485db-v92zj\" (UID: \"ab14021b-87d7-43d0-9357-e8739e2d7dd1\") " pod="openshift-console/console-f9d7485db-v92zj" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.893033 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/aa815e7b-9b9b-4dd9-bd08-0104024e227e-serving-cert\") pod \"route-controller-manager-6576b87f9c-z6czh\" (UID: \"aa815e7b-9b9b-4dd9-bd08-0104024e227e\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-z6czh" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.893109 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/abfcd3ff-81af-4eff-91d1-3329b3b437af-audit-dir\") pod \"apiserver-76f77b778f-284jz\" (UID: \"abfcd3ff-81af-4eff-91d1-3329b3b437af\") " pod="openshift-apiserver/apiserver-76f77b778f-284jz" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.893194 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/a0421328-5a0e-4e84-ba97-1926057962e6-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-2d74v\" (UID: \"a0421328-5a0e-4e84-ba97-1926057962e6\") " pod="openshift-authentication/oauth-openshift-558db77b4-2d74v" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.893283 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/bbde335f-1472-40de-b5ab-9867bc9b44cd-profile-collector-cert\") pod \"olm-operator-6b444d44fb-q68m6\" (UID: \"bbde335f-1472-40de-b5ab-9867bc9b44cd\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-q68m6" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.893357 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/a0421328-5a0e-4e84-ba97-1926057962e6-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-2d74v\" (UID: \"a0421328-5a0e-4e84-ba97-1926057962e6\") " pod="openshift-authentication/oauth-openshift-558db77b4-2d74v" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.895718 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/774cf983-095e-498f-9d55-6b7c5be37265-available-featuregates\") pod \"openshift-config-operator-7777fb866f-vvl6g\" (UID: \"774cf983-095e-498f-9d55-6b7c5be37265\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-vvl6g" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.895820 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5xc2n\" (UniqueName: 
\"kubernetes.io/projected/1fade707-6af5-462b-bc3b-421465649292-kube-api-access-5xc2n\") pod \"machine-approver-56656f9798-g2gzv\" (UID: \"1fade707-6af5-462b-bc3b-421465649292\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-g2gzv" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.895905 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/815ac909-aa66-4d5c-bbaf-1ef88810cb22-serving-cert\") pod \"console-operator-58897d9998-7nxg8\" (UID: \"815ac909-aa66-4d5c-bbaf-1ef88810cb22\") " pod="openshift-console-operator/console-operator-58897d9998-7nxg8" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.895985 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/a0421328-5a0e-4e84-ba97-1926057962e6-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-2d74v\" (UID: \"a0421328-5a0e-4e84-ba97-1926057962e6\") " pod="openshift-authentication/oauth-openshift-558db77b4-2d74v" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.896065 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/ab14021b-87d7-43d0-9357-e8739e2d7dd1-console-serving-cert\") pod \"console-f9d7485db-v92zj\" (UID: \"ab14021b-87d7-43d0-9357-e8739e2d7dd1\") " pod="openshift-console/console-f9d7485db-v92zj" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.896142 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/815ac909-aa66-4d5c-bbaf-1ef88810cb22-config\") pod \"console-operator-58897d9998-7nxg8\" (UID: \"815ac909-aa66-4d5c-bbaf-1ef88810cb22\") " pod="openshift-console-operator/console-operator-58897d9998-7nxg8" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.883667 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-czsvt" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.906589 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.907182 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.908297 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.909581 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.910262 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.911013 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/a0421328-5a0e-4e84-ba97-1926057962e6-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-2d74v\" (UID: \"a0421328-5a0e-4e84-ba97-1926057962e6\") " pod="openshift-authentication/oauth-openshift-558db77b4-2d74v" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.913088 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1394fe3d-57d3-4340-9c97-1d75ac5e8ce4-config\") pod \"kube-apiserver-operator-766d6c64bb-mstsb\" (UID: \"1394fe3d-57d3-4340-9c97-1d75ac5e8ce4\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-mstsb" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.916090 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.917657 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.917750 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/ab14021b-87d7-43d0-9357-e8739e2d7dd1-console-config\") pod \"console-f9d7485db-v92zj\" (UID: \"ab14021b-87d7-43d0-9357-e8739e2d7dd1\") " pod="openshift-console/console-f9d7485db-v92zj" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.918567 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/abfcd3ff-81af-4eff-91d1-3329b3b437af-etcd-serving-ca\") pod \"apiserver-76f77b778f-284jz\" (UID: \"abfcd3ff-81af-4eff-91d1-3329b3b437af\") " pod="openshift-apiserver/apiserver-76f77b778f-284jz" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.920285 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/815ac909-aa66-4d5c-bbaf-1ef88810cb22-config\") pod \"console-operator-58897d9998-7nxg8\" (UID: \"815ac909-aa66-4d5c-bbaf-1ef88810cb22\") " pod="openshift-console-operator/console-operator-58897d9998-7nxg8" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.921408 4492 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/ab14021b-87d7-43d0-9357-e8739e2d7dd1-oauth-serving-cert\") pod \"console-f9d7485db-v92zj\" (UID: \"ab14021b-87d7-43d0-9357-e8739e2d7dd1\") " pod="openshift-console/console-f9d7485db-v92zj" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.922534 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/abfcd3ff-81af-4eff-91d1-3329b3b437af-audit-dir\") pod \"apiserver-76f77b778f-284jz\" (UID: \"abfcd3ff-81af-4eff-91d1-3329b3b437af\") " pod="openshift-apiserver/apiserver-76f77b778f-284jz" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.923229 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1fade707-6af5-462b-bc3b-421465649292-config\") pod \"machine-approver-56656f9798-g2gzv\" (UID: \"1fade707-6af5-462b-bc3b-421465649292\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-g2gzv" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.923564 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.924253 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/774cf983-095e-498f-9d55-6b7c5be37265-available-featuregates\") pod \"openshift-config-operator-7777fb866f-vvl6g\" (UID: \"774cf983-095e-498f-9d55-6b7c5be37265\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-vvl6g" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.925379 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/815ac909-aa66-4d5c-bbaf-1ef88810cb22-serving-cert\") pod \"console-operator-58897d9998-7nxg8\" (UID: \"815ac909-aa66-4d5c-bbaf-1ef88810cb22\") " pod="openshift-console-operator/console-operator-58897d9998-7nxg8" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.927236 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/85b4355d-1d0f-4cf3-9902-4b68bd36704a-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-8q29r\" (UID: \"85b4355d-1d0f-4cf3-9902-4b68bd36704a\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-8q29r" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.927294 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9b62bab1-bcc8-45a1-8b79-e6fadfce9e3c-trusted-ca\") pod \"ingress-operator-5b745b69d9-txj8k\" (UID: \"9b62bab1-bcc8-45a1-8b79-e6fadfce9e3c\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-txj8k" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.927326 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6z7s6\" (UniqueName: \"kubernetes.io/projected/61b9d2cc-8f4e-41b9-a186-c6f1613da80d-kube-api-access-6z7s6\") pod \"cluster-samples-operator-665b6dd947-6cm4x\" (UID: \"61b9d2cc-8f4e-41b9-a186-c6f1613da80d\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-6cm4x" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.927349 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-5ncvn\" (UniqueName: \"kubernetes.io/projected/abfcd3ff-81af-4eff-91d1-3329b3b437af-kube-api-access-5ncvn\") pod \"apiserver-76f77b778f-284jz\" (UID: \"abfcd3ff-81af-4eff-91d1-3329b3b437af\") " pod="openshift-apiserver/apiserver-76f77b778f-284jz" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.927373 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/1fade707-6af5-462b-bc3b-421465649292-auth-proxy-config\") pod \"machine-approver-56656f9798-g2gzv\" (UID: \"1fade707-6af5-462b-bc3b-421465649292\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-g2gzv" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.928269 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.928673 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.928782 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/1fade707-6af5-462b-bc3b-421465649292-auth-proxy-config\") pod \"machine-approver-56656f9798-g2gzv\" (UID: \"1fade707-6af5-462b-bc3b-421465649292\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-g2gzv" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.930631 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.931624 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.932087 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.933912 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/af74f854-fe31-479b-b8c7-83ec85b6d279-images\") pod \"machine-api-operator-5694c8668f-vl9xd\" (UID: \"af74f854-fe31-479b-b8c7-83ec85b6d279\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-vl9xd" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.935253 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/17d0a1a5-425a-424e-bbb7-b8a91d7fb3c4-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-dtlxh\" (UID: \"17d0a1a5-425a-424e-bbb7-b8a91d7fb3c4\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-dtlxh" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.935840 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ee15157b-cc93-408f-9520-421d06b48f34-config\") pod \"controller-manager-879f6c89f-kv6rz\" (UID: \"ee15157b-cc93-408f-9520-421d06b48f34\") " pod="openshift-controller-manager/controller-manager-879f6c89f-kv6rz" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.936787 4492 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.937910 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a0421328-5a0e-4e84-ba97-1926057962e6-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-2d74v\" (UID: \"a0421328-5a0e-4e84-ba97-1926057962e6\") " pod="openshift-authentication/oauth-openshift-558db77b4-2d74v" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.938245 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/774cf983-095e-498f-9d55-6b7c5be37265-serving-cert\") pod \"openshift-config-operator-7777fb866f-vvl6g\" (UID: \"774cf983-095e-498f-9d55-6b7c5be37265\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-vvl6g" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.938300 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/abfcd3ff-81af-4eff-91d1-3329b3b437af-config\") pod \"apiserver-76f77b778f-284jz\" (UID: \"abfcd3ff-81af-4eff-91d1-3329b3b437af\") " pod="openshift-apiserver/apiserver-76f77b778f-284jz" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.939512 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/9b62bab1-bcc8-45a1-8b79-e6fadfce9e3c-bound-sa-token\") pod \"ingress-operator-5b745b69d9-txj8k\" (UID: \"9b62bab1-bcc8-45a1-8b79-e6fadfce9e3c\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-txj8k" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.939555 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/abfcd3ff-81af-4eff-91d1-3329b3b437af-trusted-ca-bundle\") pod \"apiserver-76f77b778f-284jz\" (UID: \"abfcd3ff-81af-4eff-91d1-3329b3b437af\") " pod="openshift-apiserver/apiserver-76f77b778f-284jz" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.939584 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2dsh7\" (UniqueName: \"kubernetes.io/projected/85b4355d-1d0f-4cf3-9902-4b68bd36704a-kube-api-access-2dsh7\") pod \"cluster-image-registry-operator-dc59b4c8b-8q29r\" (UID: \"85b4355d-1d0f-4cf3-9902-4b68bd36704a\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-8q29r" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.940400 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/9b62bab1-bcc8-45a1-8b79-e6fadfce9e3c-metrics-tls\") pod \"ingress-operator-5b745b69d9-txj8k\" (UID: \"9b62bab1-bcc8-45a1-8b79-e6fadfce9e3c\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-txj8k" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.941971 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ee15157b-cc93-408f-9520-421d06b48f34-serving-cert\") pod \"controller-manager-879f6c89f-kv6rz\" (UID: \"ee15157b-cc93-408f-9520-421d06b48f34\") " pod="openshift-controller-manager/controller-manager-879f6c89f-kv6rz" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.944907 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/abfcd3ff-81af-4eff-91d1-3329b3b437af-node-pullsecrets\") pod \"apiserver-76f77b778f-284jz\" (UID: \"abfcd3ff-81af-4eff-91d1-3329b3b437af\") " pod="openshift-apiserver/apiserver-76f77b778f-284jz" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.944975 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/a0421328-5a0e-4e84-ba97-1926057962e6-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-2d74v\" (UID: \"a0421328-5a0e-4e84-ba97-1926057962e6\") " pod="openshift-authentication/oauth-openshift-558db77b4-2d74v" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.945675 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/185556de-8388-45e5-b20e-f5f7fca74dd4-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-62jrz\" (UID: \"185556de-8388-45e5-b20e-f5f7fca74dd4\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-62jrz" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.946198 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/ab14021b-87d7-43d0-9357-e8739e2d7dd1-console-oauth-config\") pod \"console-f9d7485db-v92zj\" (UID: \"ab14021b-87d7-43d0-9357-e8739e2d7dd1\") " pod="openshift-console/console-f9d7485db-v92zj" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.946733 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/85b4355d-1d0f-4cf3-9902-4b68bd36704a-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-8q29r\" (UID: \"85b4355d-1d0f-4cf3-9902-4b68bd36704a\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-8q29r" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.946918 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/815ac909-aa66-4d5c-bbaf-1ef88810cb22-trusted-ca\") pod \"console-operator-58897d9998-7nxg8\" (UID: \"815ac909-aa66-4d5c-bbaf-1ef88810cb22\") " pod="openshift-console-operator/console-operator-58897d9998-7nxg8" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.947269 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/abfcd3ff-81af-4eff-91d1-3329b3b437af-serving-cert\") pod \"apiserver-76f77b778f-284jz\" (UID: \"abfcd3ff-81af-4eff-91d1-3329b3b437af\") " pod="openshift-apiserver/apiserver-76f77b778f-284jz" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.948632 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9b62bab1-bcc8-45a1-8b79-e6fadfce9e3c-trusted-ca\") pod \"ingress-operator-5b745b69d9-txj8k\" (UID: \"9b62bab1-bcc8-45a1-8b79-e6fadfce9e3c\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-txj8k" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.948877 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/ee15157b-cc93-408f-9520-421d06b48f34-client-ca\") pod \"controller-manager-879f6c89f-kv6rz\" (UID: \"ee15157b-cc93-408f-9520-421d06b48f34\") " pod="openshift-controller-manager/controller-manager-879f6c89f-kv6rz" Nov 26 06:50:21 crc 
kubenswrapper[4492]: I1126 06:50:21.950530 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/a0421328-5a0e-4e84-ba97-1926057962e6-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-2d74v\" (UID: \"a0421328-5a0e-4e84-ba97-1926057962e6\") " pod="openshift-authentication/oauth-openshift-558db77b4-2d74v" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.950744 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/a0421328-5a0e-4e84-ba97-1926057962e6-audit-dir\") pod \"oauth-openshift-558db77b4-2d74v\" (UID: \"a0421328-5a0e-4e84-ba97-1926057962e6\") " pod="openshift-authentication/oauth-openshift-558db77b4-2d74v" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.950740 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/a0421328-5a0e-4e84-ba97-1926057962e6-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-2d74v\" (UID: \"a0421328-5a0e-4e84-ba97-1926057962e6\") " pod="openshift-authentication/oauth-openshift-558db77b4-2d74v" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.951795 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/a0421328-5a0e-4e84-ba97-1926057962e6-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-2d74v\" (UID: \"a0421328-5a0e-4e84-ba97-1926057962e6\") " pod="openshift-authentication/oauth-openshift-558db77b4-2d74v" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.952296 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8fe63b27-ce8d-40a9-96db-fd485ede156a-config\") pod \"kube-controller-manager-operator-78b949d7b-kkhsz\" (UID: \"8fe63b27-ce8d-40a9-96db-fd485ede156a\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-kkhsz" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.952324 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-6wjd8"] Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.952631 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/aa815e7b-9b9b-4dd9-bd08-0104024e227e-serving-cert\") pod \"route-controller-manager-6576b87f9c-z6czh\" (UID: \"aa815e7b-9b9b-4dd9-bd08-0104024e227e\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-z6czh" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.952923 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-wpd96"] Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.952994 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/a0421328-5a0e-4e84-ba97-1926057962e6-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-2d74v\" (UID: \"a0421328-5a0e-4e84-ba97-1926057962e6\") " pod="openshift-authentication/oauth-openshift-558db77b4-2d74v" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.953411 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-wpd96" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.953695 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-6wjd8" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.953902 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/abfcd3ff-81af-4eff-91d1-3329b3b437af-audit\") pod \"apiserver-76f77b778f-284jz\" (UID: \"abfcd3ff-81af-4eff-91d1-3329b3b437af\") " pod="openshift-apiserver/apiserver-76f77b778f-284jz" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.954456 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-nptnl"] Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.954980 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/ab14021b-87d7-43d0-9357-e8739e2d7dd1-service-ca\") pod \"console-f9d7485db-v92zj\" (UID: \"ab14021b-87d7-43d0-9357-e8739e2d7dd1\") " pod="openshift-console/console-f9d7485db-v92zj" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.954993 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ab14021b-87d7-43d0-9357-e8739e2d7dd1-trusted-ca-bundle\") pod \"console-f9d7485db-v92zj\" (UID: \"ab14021b-87d7-43d0-9357-e8739e2d7dd1\") " pod="openshift-console/console-f9d7485db-v92zj" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.955118 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-nptnl" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.955383 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/ee15157b-cc93-408f-9520-421d06b48f34-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-kv6rz\" (UID: \"ee15157b-cc93-408f-9520-421d06b48f34\") " pod="openshift-controller-manager/controller-manager-879f6c89f-kv6rz" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.955791 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/1fade707-6af5-462b-bc3b-421465649292-machine-approver-tls\") pod \"machine-approver-56656f9798-g2gzv\" (UID: \"1fade707-6af5-462b-bc3b-421465649292\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-g2gzv" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.956321 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/af74f854-fe31-479b-b8c7-83ec85b6d279-config\") pod \"machine-api-operator-5694c8668f-vl9xd\" (UID: \"af74f854-fe31-479b-b8c7-83ec85b6d279\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-vl9xd" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.956465 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/abfcd3ff-81af-4eff-91d1-3329b3b437af-trusted-ca-bundle\") pod \"apiserver-76f77b778f-284jz\" (UID: \"abfcd3ff-81af-4eff-91d1-3329b3b437af\") " pod="openshift-apiserver/apiserver-76f77b778f-284jz" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.956707 4492 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/aa815e7b-9b9b-4dd9-bd08-0104024e227e-client-ca\") pod \"route-controller-manager-6576b87f9c-z6czh\" (UID: \"aa815e7b-9b9b-4dd9-bd08-0104024e227e\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-z6czh" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.956823 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/abfcd3ff-81af-4eff-91d1-3329b3b437af-image-import-ca\") pod \"apiserver-76f77b778f-284jz\" (UID: \"abfcd3ff-81af-4eff-91d1-3329b3b437af\") " pod="openshift-apiserver/apiserver-76f77b778f-284jz" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.957014 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.957015 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/a0421328-5a0e-4e84-ba97-1926057962e6-audit-policies\") pod \"oauth-openshift-558db77b4-2d74v\" (UID: \"a0421328-5a0e-4e84-ba97-1926057962e6\") " pod="openshift-authentication/oauth-openshift-558db77b4-2d74v" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.957468 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/af74f854-fe31-479b-b8c7-83ec85b6d279-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-vl9xd\" (UID: \"af74f854-fe31-479b-b8c7-83ec85b6d279\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-vl9xd" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.958946 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/185556de-8388-45e5-b20e-f5f7fca74dd4-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-62jrz\" (UID: \"185556de-8388-45e5-b20e-f5f7fca74dd4\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-62jrz" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.959835 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/a0421328-5a0e-4e84-ba97-1926057962e6-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-2d74v\" (UID: \"a0421328-5a0e-4e84-ba97-1926057962e6\") " pod="openshift-authentication/oauth-openshift-558db77b4-2d74v" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.960051 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aa815e7b-9b9b-4dd9-bd08-0104024e227e-config\") pod \"route-controller-manager-6576b87f9c-z6czh\" (UID: \"aa815e7b-9b9b-4dd9-bd08-0104024e227e\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-z6czh" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.960723 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/6498f1f9-685a-4a0f-a108-ad49ed7b7576-metrics-tls\") pod \"dns-operator-744455d44c-b65ps\" (UID: \"6498f1f9-685a-4a0f-a108-ad49ed7b7576\") " pod="openshift-dns-operator/dns-operator-744455d44c-b65ps" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.961085 4492 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/a0421328-5a0e-4e84-ba97-1926057962e6-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-2d74v\" (UID: \"a0421328-5a0e-4e84-ba97-1926057962e6\") " pod="openshift-authentication/oauth-openshift-558db77b4-2d74v" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.961229 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/a0421328-5a0e-4e84-ba97-1926057962e6-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-2d74v\" (UID: \"a0421328-5a0e-4e84-ba97-1926057962e6\") " pod="openshift-authentication/oauth-openshift-558db77b4-2d74v" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.961276 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/ab14021b-87d7-43d0-9357-e8739e2d7dd1-console-serving-cert\") pod \"console-f9d7485db-v92zj\" (UID: \"ab14021b-87d7-43d0-9357-e8739e2d7dd1\") " pod="openshift-console/console-f9d7485db-v92zj" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.962407 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-vzwh5"] Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.964313 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-vzwh5" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.967565 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-54mhl"] Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.967715 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.976917 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-54mhl"
Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.977554 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8fe63b27-ce8d-40a9-96db-fd485ede156a-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-kkhsz\" (UID: \"8fe63b27-ce8d-40a9-96db-fd485ede156a\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-kkhsz"
Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.977937 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/abfcd3ff-81af-4eff-91d1-3329b3b437af-encryption-config\") pod \"apiserver-76f77b778f-284jz\" (UID: \"abfcd3ff-81af-4eff-91d1-3329b3b437af\") " pod="openshift-apiserver/apiserver-76f77b778f-284jz"
Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.978023 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/a0421328-5a0e-4e84-ba97-1926057962e6-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-2d74v\" (UID: \"a0421328-5a0e-4e84-ba97-1926057962e6\") " pod="openshift-authentication/oauth-openshift-558db77b4-2d74v"
Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.978382 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-ftkfp"]
Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.980245 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1394fe3d-57d3-4340-9c97-1d75ac5e8ce4-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-mstsb\" (UID: \"1394fe3d-57d3-4340-9c97-1d75ac5e8ce4\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-mstsb"
Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.979507 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/61b9d2cc-8f4e-41b9-a186-c6f1613da80d-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-6cm4x\" (UID: \"61b9d2cc-8f4e-41b9-a186-c6f1613da80d\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-6cm4x"
Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.984748 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-v9n6p"]
Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.984865 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-ftkfp"
Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.988771 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg"
Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.988617 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402325-9jvn6"]
Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.989024 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-v9n6p"
Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.990474 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-gx7xk"]
Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.990578 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402325-9jvn6"
Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.991281 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-vl9xd"]
Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.991318 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-7nxg8"]
Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.991384 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-kv6rz"]
Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.991420 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-v92zj"]
Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.991434 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-gx7xk"
Nov 26 06:50:21 crc kubenswrapper[4492]: I1126 06:50:21.999118 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-6cm4x"]
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:21.999878 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-8q29r"]
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.002572 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-zvw72"]
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.005134 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/dns-default-s4zp9"]
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.006535 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-s4zp9"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.006839 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-z6czh"]
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.007753 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.009499 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-q68m6"]
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.010979 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-f4fjj"]
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.012830 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-txj8k"]
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.015568 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-sb6qj"]
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.017297 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-b65ps"]
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.018461 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-62jrz"]
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.020128 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-kfm7d"]
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.021073 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-dtlxh"]
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.023264 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-j8pvq"]
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.024300 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-mstsb"]
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.025126 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-c6mkp"]
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.026112 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-284jz"]
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.027117 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-czsvt"]
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.028342 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-xrjz8"]
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.029421 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-ftkfp"]
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.030819 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-8bhxb"]
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.032080 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-8bhxb"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.032282 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.033271 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-2d74v"]
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.034224 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-c2wr2"]
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.035029 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-hlq25"]
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.035902 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-vvl6g"]
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.036729 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-kkhsz"]
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.037595 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-wpd96"]
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.038471 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-54mhl"]
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.039324 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-v9n6p"]
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.040156 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-6wjd8"]
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.040492 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bxghw\" (UniqueName: \"kubernetes.io/projected/ad352c45-ea79-4ecd-a971-cd8d0ab2c046-kube-api-access-bxghw\") pod \"catalog-operator-68c6474976-czsvt\" (UID: \"ad352c45-ea79-4ecd-a971-cd8d0ab2c046\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-czsvt"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.040533 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qb7p4\" (UniqueName: \"kubernetes.io/projected/0c3ee07d-c654-4e14-afbd-fa5de3145b4d-kube-api-access-qb7p4\") pod \"machine-config-controller-84d6567774-hlq25\" (UID: \"0c3ee07d-c654-4e14-afbd-fa5de3145b4d\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-hlq25"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.040570 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/17d0a1a5-425a-424e-bbb7-b8a91d7fb3c4-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-dtlxh\" (UID: \"17d0a1a5-425a-424e-bbb7-b8a91d7fb3c4\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-dtlxh"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.040597 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/3e6813b0-5667-42dc-89e2-7c684448700c-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-c2wr2\" (UID: \"3e6813b0-5667-42dc-89e2-7c684448700c\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-c2wr2"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.040695 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/bbde335f-1472-40de-b5ab-9867bc9b44cd-profile-collector-cert\") pod \"olm-operator-6b444d44fb-q68m6\" (UID: \"bbde335f-1472-40de-b5ab-9867bc9b44cd\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-q68m6"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.040799 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/198fd913-6670-4880-874e-cce2c186c203-service-ca-bundle\") pod \"router-default-5444994796-r6lm8\" (UID: \"198fd913-6670-4880-874e-cce2c186c203\") " pod="openshift-ingress/router-default-5444994796-r6lm8"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.040921 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/3e6813b0-5667-42dc-89e2-7c684448700c-audit-dir\") pod \"apiserver-7bbb656c7d-c2wr2\" (UID: \"3e6813b0-5667-42dc-89e2-7c684448700c\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-c2wr2"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.040997 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/eada1141-ae25-4c7c-8493-1c12594dfa9e-config\") pod \"openshift-apiserver-operator-796bbdcf4f-j8pvq\" (UID: \"eada1141-ae25-4c7c-8493-1c12594dfa9e\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-j8pvq"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.041115 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/17d0a1a5-425a-424e-bbb7-b8a91d7fb3c4-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-dtlxh\" (UID: \"17d0a1a5-425a-424e-bbb7-b8a91d7fb3c4\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-dtlxh"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.041234 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-vzwh5"]
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.041243 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0c56a31a-563f-420c-8ab8-583dc3f757f6-auth-proxy-config\") pod \"machine-config-operator-74547568cd-xrjz8\" (UID: \"0c56a31a-563f-420c-8ab8-583dc3f757f6\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-xrjz8"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.041368 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nc8zb\" (UniqueName: \"kubernetes.io/projected/0c56a31a-563f-420c-8ab8-583dc3f757f6-kube-api-access-nc8zb\") pod \"machine-config-operator-74547568cd-xrjz8\" (UID: \"0c56a31a-563f-420c-8ab8-583dc3f757f6\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-xrjz8"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.041455 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n5z5f\" (UniqueName: \"kubernetes.io/projected/c9b86c64-f8b8-470e-ad5b-723c1122418f-kube-api-access-n5z5f\") pod \"openshift-controller-manager-operator-756b6f6bc6-c6mkp\" (UID: \"c9b86c64-f8b8-470e-ad5b-723c1122418f\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-c6mkp"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.041524 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/05a0ee0a-7b86-490a-8638-8d74ad1446ea-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-sb6qj\" (UID: \"05a0ee0a-7b86-490a-8638-8d74ad1446ea\") " pod="openshift-marketplace/marketplace-operator-79b997595-sb6qj"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.041604 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0c3ee07d-c654-4e14-afbd-fa5de3145b4d-proxy-tls\") pod \"machine-config-controller-84d6567774-hlq25\" (UID: \"0c3ee07d-c654-4e14-afbd-fa5de3145b4d\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-hlq25"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.041669 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/eada1141-ae25-4c7c-8493-1c12594dfa9e-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-j8pvq\" (UID: \"eada1141-ae25-4c7c-8493-1c12594dfa9e\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-j8pvq"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.041737 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mhdwg\" (UniqueName: \"kubernetes.io/projected/53620b74-a029-497b-99e5-ee35b5f45b7d-kube-api-access-mhdwg\") pod \"migrator-59844c95c7-f4fjj\" (UID: \"53620b74-a029-497b-99e5-ee35b5f45b7d\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-f4fjj"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.041817 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/ad352c45-ea79-4ecd-a971-cd8d0ab2c046-srv-cert\") pod \"catalog-operator-68c6474976-czsvt\" (UID: \"ad352c45-ea79-4ecd-a971-cd8d0ab2c046\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-czsvt"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.041912 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/3e6813b0-5667-42dc-89e2-7c684448700c-encryption-config\") pod \"apiserver-7bbb656c7d-c2wr2\" (UID: \"3e6813b0-5667-42dc-89e2-7c684448700c\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-c2wr2"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.041995 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/0c56a31a-563f-420c-8ab8-583dc3f757f6-images\") pod \"machine-config-operator-74547568cd-xrjz8\" (UID: \"0c56a31a-563f-420c-8ab8-583dc3f757f6\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-xrjz8"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.042106 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/05a0ee0a-7b86-490a-8638-8d74ad1446ea-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-sb6qj\" (UID: \"05a0ee0a-7b86-490a-8638-8d74ad1446ea\") " pod="openshift-marketplace/marketplace-operator-79b997595-sb6qj"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.042453 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/ad352c45-ea79-4ecd-a971-cd8d0ab2c046-profile-collector-cert\") pod \"catalog-operator-68c6474976-czsvt\" (UID: \"ad352c45-ea79-4ecd-a971-cd8d0ab2c046\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-czsvt"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.042630 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-nptnl"]
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.042953 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4hm2b\" (UniqueName: \"kubernetes.io/projected/198fd913-6670-4880-874e-cce2c186c203-kube-api-access-4hm2b\") pod \"router-default-5444994796-r6lm8\" (UID: \"198fd913-6670-4880-874e-cce2c186c203\") " pod="openshift-ingress/router-default-5444994796-r6lm8"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.043072 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/198fd913-6670-4880-874e-cce2c186c203-stats-auth\") pod \"router-default-5444994796-r6lm8\" (UID: \"198fd913-6670-4880-874e-cce2c186c203\") " pod="openshift-ingress/router-default-5444994796-r6lm8"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.043202 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2ws2x\" (UniqueName: \"kubernetes.io/projected/bbde335f-1472-40de-b5ab-9867bc9b44cd-kube-api-access-2ws2x\") pod \"olm-operator-6b444d44fb-q68m6\" (UID: \"bbde335f-1472-40de-b5ab-9867bc9b44cd\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-q68m6"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.043283 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/3e6813b0-5667-42dc-89e2-7c684448700c-audit-policies\") pod \"apiserver-7bbb656c7d-c2wr2\" (UID: \"3e6813b0-5667-42dc-89e2-7c684448700c\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-c2wr2"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.043350 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-smtpz\" (UniqueName: \"kubernetes.io/projected/3e6813b0-5667-42dc-89e2-7c684448700c-kube-api-access-smtpz\") pod \"apiserver-7bbb656c7d-c2wr2\" (UID: \"3e6813b0-5667-42dc-89e2-7c684448700c\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-c2wr2"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.043444 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h4thv\" (UniqueName: \"kubernetes.io/projected/05a0ee0a-7b86-490a-8638-8d74ad1446ea-kube-api-access-h4thv\") pod \"marketplace-operator-79b997595-sb6qj\" (UID: \"05a0ee0a-7b86-490a-8638-8d74ad1446ea\") " pod="openshift-marketplace/marketplace-operator-79b997595-sb6qj"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.043529 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/198fd913-6670-4880-874e-cce2c186c203-metrics-certs\") pod \"router-default-5444994796-r6lm8\" (UID: \"198fd913-6670-4880-874e-cce2c186c203\") " pod="openshift-ingress/router-default-5444994796-r6lm8"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.043607 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qw72h\" (UniqueName: \"kubernetes.io/projected/eada1141-ae25-4c7c-8493-1c12594dfa9e-kube-api-access-qw72h\") pod \"openshift-apiserver-operator-796bbdcf4f-j8pvq\" (UID: \"eada1141-ae25-4c7c-8493-1c12594dfa9e\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-j8pvq"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.043656 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-s4zp9"]
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.043733 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q9pfq\" (UniqueName: \"kubernetes.io/projected/17d0a1a5-425a-424e-bbb7-b8a91d7fb3c4-kube-api-access-q9pfq\") pod \"kube-storage-version-migrator-operator-b67b599dd-dtlxh\" (UID: \"17d0a1a5-425a-424e-bbb7-b8a91d7fb3c4\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-dtlxh"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.043801 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/bbde335f-1472-40de-b5ab-9867bc9b44cd-srv-cert\") pod \"olm-operator-6b444d44fb-q68m6\" (UID: \"bbde335f-1472-40de-b5ab-9867bc9b44cd\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-q68m6"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.043757 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/bbde335f-1472-40de-b5ab-9867bc9b44cd-profile-collector-cert\") pod \"olm-operator-6b444d44fb-q68m6\" (UID: \"bbde335f-1472-40de-b5ab-9867bc9b44cd\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-q68m6"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.043930 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c9b86c64-f8b8-470e-ad5b-723c1122418f-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-c6mkp\" (UID: \"c9b86c64-f8b8-470e-ad5b-723c1122418f\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-c6mkp"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.043532 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/05a0ee0a-7b86-490a-8638-8d74ad1446ea-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-sb6qj\" (UID: \"05a0ee0a-7b86-490a-8638-8d74ad1446ea\") " pod="openshift-marketplace/marketplace-operator-79b997595-sb6qj"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.044061 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/198fd913-6670-4880-874e-cce2c186c203-default-certificate\") pod \"router-default-5444994796-r6lm8\" (UID: \"198fd913-6670-4880-874e-cce2c186c203\") " pod="openshift-ingress/router-default-5444994796-r6lm8"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.044145 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/3e6813b0-5667-42dc-89e2-7c684448700c-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-c2wr2\" (UID: \"3e6813b0-5667-42dc-89e2-7c684448700c\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-c2wr2"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.044250 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/ca48c342-a03e-4ed6-9b9e-de6592300188-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-kfm7d\" (UID: \"ca48c342-a03e-4ed6-9b9e-de6592300188\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-kfm7d"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.044331 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c9b86c64-f8b8-470e-ad5b-723c1122418f-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-c6mkp\" (UID: \"c9b86c64-f8b8-470e-ad5b-723c1122418f\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-c6mkp"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.044415 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0c3ee07d-c654-4e14-afbd-fa5de3145b4d-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-hlq25\" (UID: \"0c3ee07d-c654-4e14-afbd-fa5de3145b4d\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-hlq25"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.044503 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nm9g5\" (UniqueName: \"kubernetes.io/projected/ca48c342-a03e-4ed6-9b9e-de6592300188-kube-api-access-nm9g5\") pod \"multus-admission-controller-857f4d67dd-kfm7d\" (UID: \"ca48c342-a03e-4ed6-9b9e-de6592300188\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-kfm7d"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.044576 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/3e6813b0-5667-42dc-89e2-7c684448700c-etcd-client\") pod \"apiserver-7bbb656c7d-c2wr2\" (UID: \"3e6813b0-5667-42dc-89e2-7c684448700c\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-c2wr2"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.044648 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0c56a31a-563f-420c-8ab8-583dc3f757f6-proxy-tls\") pod \"machine-config-operator-74547568cd-xrjz8\" (UID: \"0c56a31a-563f-420c-8ab8-583dc3f757f6\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-xrjz8"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.044513 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c9b86c64-f8b8-470e-ad5b-723c1122418f-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-c6mkp\" (UID: \"c9b86c64-f8b8-470e-ad5b-723c1122418f\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-c6mkp"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.044788 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3e6813b0-5667-42dc-89e2-7c684448700c-serving-cert\") pod \"apiserver-7bbb656c7d-c2wr2\" (UID: \"3e6813b0-5667-42dc-89e2-7c684448700c\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-c2wr2"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.044907 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402325-9jvn6"]
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.045790 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-gx7xk"]
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.045980 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/bbde335f-1472-40de-b5ab-9867bc9b44cd-srv-cert\") pod \"olm-operator-6b444d44fb-q68m6\" (UID: \"bbde335f-1472-40de-b5ab-9867bc9b44cd\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-q68m6"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.046748 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/05a0ee0a-7b86-490a-8638-8d74ad1446ea-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-sb6qj\" (UID: \"05a0ee0a-7b86-490a-8638-8d74ad1446ea\") " pod="openshift-marketplace/marketplace-operator-79b997595-sb6qj"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.046899 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.047311 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c9b86c64-f8b8-470e-ad5b-723c1122418f-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-c6mkp\" (UID: \"c9b86c64-f8b8-470e-ad5b-723c1122418f\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-c6mkp"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.047578 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-8bhxb"]
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.061313 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-server-p4c46"]
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.064193 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-canary/ingress-canary-zth84"]
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.064518 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-p4c46"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.064933 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-zth84"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.065145 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-zth84"]
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.066896 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.086552 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.107004 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.112951 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/17d0a1a5-425a-424e-bbb7-b8a91d7fb3c4-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-dtlxh\" (UID: \"17d0a1a5-425a-424e-bbb7-b8a91d7fb3c4\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-dtlxh"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.127715 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.146963 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/3e6813b0-5667-42dc-89e2-7c684448700c-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-c2wr2\" (UID: \"3e6813b0-5667-42dc-89e2-7c684448700c\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-c2wr2"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.147016 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/ca48c342-a03e-4ed6-9b9e-de6592300188-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-kfm7d\" (UID: \"ca48c342-a03e-4ed6-9b9e-de6592300188\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-kfm7d"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.147070 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0c3ee07d-c654-4e14-afbd-fa5de3145b4d-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-hlq25\" (UID: \"0c3ee07d-c654-4e14-afbd-fa5de3145b4d\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-hlq25"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.147100 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nm9g5\" (UniqueName: \"kubernetes.io/projected/ca48c342-a03e-4ed6-9b9e-de6592300188-kube-api-access-nm9g5\") pod \"multus-admission-controller-857f4d67dd-kfm7d\" (UID: \"ca48c342-a03e-4ed6-9b9e-de6592300188\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-kfm7d"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.147109 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.147119 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/3e6813b0-5667-42dc-89e2-7c684448700c-etcd-client\") pod \"apiserver-7bbb656c7d-c2wr2\" (UID: \"3e6813b0-5667-42dc-89e2-7c684448700c\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-c2wr2"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.147309 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0c56a31a-563f-420c-8ab8-583dc3f757f6-proxy-tls\") pod \"machine-config-operator-74547568cd-xrjz8\" (UID: \"0c56a31a-563f-420c-8ab8-583dc3f757f6\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-xrjz8"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.147337 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3e6813b0-5667-42dc-89e2-7c684448700c-serving-cert\") pod \"apiserver-7bbb656c7d-c2wr2\" (UID: \"3e6813b0-5667-42dc-89e2-7c684448700c\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-c2wr2"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.147376 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bxghw\" (UniqueName: \"kubernetes.io/projected/ad352c45-ea79-4ecd-a971-cd8d0ab2c046-kube-api-access-bxghw\") pod \"catalog-operator-68c6474976-czsvt\" (UID: \"ad352c45-ea79-4ecd-a971-cd8d0ab2c046\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-czsvt"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.147414 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qb7p4\" (UniqueName: \"kubernetes.io/projected/0c3ee07d-c654-4e14-afbd-fa5de3145b4d-kube-api-access-qb7p4\") pod \"machine-config-controller-84d6567774-hlq25\" (UID: \"0c3ee07d-c654-4e14-afbd-fa5de3145b4d\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-hlq25"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.147453 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/3e6813b0-5667-42dc-89e2-7c684448700c-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-c2wr2\" (UID: \"3e6813b0-5667-42dc-89e2-7c684448700c\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-c2wr2"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.147485 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/eada1141-ae25-4c7c-8493-1c12594dfa9e-config\") pod \"openshift-apiserver-operator-796bbdcf4f-j8pvq\" (UID: \"eada1141-ae25-4c7c-8493-1c12594dfa9e\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-j8pvq"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.147518 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/198fd913-6670-4880-874e-cce2c186c203-service-ca-bundle\") pod \"router-default-5444994796-r6lm8\" (UID: \"198fd913-6670-4880-874e-cce2c186c203\") " pod="openshift-ingress/router-default-5444994796-r6lm8"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.147538 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/3e6813b0-5667-42dc-89e2-7c684448700c-audit-dir\") pod \"apiserver-7bbb656c7d-c2wr2\" (UID: \"3e6813b0-5667-42dc-89e2-7c684448700c\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-c2wr2"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.147592 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0c56a31a-563f-420c-8ab8-583dc3f757f6-auth-proxy-config\") pod \"machine-config-operator-74547568cd-xrjz8\" (UID: \"0c56a31a-563f-420c-8ab8-583dc3f757f6\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-xrjz8"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.147610 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nc8zb\" (UniqueName: \"kubernetes.io/projected/0c56a31a-563f-420c-8ab8-583dc3f757f6-kube-api-access-nc8zb\") pod \"machine-config-operator-74547568cd-xrjz8\" (UID: \"0c56a31a-563f-420c-8ab8-583dc3f757f6\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-xrjz8"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.147650 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0c3ee07d-c654-4e14-afbd-fa5de3145b4d-proxy-tls\") pod \"machine-config-controller-84d6567774-hlq25\" (UID: \"0c3ee07d-c654-4e14-afbd-fa5de3145b4d\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-hlq25"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.147667 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/eada1141-ae25-4c7c-8493-1c12594dfa9e-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-j8pvq\" (UID: \"eada1141-ae25-4c7c-8493-1c12594dfa9e\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-j8pvq"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.147690 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/ad352c45-ea79-4ecd-a971-cd8d0ab2c046-srv-cert\") pod \"catalog-operator-68c6474976-czsvt\" (UID: \"ad352c45-ea79-4ecd-a971-cd8d0ab2c046\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-czsvt"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.147708 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/3e6813b0-5667-42dc-89e2-7c684448700c-encryption-config\") pod \"apiserver-7bbb656c7d-c2wr2\" (UID: \"3e6813b0-5667-42dc-89e2-7c684448700c\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-c2wr2"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.147736 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/0c56a31a-563f-420c-8ab8-583dc3f757f6-images\") pod \"machine-config-operator-74547568cd-xrjz8\" (UID: \"0c56a31a-563f-420c-8ab8-583dc3f757f6\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-xrjz8"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.147775 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/ad352c45-ea79-4ecd-a971-cd8d0ab2c046-profile-collector-cert\") pod \"catalog-operator-68c6474976-czsvt\" (UID: \"ad352c45-ea79-4ecd-a971-cd8d0ab2c046\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-czsvt"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.147789 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4hm2b\" (UniqueName: \"kubernetes.io/projected/198fd913-6670-4880-874e-cce2c186c203-kube-api-access-4hm2b\") pod \"router-default-5444994796-r6lm8\" (UID: \"198fd913-6670-4880-874e-cce2c186c203\") " pod="openshift-ingress/router-default-5444994796-r6lm8"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.147825 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/198fd913-6670-4880-874e-cce2c186c203-stats-auth\") pod \"router-default-5444994796-r6lm8\" (UID: \"198fd913-6670-4880-874e-cce2c186c203\") " pod="openshift-ingress/router-default-5444994796-r6lm8"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.147889 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/3e6813b0-5667-42dc-89e2-7c684448700c-audit-policies\") pod \"apiserver-7bbb656c7d-c2wr2\" (UID: \"3e6813b0-5667-42dc-89e2-7c684448700c\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-c2wr2"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.147910 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-smtpz\" (UniqueName: \"kubernetes.io/projected/3e6813b0-5667-42dc-89e2-7c684448700c-kube-api-access-smtpz\") pod \"apiserver-7bbb656c7d-c2wr2\" (UID: \"3e6813b0-5667-42dc-89e2-7c684448700c\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-c2wr2"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.147962 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qw72h\" (UniqueName: \"kubernetes.io/projected/eada1141-ae25-4c7c-8493-1c12594dfa9e-kube-api-access-qw72h\") pod \"openshift-apiserver-operator-796bbdcf4f-j8pvq\" (UID: \"eada1141-ae25-4c7c-8493-1c12594dfa9e\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-j8pvq"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.147980 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/198fd913-6670-4880-874e-cce2c186c203-metrics-certs\") pod \"router-default-5444994796-r6lm8\" (UID: \"198fd913-6670-4880-874e-cce2c186c203\") " pod="openshift-ingress/router-default-5444994796-r6lm8"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.148009 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/198fd913-6670-4880-874e-cce2c186c203-default-certificate\") pod \"router-default-5444994796-r6lm8\" (UID: \"198fd913-6670-4880-874e-cce2c186c203\") " pod="openshift-ingress/router-default-5444994796-r6lm8"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.148272 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/3e6813b0-5667-42dc-89e2-7c684448700c-audit-dir\") pod \"apiserver-7bbb656c7d-c2wr2\" (UID: \"3e6813b0-5667-42dc-89e2-7c684448700c\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-c2wr2"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.148946 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0c3ee07d-c654-4e14-afbd-fa5de3145b4d-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-hlq25\" (UID: \"0c3ee07d-c654-4e14-afbd-fa5de3145b4d\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-hlq25"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.149302 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0c56a31a-563f-420c-8ab8-583dc3f757f6-auth-proxy-config\") pod \"machine-config-operator-74547568cd-xrjz8\" (UID: \"0c56a31a-563f-420c-8ab8-583dc3f757f6\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-xrjz8"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.151332 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/ad352c45-ea79-4ecd-a971-cd8d0ab2c046-profile-collector-cert\") pod \"catalog-operator-68c6474976-czsvt\" (UID: \"ad352c45-ea79-4ecd-a971-cd8d0ab2c046\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-czsvt"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.154165 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/17d0a1a5-425a-424e-bbb7-b8a91d7fb3c4-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-dtlxh\" (UID: \"17d0a1a5-425a-424e-bbb7-b8a91d7fb3c4\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-dtlxh"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.167395 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.170597 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0c3ee07d-c654-4e14-afbd-fa5de3145b4d-proxy-tls\") pod \"machine-config-controller-84d6567774-hlq25\" (UID: \"0c3ee07d-c654-4e14-afbd-fa5de3145b4d\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-hlq25"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.187397 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.207151 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.211107 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/ca48c342-a03e-4ed6-9b9e-de6592300188-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-kfm7d\" (UID: \"ca48c342-a03e-4ed6-9b9e-de6592300188\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-kfm7d"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.227605 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.247035 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.266890 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.286873 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.290907 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/198fd913-6670-4880-874e-cce2c186c203-default-certificate\") pod \"router-default-5444994796-r6lm8\" (UID: \"198fd913-6670-4880-874e-cce2c186c203\") " pod="openshift-ingress/router-default-5444994796-r6lm8"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.306890 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.311391 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/198fd913-6670-4880-874e-cce2c186c203-stats-auth\") pod \"router-default-5444994796-r6lm8\" (UID: \"198fd913-6670-4880-874e-cce2c186c203\") " pod="openshift-ingress/router-default-5444994796-r6lm8"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.326581 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.328957 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/198fd913-6670-4880-874e-cce2c186c203-service-ca-bundle\") pod \"router-default-5444994796-r6lm8\" (UID: \"198fd913-6670-4880-874e-cce2c186c203\") " pod="openshift-ingress/router-default-5444994796-r6lm8"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.347509 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.349776 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/0c56a31a-563f-420c-8ab8-583dc3f757f6-images\") pod \"machine-config-operator-74547568cd-xrjz8\" (UID: \"0c56a31a-563f-420c-8ab8-583dc3f757f6\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-xrjz8"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.367049 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.386881 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.391111 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/198fd913-6670-4880-874e-cce2c186c203-metrics-certs\") pod \"router-default-5444994796-r6lm8\" (UID: \"198fd913-6670-4880-874e-cce2c186c203\") " pod="openshift-ingress/router-default-5444994796-r6lm8"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.408302 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.427699 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.430832 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0c56a31a-563f-420c-8ab8-583dc3f757f6-proxy-tls\") pod \"machine-config-operator-74547568cd-xrjz8\" (UID: \"0c56a31a-563f-420c-8ab8-583dc3f757f6\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-xrjz8"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.437589 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.437604 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.467459 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.486760 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.507123 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.511078 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/eada1141-ae25-4c7c-8493-1c12594dfa9e-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-j8pvq\" (UID: \"eada1141-ae25-4c7c-8493-1c12594dfa9e\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-j8pvq"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.527144 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.529435 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/eada1141-ae25-4c7c-8493-1c12594dfa9e-config\") pod \"openshift-apiserver-operator-796bbdcf4f-j8pvq\" (UID: \"eada1141-ae25-4c7c-8493-1c12594dfa9e\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-j8pvq"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.547812 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.567352 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.569528 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/3e6813b0-5667-42dc-89e2-7c684448700c-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-c2wr2\" (UID: \"3e6813b0-5667-42dc-89e2-7c684448700c\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-c2wr2"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.586863 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.589684 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/3e6813b0-5667-42dc-89e2-7c684448700c-etcd-client\") pod \"apiserver-7bbb656c7d-c2wr2\" (UID: \"3e6813b0-5667-42dc-89e2-7c684448700c\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-c2wr2"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.606866 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.610421 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3e6813b0-5667-42dc-89e2-7c684448700c-serving-cert\") pod \"apiserver-7bbb656c7d-c2wr2\" (UID: \"3e6813b0-5667-42dc-89e2-7c684448700c\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-c2wr2"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.626686 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.630793 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/3e6813b0-5667-42dc-89e2-7c684448700c-encryption-config\") pod \"apiserver-7bbb656c7d-c2wr2\" (UID: \"3e6813b0-5667-42dc-89e2-7c684448700c\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-c2wr2"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.647643 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.667389 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.687018 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.706967 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.709364 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/3e6813b0-5667-42dc-89e2-7c684448700c-audit-policies\") pod \"apiserver-7bbb656c7d-c2wr2\" (UID: \"3e6813b0-5667-42dc-89e2-7c684448700c\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-c2wr2"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.726742 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.728654 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/3e6813b0-5667-42dc-89e2-7c684448700c-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-c2wr2\" (UID: \"3e6813b0-5667-42dc-89e2-7c684448700c\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-c2wr2"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.746807 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.750966 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/ad352c45-ea79-4ecd-a971-cd8d0ab2c046-srv-cert\") pod \"catalog-operator-68c6474976-czsvt\" (UID: \"ad352c45-ea79-4ecd-a971-cd8d0ab2c046\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-czsvt"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.778986 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2c429\" (UniqueName: \"kubernetes.io/projected/af74f854-fe31-479b-b8c7-83ec85b6d279-kube-api-access-2c429\") pod \"machine-api-operator-5694c8668f-vl9xd\" (UID: \"af74f854-fe31-479b-b8c7-83ec85b6d279\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-vl9xd"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.797569 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wx5tz\" (UniqueName: \"kubernetes.io/projected/aa815e7b-9b9b-4dd9-bd08-0104024e227e-kube-api-access-wx5tz\") pod \"route-controller-manager-6576b87f9c-z6czh\" (UID: \"aa815e7b-9b9b-4dd9-bd08-0104024e227e\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-z6czh"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.818315 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m8d9f\" (UniqueName: \"kubernetes.io/projected/9b62bab1-bcc8-45a1-8b79-e6fadfce9e3c-kube-api-access-m8d9f\") pod \"ingress-operator-5b745b69d9-txj8k\" (UID: \"9b62bab1-bcc8-45a1-8b79-e6fadfce9e3c\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-txj8k"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.838003 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kfckt\" (UniqueName: \"kubernetes.io/projected/a0421328-5a0e-4e84-ba97-1926057962e6-kube-api-access-kfckt\") pod \"oauth-openshift-558db77b4-2d74v\" (UID: \"a0421328-5a0e-4e84-ba97-1926057962e6\") " pod="openshift-authentication/oauth-openshift-558db77b4-2d74v"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.858100 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5xc2n\" (UniqueName: \"kubernetes.io/projected/1fade707-6af5-462b-bc3b-421465649292-kube-api-access-5xc2n\") pod \"machine-approver-56656f9798-g2gzv\" (UID: \"1fade707-6af5-462b-bc3b-421465649292\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-g2gzv"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.878031 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5b7x6\" (UniqueName: \"kubernetes.io/projected/ee15157b-cc93-408f-9520-421d06b48f34-kube-api-access-5b7x6\") pod \"controller-manager-879f6c89f-kv6rz\" (UID: \"ee15157b-cc93-408f-9520-421d06b48f34\") " pod="openshift-controller-manager/controller-manager-879f6c89f-kv6rz"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.881960 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-kv6rz"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.899797 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6z7s6\" (UniqueName: \"kubernetes.io/projected/61b9d2cc-8f4e-41b9-a186-c6f1613da80d-kube-api-access-6z7s6\") pod \"cluster-samples-operator-665b6dd947-6cm4x\" (UID: \"61b9d2cc-8f4e-41b9-a186-c6f1613da80d\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-6cm4x"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.924572 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5ncvn\" (UniqueName: \"kubernetes.io/projected/abfcd3ff-81af-4eff-91d1-3329b3b437af-kube-api-access-5ncvn\") pod \"apiserver-76f77b778f-284jz\" (UID: \"abfcd3ff-81af-4eff-91d1-3329b3b437af\") " pod="openshift-apiserver/apiserver-76f77b778f-284jz"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.932583 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-6cm4x"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.945730 4492 request.go:700] Waited for 1.01156964s due to client-side throttling, not priority and fairness, request: POST:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-dns-operator/serviceaccounts/dns-operator/token
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.948520 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/185556de-8388-45e5-b20e-f5f7fca74dd4-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-62jrz\" (UID: \"185556de-8388-45e5-b20e-f5f7fca74dd4\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-62jrz"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.959204 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-67rfq\" (UniqueName: \"kubernetes.io/projected/6498f1f9-685a-4a0f-a108-ad49ed7b7576-kube-api-access-67rfq\") pod \"dns-operator-744455d44c-b65ps\" (UID: \"6498f1f9-685a-4a0f-a108-ad49ed7b7576\") " pod="openshift-dns-operator/dns-operator-744455d44c-b65ps"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.970287 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-vl9xd"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.976826 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-z6czh"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.980071 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2dsh7\" (UniqueName: \"kubernetes.io/projected/85b4355d-1d0f-4cf3-9902-4b68bd36704a-kube-api-access-2dsh7\") pod \"cluster-image-registry-operator-dc59b4c8b-8q29r\" (UID: \"85b4355d-1d0f-4cf3-9902-4b68bd36704a\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-8q29r"
Nov 26 06:50:22 crc kubenswrapper[4492]: I1126 06:50:22.997671 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-b65ps"
Nov 26 06:50:23 crc kubenswrapper[4492]: I1126 06:50:23.002235 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fkmck\" (UniqueName: \"kubernetes.io/projected/774cf983-095e-498f-9d55-6b7c5be37265-kube-api-access-fkmck\") pod \"openshift-config-operator-7777fb866f-vvl6g\" (UID: \"774cf983-095e-498f-9d55-6b7c5be37265\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-vvl6g"
Nov 26 06:50:23 crc kubenswrapper[4492]: I1126 06:50:23.010135 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-g2gzv"
Nov 26 06:50:23 crc kubenswrapper[4492]: I1126 06:50:23.016444 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-2d74v"
Nov 26 06:50:23 crc kubenswrapper[4492]: I1126 06:50:23.024004 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zh69p\" (UniqueName: \"kubernetes.io/projected/ab14021b-87d7-43d0-9357-e8739e2d7dd1-kube-api-access-zh69p\") pod \"console-f9d7485db-v92zj\" (UID: \"ab14021b-87d7-43d0-9357-e8739e2d7dd1\") " pod="openshift-console/console-f9d7485db-v92zj"
Nov 26 06:50:23 crc kubenswrapper[4492]: I1126 06:50:23.044048 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/85b4355d-1d0f-4cf3-9902-4b68bd36704a-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-8q29r\" (UID: \"85b4355d-1d0f-4cf3-9902-4b68bd36704a\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-8q29r"
Nov 26 06:50:23 crc kubenswrapper[4492]: I1126 06:50:23.047949 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-284jz"
Nov 26 06:50:23 crc kubenswrapper[4492]: I1126 06:50:23.066810 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt"
Nov 26 06:50:23 crc kubenswrapper[4492]: I1126 06:50:23.070668 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/9b62bab1-bcc8-45a1-8b79-e6fadfce9e3c-bound-sa-token\") pod \"ingress-operator-5b745b69d9-txj8k\" (UID: \"9b62bab1-bcc8-45a1-8b79-e6fadfce9e3c\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-txj8k"
Nov 26 06:50:23 crc kubenswrapper[4492]: I1126 06:50:23.085798 4492 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-62jrz" Nov 26 06:50:23 crc kubenswrapper[4492]: I1126 06:50:23.086817 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Nov 26 06:50:23 crc kubenswrapper[4492]: I1126 06:50:23.107550 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Nov 26 06:50:23 crc kubenswrapper[4492]: I1126 06:50:23.128191 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Nov 26 06:50:23 crc kubenswrapper[4492]: I1126 06:50:23.147823 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Nov 26 06:50:23 crc kubenswrapper[4492]: I1126 06:50:23.169615 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Nov 26 06:50:23 crc kubenswrapper[4492]: I1126 06:50:23.184136 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-z6czh"] Nov 26 06:50:23 crc kubenswrapper[4492]: I1126 06:50:23.187353 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Nov 26 06:50:23 crc kubenswrapper[4492]: I1126 06:50:23.208163 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Nov 26 06:50:23 crc kubenswrapper[4492]: I1126 06:50:23.227662 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Nov 26 06:50:23 crc kubenswrapper[4492]: I1126 06:50:23.243791 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-vl9xd"] Nov 26 06:50:23 crc kubenswrapper[4492]: I1126 06:50:23.244008 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-v92zj" Nov 26 06:50:23 crc kubenswrapper[4492]: I1126 06:50:23.246697 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-2d74v"] Nov 26 06:50:23 crc kubenswrapper[4492]: I1126 06:50:23.248837 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Nov 26 06:50:23 crc kubenswrapper[4492]: I1126 06:50:23.249963 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-8q29r" Nov 26 06:50:23 crc kubenswrapper[4492]: I1126 06:50:23.267391 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Nov 26 06:50:23 crc kubenswrapper[4492]: I1126 06:50:23.289240 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-vvl6g" Nov 26 06:50:23 crc kubenswrapper[4492]: I1126 06:50:23.290527 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Nov 26 06:50:23 crc kubenswrapper[4492]: I1126 06:50:23.294336 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-b65ps"] Nov 26 06:50:23 crc kubenswrapper[4492]: I1126 06:50:23.309021 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Nov 26 06:50:23 crc kubenswrapper[4492]: I1126 06:50:23.323989 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-6cm4x"] Nov 26 06:50:23 crc kubenswrapper[4492]: I1126 06:50:23.328370 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-txj8k" Nov 26 06:50:23 crc kubenswrapper[4492]: I1126 06:50:23.330749 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Nov 26 06:50:23 crc kubenswrapper[4492]: I1126 06:50:23.332029 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-kv6rz"] Nov 26 06:50:23 crc kubenswrapper[4492]: I1126 06:50:23.351045 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Nov 26 06:50:23 crc kubenswrapper[4492]: I1126 06:50:23.352348 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-62jrz"] Nov 26 06:50:23 crc kubenswrapper[4492]: W1126 06:50:23.365442 4492 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6498f1f9_685a_4a0f_a108_ad49ed7b7576.slice/crio-0135a243c48b73af68b7b1d14a072af81aa4ce3ffae88925822e8463b3b726aa WatchSource:0}: Error finding container 0135a243c48b73af68b7b1d14a072af81aa4ce3ffae88925822e8463b3b726aa: Status 404 returned error can't find the container with id 0135a243c48b73af68b7b1d14a072af81aa4ce3ffae88925822e8463b3b726aa Nov 26 06:50:23 crc kubenswrapper[4492]: I1126 06:50:23.384535 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8qq4r\" (UniqueName: \"kubernetes.io/projected/587ccafa-460d-41b6-bced-9a82822fa43c-kube-api-access-8qq4r\") pod \"downloads-7954f5f757-zvw72\" (UID: \"587ccafa-460d-41b6-bced-9a82822fa43c\") " pod="openshift-console/downloads-7954f5f757-zvw72" Nov 26 06:50:23 crc kubenswrapper[4492]: I1126 06:50:23.408825 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/8fe63b27-ce8d-40a9-96db-fd485ede156a-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-kkhsz\" (UID: \"8fe63b27-ce8d-40a9-96db-fd485ede156a\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-kkhsz" Nov 26 06:50:23 crc kubenswrapper[4492]: I1126 06:50:23.425437 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1394fe3d-57d3-4340-9c97-1d75ac5e8ce4-kube-api-access\") pod 
\"kube-apiserver-operator-766d6c64bb-mstsb\" (UID: \"1394fe3d-57d3-4340-9c97-1d75ac5e8ce4\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-mstsb" Nov 26 06:50:23 crc kubenswrapper[4492]: I1126 06:50:23.431956 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-v92zj"] Nov 26 06:50:23 crc kubenswrapper[4492]: I1126 06:50:23.438112 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-s4gtb" Nov 26 06:50:23 crc kubenswrapper[4492]: I1126 06:50:23.438957 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 06:50:23 crc kubenswrapper[4492]: I1126 06:50:23.447544 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Nov 26 06:50:23 crc kubenswrapper[4492]: I1126 06:50:23.450280 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p4vg4\" (UniqueName: \"kubernetes.io/projected/815ac909-aa66-4d5c-bbaf-1ef88810cb22-kube-api-access-p4vg4\") pod \"console-operator-58897d9998-7nxg8\" (UID: \"815ac909-aa66-4d5c-bbaf-1ef88810cb22\") " pod="openshift-console-operator/console-operator-58897d9998-7nxg8" Nov 26 06:50:23 crc kubenswrapper[4492]: W1126 06:50:23.459845 4492 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podab14021b_87d7_43d0_9357_e8739e2d7dd1.slice/crio-ffb766f75f3c234db5489b1d5ad6ab429fdbfd6516cc6ddc63e738f64f79987f WatchSource:0}: Error finding container ffb766f75f3c234db5489b1d5ad6ab429fdbfd6516cc6ddc63e738f64f79987f: Status 404 returned error can't find the container with id ffb766f75f3c234db5489b1d5ad6ab429fdbfd6516cc6ddc63e738f64f79987f Nov 26 06:50:23 crc kubenswrapper[4492]: I1126 06:50:23.471602 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Nov 26 06:50:23 crc kubenswrapper[4492]: I1126 06:50:23.493608 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Nov 26 06:50:23 crc kubenswrapper[4492]: I1126 06:50:23.500312 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-8q29r"] Nov 26 06:50:23 crc kubenswrapper[4492]: I1126 06:50:23.507434 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-7nxg8" Nov 26 06:50:23 crc kubenswrapper[4492]: I1126 06:50:23.508721 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Nov 26 06:50:23 crc kubenswrapper[4492]: I1126 06:50:23.531846 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Nov 26 06:50:23 crc kubenswrapper[4492]: I1126 06:50:23.545129 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-284jz"] Nov 26 06:50:23 crc kubenswrapper[4492]: I1126 06:50:23.548412 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Nov 26 06:50:23 crc kubenswrapper[4492]: I1126 06:50:23.570416 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Nov 26 06:50:23 crc kubenswrapper[4492]: I1126 06:50:23.588896 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Nov 26 06:50:23 crc kubenswrapper[4492]: I1126 06:50:23.605222 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-zvw72" Nov 26 06:50:23 crc kubenswrapper[4492]: I1126 06:50:23.607610 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Nov 26 06:50:23 crc kubenswrapper[4492]: I1126 06:50:23.608113 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-vvl6g"] Nov 26 06:50:23 crc kubenswrapper[4492]: I1126 06:50:23.629031 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Nov 26 06:50:23 crc kubenswrapper[4492]: I1126 06:50:23.644991 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-mstsb" Nov 26 06:50:23 crc kubenswrapper[4492]: I1126 06:50:23.647452 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Nov 26 06:50:23 crc kubenswrapper[4492]: I1126 06:50:23.652744 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-kkhsz" Nov 26 06:50:23 crc kubenswrapper[4492]: I1126 06:50:23.669477 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Nov 26 06:50:23 crc kubenswrapper[4492]: W1126 06:50:23.680669 4492 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod774cf983_095e_498f_9d55_6b7c5be37265.slice/crio-e7b12b382aa3b329b8b8b8fd769670a8474fa32ca24a294dcba2c4ffadb99402 WatchSource:0}: Error finding container e7b12b382aa3b329b8b8b8fd769670a8474fa32ca24a294dcba2c4ffadb99402: Status 404 returned error can't find the container with id e7b12b382aa3b329b8b8b8fd769670a8474fa32ca24a294dcba2c4ffadb99402 Nov 26 06:50:23 crc kubenswrapper[4492]: I1126 06:50:23.681364 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-txj8k"] Nov 26 06:50:23 crc kubenswrapper[4492]: I1126 06:50:23.688536 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Nov 26 06:50:23 crc kubenswrapper[4492]: I1126 06:50:23.708076 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Nov 26 06:50:23 crc kubenswrapper[4492]: W1126 06:50:23.708722 4492 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9b62bab1_bcc8_45a1_8b79_e6fadfce9e3c.slice/crio-63b0f37f5b5d9e81da81d2576ccef1d5fa4733be153fd3607a053a01cd8875f8 WatchSource:0}: Error finding container 63b0f37f5b5d9e81da81d2576ccef1d5fa4733be153fd3607a053a01cd8875f8: Status 404 returned error can't find the container with id 63b0f37f5b5d9e81da81d2576ccef1d5fa4733be153fd3607a053a01cd8875f8 Nov 26 06:50:23 crc kubenswrapper[4492]: I1126 06:50:23.727814 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Nov 26 06:50:23 crc kubenswrapper[4492]: I1126 06:50:23.747803 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Nov 26 06:50:23 crc kubenswrapper[4492]: I1126 06:50:23.770101 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 26 06:50:23 crc kubenswrapper[4492]: I1126 06:50:23.789032 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 26 06:50:23 crc kubenswrapper[4492]: I1126 06:50:23.806842 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Nov 26 06:50:23 crc kubenswrapper[4492]: I1126 06:50:23.828026 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Nov 26 06:50:23 crc kubenswrapper[4492]: I1126 06:50:23.850751 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Nov 26 06:50:23 crc kubenswrapper[4492]: I1126 06:50:23.873773 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Nov 26 06:50:23 crc kubenswrapper[4492]: I1126 06:50:23.898379 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" 
Nov 26 06:50:23 crc kubenswrapper[4492]: I1126 06:50:23.911383 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Nov 26 06:50:23 crc kubenswrapper[4492]: I1126 06:50:23.929120 4492 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Nov 26 06:50:23 crc kubenswrapper[4492]: I1126 06:50:23.945395 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-b65ps" event={"ID":"6498f1f9-685a-4a0f-a108-ad49ed7b7576","Type":"ContainerStarted","Data":"704488fb4dfc4b784fe2a478866eed029b22c6e43a0822aad4ef4530c06dbc90"} Nov 26 06:50:23 crc kubenswrapper[4492]: I1126 06:50:23.945437 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-b65ps" event={"ID":"6498f1f9-685a-4a0f-a108-ad49ed7b7576","Type":"ContainerStarted","Data":"0135a243c48b73af68b7b1d14a072af81aa4ce3ffae88925822e8463b3b726aa"} Nov 26 06:50:23 crc kubenswrapper[4492]: I1126 06:50:23.945745 4492 request.go:700] Waited for 1.904109327s due to client-side throttling, not priority and fairness, request: POST:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-controller-manager-operator/serviceaccounts/openshift-controller-manager-operator/token Nov 26 06:50:23 crc kubenswrapper[4492]: I1126 06:50:23.952364 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-284jz" event={"ID":"abfcd3ff-81af-4eff-91d1-3329b3b437af","Type":"ContainerStarted","Data":"718f3fdad0bca76c412b1fadcfeddb468db6a3c9ea05de2b5d7e2413c3704944"} Nov 26 06:50:23 crc kubenswrapper[4492]: I1126 06:50:23.957799 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-z6czh" event={"ID":"aa815e7b-9b9b-4dd9-bd08-0104024e227e","Type":"ContainerStarted","Data":"f4e48ddeeebc6f76416d6331f43349705081de493624e306ede6c0cb4bd72cf4"} Nov 26 06:50:23 crc kubenswrapper[4492]: I1126 06:50:23.957853 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-z6czh" event={"ID":"aa815e7b-9b9b-4dd9-bd08-0104024e227e","Type":"ContainerStarted","Data":"5204e0abc55469ef1611bc884b70ae8f02af347523d07a13dc953d3df88ebd17"} Nov 26 06:50:23 crc kubenswrapper[4492]: I1126 06:50:23.959077 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-z6czh" Nov 26 06:50:23 crc kubenswrapper[4492]: I1126 06:50:23.972416 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-txj8k" event={"ID":"9b62bab1-bcc8-45a1-8b79-e6fadfce9e3c","Type":"ContainerStarted","Data":"63b0f37f5b5d9e81da81d2576ccef1d5fa4733be153fd3607a053a01cd8875f8"} Nov 26 06:50:23 crc kubenswrapper[4492]: I1126 06:50:23.984862 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n5z5f\" (UniqueName: \"kubernetes.io/projected/c9b86c64-f8b8-470e-ad5b-723c1122418f-kube-api-access-n5z5f\") pod \"openshift-controller-manager-operator-756b6f6bc6-c6mkp\" (UID: \"c9b86c64-f8b8-470e-ad5b-723c1122418f\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-c6mkp" Nov 26 06:50:23 crc kubenswrapper[4492]: I1126 06:50:23.986713 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-6cm4x" event={"ID":"61b9d2cc-8f4e-41b9-a186-c6f1613da80d","Type":"ContainerStarted","Data":"3b1326e362e3108f0e3fffdd8eba5a538cd2966b0c121f679363e9a248030aa2"} Nov 26 06:50:23 crc kubenswrapper[4492]: I1126 06:50:23.986756 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-6cm4x" event={"ID":"61b9d2cc-8f4e-41b9-a186-c6f1613da80d","Type":"ContainerStarted","Data":"b6363805103d9eab2a53cd33945826c0a1f14cc063fa10b99cbfd30777d0138f"} Nov 26 06:50:23 crc kubenswrapper[4492]: I1126 06:50:23.988816 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-8q29r" event={"ID":"85b4355d-1d0f-4cf3-9902-4b68bd36704a","Type":"ContainerStarted","Data":"b4bd16c06e72fbe865705b9d57642f6e240d2044251cbb90446d5edd071027c1"} Nov 26 06:50:23 crc kubenswrapper[4492]: I1126 06:50:23.992802 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mhdwg\" (UniqueName: \"kubernetes.io/projected/53620b74-a029-497b-99e5-ee35b5f45b7d-kube-api-access-mhdwg\") pod \"migrator-59844c95c7-f4fjj\" (UID: \"53620b74-a029-497b-99e5-ee35b5f45b7d\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-f4fjj" Nov 26 06:50:23 crc kubenswrapper[4492]: I1126 06:50:23.998312 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-vvl6g" event={"ID":"774cf983-095e-498f-9d55-6b7c5be37265","Type":"ContainerStarted","Data":"e7b12b382aa3b329b8b8b8fd769670a8474fa32ca24a294dcba2c4ffadb99402"} Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.000114 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-62jrz" event={"ID":"185556de-8388-45e5-b20e-f5f7fca74dd4","Type":"ContainerStarted","Data":"735791a8af725640120660a2b3c5f6d56214ff84cbff74a5811ee17532275f98"} Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.000141 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-62jrz" event={"ID":"185556de-8388-45e5-b20e-f5f7fca74dd4","Type":"ContainerStarted","Data":"6287f554109d996f3b8fd7e588bf8d381714d5f6cda97337c24df772e5883126"} Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.002668 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-kv6rz" event={"ID":"ee15157b-cc93-408f-9520-421d06b48f34","Type":"ContainerStarted","Data":"66936f63fe7392105e3ddcd903a696fd3ad5c9ec8bef0c73cf4b0b580be3072e"} Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.002693 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-kv6rz" event={"ID":"ee15157b-cc93-408f-9520-421d06b48f34","Type":"ContainerStarted","Data":"5b1914e4b84c489a17be73614ede47288428721d6e1eb3455a82fca8da501a17"} Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.003279 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-879f6c89f-kv6rz" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.010880 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-vl9xd" 
event={"ID":"af74f854-fe31-479b-b8c7-83ec85b6d279","Type":"ContainerStarted","Data":"31d1ffe87d70a1d49d5c605b5842074bdc1a77b754509fb9ab978b8e11f127f8"} Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.010913 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-vl9xd" event={"ID":"af74f854-fe31-479b-b8c7-83ec85b6d279","Type":"ContainerStarted","Data":"2d024e58635c0ac77aa18eadc0ea583bfbea06d827d5290f57cdbdad8c2be453"} Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.010925 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-vl9xd" event={"ID":"af74f854-fe31-479b-b8c7-83ec85b6d279","Type":"ContainerStarted","Data":"0649b8b064d81b35002af6435091ca40b7e36d3438fe402053a3b4209225ebaf"} Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.011737 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2ws2x\" (UniqueName: \"kubernetes.io/projected/bbde335f-1472-40de-b5ab-9867bc9b44cd-kube-api-access-2ws2x\") pod \"olm-operator-6b444d44fb-q68m6\" (UID: \"bbde335f-1472-40de-b5ab-9867bc9b44cd\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-q68m6" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.012260 4492 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-kv6rz container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.5:8443/healthz\": dial tcp 10.217.0.5:8443: connect: connection refused" start-of-body= Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.012295 4492 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-kv6rz" podUID="ee15157b-cc93-408f-9520-421d06b48f34" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.5:8443/healthz\": dial tcp 10.217.0.5:8443: connect: connection refused" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.027668 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-g2gzv" event={"ID":"1fade707-6af5-462b-bc3b-421465649292","Type":"ContainerStarted","Data":"231816ac8f727e1ecc638fedec96f64d60d524dfab148519271138c32fd8da39"} Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.027707 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-g2gzv" event={"ID":"1fade707-6af5-462b-bc3b-421465649292","Type":"ContainerStarted","Data":"7922245909f30dfdd17ef4a4ef422cd39f1fbd5d75aacc2024f81a9b608d1465"} Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.027717 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-g2gzv" event={"ID":"1fade707-6af5-462b-bc3b-421465649292","Type":"ContainerStarted","Data":"f851e8c29b4f876a863f9b33ee66353379bff6aa97991ffa6e17b1d936fbd7d9"} Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.052121 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h4thv\" (UniqueName: \"kubernetes.io/projected/05a0ee0a-7b86-490a-8638-8d74ad1446ea-kube-api-access-h4thv\") pod \"marketplace-operator-79b997595-sb6qj\" (UID: \"05a0ee0a-7b86-490a-8638-8d74ad1446ea\") " pod="openshift-marketplace/marketplace-operator-79b997595-sb6qj" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.052535 4492 util.go:30] "No sandbox 
for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-f4fjj" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.053299 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-2d74v" event={"ID":"a0421328-5a0e-4e84-ba97-1926057962e6","Type":"ContainerStarted","Data":"91af68b25ab19bc9e22765eae1ec84d3b920c76e29ea5162a316a57e870907ef"} Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.053333 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-2d74v" event={"ID":"a0421328-5a0e-4e84-ba97-1926057962e6","Type":"ContainerStarted","Data":"deb95b8b8ef44ecc4fdc442e78af73b544990fcbc79f386e5125605a1d277cb9"} Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.054262 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-558db77b4-2d74v" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.054359 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-q68m6" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.060306 4492 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-2d74v container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.27:6443/healthz\": dial tcp 10.217.0.27:6443: connect: connection refused" start-of-body= Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.060335 4492 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-2d74v" podUID="a0421328-5a0e-4e84-ba97-1926057962e6" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.27:6443/healthz\": dial tcp 10.217.0.27:6443: connect: connection refused" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.060499 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-c6mkp" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.062263 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-zvw72"] Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.066162 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q9pfq\" (UniqueName: \"kubernetes.io/projected/17d0a1a5-425a-424e-bbb7-b8a91d7fb3c4-kube-api-access-q9pfq\") pod \"kube-storage-version-migrator-operator-b67b599dd-dtlxh\" (UID: \"17d0a1a5-425a-424e-bbb7-b8a91d7fb3c4\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-dtlxh" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.068648 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.069029 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-sb6qj" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.078857 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-v92zj" event={"ID":"ab14021b-87d7-43d0-9357-e8739e2d7dd1","Type":"ContainerStarted","Data":"4e2cb884e92c900bb12aee1b7e5e6bb4c2bd784620b3706043ba58dd9625d7f8"} Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.079917 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-v92zj" event={"ID":"ab14021b-87d7-43d0-9357-e8739e2d7dd1","Type":"ContainerStarted","Data":"ffb766f75f3c234db5489b1d5ad6ab429fdbfd6516cc6ddc63e738f64f79987f"} Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.087277 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-dtlxh" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.088801 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-kkhsz"] Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.089398 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Nov 26 06:50:24 crc kubenswrapper[4492]: W1126 06:50:24.094567 4492 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod587ccafa_460d_41b6_bced_9a82822fa43c.slice/crio-0826e1d75bd577b3f91c376f5e695992ac5e2e5e57aac7ed1ebeaff6e9ce41e4 WatchSource:0}: Error finding container 0826e1d75bd577b3f91c376f5e695992ac5e2e5e57aac7ed1ebeaff6e9ce41e4: Status 404 returned error can't find the container with id 0826e1d75bd577b3f91c376f5e695992ac5e2e5e57aac7ed1ebeaff6e9ce41e4 Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.112962 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.130756 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.146884 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-mstsb"] Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.152280 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.156014 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-z6czh" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.167072 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.187598 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.198669 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-7nxg8"] Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.227733 4492 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-nm9g5\" (UniqueName: \"kubernetes.io/projected/ca48c342-a03e-4ed6-9b9e-de6592300188-kube-api-access-nm9g5\") pod \"multus-admission-controller-857f4d67dd-kfm7d\" (UID: \"ca48c342-a03e-4ed6-9b9e-de6592300188\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-kfm7d" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.242816 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nc8zb\" (UniqueName: \"kubernetes.io/projected/0c56a31a-563f-420c-8ab8-583dc3f757f6-kube-api-access-nc8zb\") pod \"machine-config-operator-74547568cd-xrjz8\" (UID: \"0c56a31a-563f-420c-8ab8-583dc3f757f6\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-xrjz8" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.284280 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bxghw\" (UniqueName: \"kubernetes.io/projected/ad352c45-ea79-4ecd-a971-cd8d0ab2c046-kube-api-access-bxghw\") pod \"catalog-operator-68c6474976-czsvt\" (UID: \"ad352c45-ea79-4ecd-a971-cd8d0ab2c046\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-czsvt" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.321191 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4hm2b\" (UniqueName: \"kubernetes.io/projected/198fd913-6670-4880-874e-cce2c186c203-kube-api-access-4hm2b\") pod \"router-default-5444994796-r6lm8\" (UID: \"198fd913-6670-4880-874e-cce2c186c203\") " pod="openshift-ingress/router-default-5444994796-r6lm8" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.343354 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qb7p4\" (UniqueName: \"kubernetes.io/projected/0c3ee07d-c654-4e14-afbd-fa5de3145b4d-kube-api-access-qb7p4\") pod \"machine-config-controller-84d6567774-hlq25\" (UID: \"0c3ee07d-c654-4e14-afbd-fa5de3145b4d\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-hlq25" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.349131 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.368874 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qw72h\" (UniqueName: \"kubernetes.io/projected/eada1141-ae25-4c7c-8493-1c12594dfa9e-kube-api-access-qw72h\") pod \"openshift-apiserver-operator-796bbdcf4f-j8pvq\" (UID: \"eada1141-ae25-4c7c-8493-1c12594dfa9e\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-j8pvq" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.368926 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-smtpz\" (UniqueName: \"kubernetes.io/projected/3e6813b0-5667-42dc-89e2-7c684448700c-kube-api-access-smtpz\") pod \"apiserver-7bbb656c7d-c2wr2\" (UID: \"3e6813b0-5667-42dc-89e2-7c684448700c\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-c2wr2" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.369932 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.447019 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.447454 
4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.467159 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.470113 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-czsvt" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.493562 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.534975 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.535291 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.536803 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-j8pvq" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.537422 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-hlq25" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.541523 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/2b02d314-392f-44f3-a88c-57d8852fbcf9-bound-sa-token\") pod \"image-registry-697d97f7c8-54mhl\" (UID: \"2b02d314-392f-44f3-a88c-57d8852fbcf9\") " pod="openshift-image-registry/image-registry-697d97f7c8-54mhl" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.541550 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9fc580a3-4f35-4c1c-8467-7cec5540712c-serving-cert\") pod \"service-ca-operator-777779d784-6wjd8\" (UID: \"9fc580a3-4f35-4c1c-8467-7cec5540712c\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-6wjd8" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.541638 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/78231c38-8677-4dd0-b845-9a498909e94a-registration-dir\") pod \"csi-hostpathplugin-8bhxb\" (UID: \"78231c38-8677-4dd0-b845-9a498909e94a\") " pod="hostpath-provisioner/csi-hostpathplugin-8bhxb" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.541683 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2c89a992-675a-4189-9723-1a868ab0e4f5-config\") pod \"authentication-operator-69f744f599-vzwh5\" (UID: \"2c89a992-675a-4189-9723-1a868ab0e4f5\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-vzwh5" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.541743 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g9lzh\" (UniqueName: 
\"kubernetes.io/projected/f84c3a91-f74e-4270-ab0a-7c92f151b3fd-kube-api-access-g9lzh\") pod \"etcd-operator-b45778765-wpd96\" (UID: \"f84c3a91-f74e-4270-ab0a-7c92f151b3fd\") " pod="openshift-etcd-operator/etcd-operator-b45778765-wpd96" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.541840 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-54mhl\" (UID: \"2b02d314-392f-44f3-a88c-57d8852fbcf9\") " pod="openshift-image-registry/image-registry-697d97f7c8-54mhl" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.541899 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/2b02d314-392f-44f3-a88c-57d8852fbcf9-registry-certificates\") pod \"image-registry-697d97f7c8-54mhl\" (UID: \"2b02d314-392f-44f3-a88c-57d8852fbcf9\") " pod="openshift-image-registry/image-registry-697d97f7c8-54mhl" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.541940 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f84c3a91-f74e-4270-ab0a-7c92f151b3fd-config\") pod \"etcd-operator-b45778765-wpd96\" (UID: \"f84c3a91-f74e-4270-ab0a-7c92f151b3fd\") " pod="openshift-etcd-operator/etcd-operator-b45778765-wpd96" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.541957 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2qzxc\" (UniqueName: \"kubernetes.io/projected/8815abe1-5cb1-4baa-97be-479d29d71055-kube-api-access-2qzxc\") pod \"control-plane-machine-set-operator-78cbb6b69f-nptnl\" (UID: \"8815abe1-5cb1-4baa-97be-479d29d71055\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-nptnl" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.542001 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q2fh4\" (UniqueName: \"kubernetes.io/projected/2b02d314-392f-44f3-a88c-57d8852fbcf9-kube-api-access-q2fh4\") pod \"image-registry-697d97f7c8-54mhl\" (UID: \"2b02d314-392f-44f3-a88c-57d8852fbcf9\") " pod="openshift-image-registry/image-registry-697d97f7c8-54mhl" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.542064 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f7zr2\" (UniqueName: \"kubernetes.io/projected/7b1d2d57-f6df-48c9-b763-cd42f141b4f8-kube-api-access-f7zr2\") pod \"machine-config-server-p4c46\" (UID: \"7b1d2d57-f6df-48c9-b763-cd42f141b4f8\") " pod="openshift-machine-config-operator/machine-config-server-p4c46" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.542086 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/46b3a5d1-cee1-4c83-b13f-94dcf4dee467-metrics-tls\") pod \"dns-default-s4zp9\" (UID: \"46b3a5d1-cee1-4c83-b13f-94dcf4dee467\") " pod="openshift-dns/dns-default-s4zp9" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.542100 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8rhl2\" (UniqueName: 
\"kubernetes.io/projected/7c5436ae-c6b0-4c8e-b45f-e580fef03690-kube-api-access-8rhl2\") pod \"collect-profiles-29402325-9jvn6\" (UID: \"7c5436ae-c6b0-4c8e-b45f-e580fef03690\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402325-9jvn6" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.542137 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/2b02d314-392f-44f3-a88c-57d8852fbcf9-installation-pull-secrets\") pod \"image-registry-697d97f7c8-54mhl\" (UID: \"2b02d314-392f-44f3-a88c-57d8852fbcf9\") " pod="openshift-image-registry/image-registry-697d97f7c8-54mhl" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.542151 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dzxq5\" (UniqueName: \"kubernetes.io/projected/3d7ce394-3661-4a68-be77-51bf1e1d5c94-kube-api-access-dzxq5\") pod \"service-ca-9c57cc56f-v9n6p\" (UID: \"3d7ce394-3661-4a68-be77-51bf1e1d5c94\") " pod="openshift-service-ca/service-ca-9c57cc56f-v9n6p" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.549005 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rgxqd\" (UniqueName: \"kubernetes.io/projected/78231c38-8677-4dd0-b845-9a498909e94a-kube-api-access-rgxqd\") pod \"csi-hostpathplugin-8bhxb\" (UID: \"78231c38-8677-4dd0-b845-9a498909e94a\") " pod="hostpath-provisioner/csi-hostpathplugin-8bhxb" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.549057 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tbtrk\" (UniqueName: \"kubernetes.io/projected/2c89a992-675a-4189-9723-1a868ab0e4f5-kube-api-access-tbtrk\") pod \"authentication-operator-69f744f599-vzwh5\" (UID: \"2c89a992-675a-4189-9723-1a868ab0e4f5\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-vzwh5" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.549076 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vnxzq\" (UniqueName: \"kubernetes.io/projected/868da700-b5be-4495-b927-afcd1bc2c1cd-kube-api-access-vnxzq\") pod \"package-server-manager-789f6589d5-ftkfp\" (UID: \"868da700-b5be-4495-b927-afcd1bc2c1cd\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-ftkfp" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.549117 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f84c3a91-f74e-4270-ab0a-7c92f151b3fd-serving-cert\") pod \"etcd-operator-b45778765-wpd96\" (UID: \"f84c3a91-f74e-4270-ab0a-7c92f151b3fd\") " pod="openshift-etcd-operator/etcd-operator-b45778765-wpd96" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.549161 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/2b02d314-392f-44f3-a88c-57d8852fbcf9-trusted-ca\") pod \"image-registry-697d97f7c8-54mhl\" (UID: \"2b02d314-392f-44f3-a88c-57d8852fbcf9\") " pod="openshift-image-registry/image-registry-697d97f7c8-54mhl" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.549439 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmpfs\" (UniqueName: 
\"kubernetes.io/empty-dir/288b0000-22a3-4ae5-b07d-46d62037c91e-tmpfs\") pod \"packageserver-d55dfcdfc-gx7xk\" (UID: \"288b0000-22a3-4ae5-b07d-46d62037c91e\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-gx7xk" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.549463 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/288b0000-22a3-4ae5-b07d-46d62037c91e-apiservice-cert\") pod \"packageserver-d55dfcdfc-gx7xk\" (UID: \"288b0000-22a3-4ae5-b07d-46d62037c91e\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-gx7xk" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.549480 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/78231c38-8677-4dd0-b845-9a498909e94a-mountpoint-dir\") pod \"csi-hostpathplugin-8bhxb\" (UID: \"78231c38-8677-4dd0-b845-9a498909e94a\") " pod="hostpath-provisioner/csi-hostpathplugin-8bhxb" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.549573 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/3d7ce394-3661-4a68-be77-51bf1e1d5c94-signing-cabundle\") pod \"service-ca-9c57cc56f-v9n6p\" (UID: \"3d7ce394-3661-4a68-be77-51bf1e1d5c94\") " pod="openshift-service-ca/service-ca-9c57cc56f-v9n6p" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.549660 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/7b1d2d57-f6df-48c9-b763-cd42f141b4f8-certs\") pod \"machine-config-server-p4c46\" (UID: \"7b1d2d57-f6df-48c9-b763-cd42f141b4f8\") " pod="openshift-machine-config-operator/machine-config-server-p4c46" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.549676 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/3d7ce394-3661-4a68-be77-51bf1e1d5c94-signing-key\") pod \"service-ca-9c57cc56f-v9n6p\" (UID: \"3d7ce394-3661-4a68-be77-51bf1e1d5c94\") " pod="openshift-service-ca/service-ca-9c57cc56f-v9n6p" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.553191 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/7c5436ae-c6b0-4c8e-b45f-e580fef03690-config-volume\") pod \"collect-profiles-29402325-9jvn6\" (UID: \"7c5436ae-c6b0-4c8e-b45f-e580fef03690\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402325-9jvn6" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.554104 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.556566 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-c2wr2" Nov 26 06:50:24 crc kubenswrapper[4492]: E1126 06:50:24.558534 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 06:50:25.058514635 +0000 UTC m=+120.942402934 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-54mhl" (UID: "2b02d314-392f-44f3-a88c-57d8852fbcf9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.558691 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2c89a992-675a-4189-9723-1a868ab0e4f5-serving-cert\") pod \"authentication-operator-69f744f599-vzwh5\" (UID: \"2c89a992-675a-4189-9723-1a868ab0e4f5\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-vzwh5" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.558711 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/78231c38-8677-4dd0-b845-9a498909e94a-socket-dir\") pod \"csi-hostpathplugin-8bhxb\" (UID: \"78231c38-8677-4dd0-b845-9a498909e94a\") " pod="hostpath-provisioner/csi-hostpathplugin-8bhxb" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.558727 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/78231c38-8677-4dd0-b845-9a498909e94a-plugins-dir\") pod \"csi-hostpathplugin-8bhxb\" (UID: \"78231c38-8677-4dd0-b845-9a498909e94a\") " pod="hostpath-provisioner/csi-hostpathplugin-8bhxb" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.558767 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pqxdt\" (UniqueName: \"kubernetes.io/projected/9fc580a3-4f35-4c1c-8467-7cec5540712c-kube-api-access-pqxdt\") pod \"service-ca-operator-777779d784-6wjd8\" (UID: \"9fc580a3-4f35-4c1c-8467-7cec5540712c\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-6wjd8" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.558784 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/f84c3a91-f74e-4270-ab0a-7c92f151b3fd-etcd-client\") pod \"etcd-operator-b45778765-wpd96\" (UID: \"f84c3a91-f74e-4270-ab0a-7c92f151b3fd\") " pod="openshift-etcd-operator/etcd-operator-b45778765-wpd96" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.558812 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9fc580a3-4f35-4c1c-8467-7cec5540712c-config\") pod \"service-ca-operator-777779d784-6wjd8\" (UID: \"9fc580a3-4f35-4c1c-8467-7cec5540712c\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-6wjd8" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.558874 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gxzh8\" (UniqueName: \"kubernetes.io/projected/46b3a5d1-cee1-4c83-b13f-94dcf4dee467-kube-api-access-gxzh8\") pod \"dns-default-s4zp9\" (UID: \"46b3a5d1-cee1-4c83-b13f-94dcf4dee467\") " pod="openshift-dns/dns-default-s4zp9" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.558917 4492 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/2b02d314-392f-44f3-a88c-57d8852fbcf9-registry-tls\") pod \"image-registry-697d97f7c8-54mhl\" (UID: \"2b02d314-392f-44f3-a88c-57d8852fbcf9\") " pod="openshift-image-registry/image-registry-697d97f7c8-54mhl" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.558962 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/868da700-b5be-4495-b927-afcd1bc2c1cd-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-ftkfp\" (UID: \"868da700-b5be-4495-b927-afcd1bc2c1cd\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-ftkfp" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.558979 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/7b1d2d57-f6df-48c9-b763-cd42f141b4f8-node-bootstrap-token\") pod \"machine-config-server-p4c46\" (UID: \"7b1d2d57-f6df-48c9-b763-cd42f141b4f8\") " pod="openshift-machine-config-operator/machine-config-server-p4c46" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.559003 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/2b02d314-392f-44f3-a88c-57d8852fbcf9-ca-trust-extracted\") pod \"image-registry-697d97f7c8-54mhl\" (UID: \"2b02d314-392f-44f3-a88c-57d8852fbcf9\") " pod="openshift-image-registry/image-registry-697d97f7c8-54mhl" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.559029 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/2c89a992-675a-4189-9723-1a868ab0e4f5-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-vzwh5\" (UID: \"2c89a992-675a-4189-9723-1a868ab0e4f5\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-vzwh5" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.559052 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/78231c38-8677-4dd0-b845-9a498909e94a-csi-data-dir\") pod \"csi-hostpathplugin-8bhxb\" (UID: \"78231c38-8677-4dd0-b845-9a498909e94a\") " pod="hostpath-provisioner/csi-hostpathplugin-8bhxb" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.559109 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9bmr9\" (UniqueName: \"kubernetes.io/projected/288b0000-22a3-4ae5-b07d-46d62037c91e-kube-api-access-9bmr9\") pod \"packageserver-d55dfcdfc-gx7xk\" (UID: \"288b0000-22a3-4ae5-b07d-46d62037c91e\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-gx7xk" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.559127 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/f84c3a91-f74e-4270-ab0a-7c92f151b3fd-etcd-ca\") pod \"etcd-operator-b45778765-wpd96\" (UID: \"f84c3a91-f74e-4270-ab0a-7c92f151b3fd\") " pod="openshift-etcd-operator/etcd-operator-b45778765-wpd96" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.559159 4492 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/f84c3a91-f74e-4270-ab0a-7c92f151b3fd-etcd-service-ca\") pod \"etcd-operator-b45778765-wpd96\" (UID: \"f84c3a91-f74e-4270-ab0a-7c92f151b3fd\") " pod="openshift-etcd-operator/etcd-operator-b45778765-wpd96" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.559211 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/288b0000-22a3-4ae5-b07d-46d62037c91e-webhook-cert\") pod \"packageserver-d55dfcdfc-gx7xk\" (UID: \"288b0000-22a3-4ae5-b07d-46d62037c91e\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-gx7xk" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.559256 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/7c5436ae-c6b0-4c8e-b45f-e580fef03690-secret-volume\") pod \"collect-profiles-29402325-9jvn6\" (UID: \"7c5436ae-c6b0-4c8e-b45f-e580fef03690\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402325-9jvn6" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.559275 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/8815abe1-5cb1-4baa-97be-479d29d71055-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-nptnl\" (UID: \"8815abe1-5cb1-4baa-97be-479d29d71055\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-nptnl" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.559293 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/46b3a5d1-cee1-4c83-b13f-94dcf4dee467-config-volume\") pod \"dns-default-s4zp9\" (UID: \"46b3a5d1-cee1-4c83-b13f-94dcf4dee467\") " pod="openshift-dns/dns-default-s4zp9" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.559330 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/2c89a992-675a-4189-9723-1a868ab0e4f5-service-ca-bundle\") pod \"authentication-operator-69f744f599-vzwh5\" (UID: \"2c89a992-675a-4189-9723-1a868ab0e4f5\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-vzwh5" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.574758 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.577473 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-r6lm8" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.590278 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.597920 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-kfm7d" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.613933 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.621229 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-xrjz8" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.660949 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.661652 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/2b02d314-392f-44f3-a88c-57d8852fbcf9-registry-certificates\") pod \"image-registry-697d97f7c8-54mhl\" (UID: \"2b02d314-392f-44f3-a88c-57d8852fbcf9\") " pod="openshift-image-registry/image-registry-697d97f7c8-54mhl" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.661692 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f84c3a91-f74e-4270-ab0a-7c92f151b3fd-config\") pod \"etcd-operator-b45778765-wpd96\" (UID: \"f84c3a91-f74e-4270-ab0a-7c92f151b3fd\") " pod="openshift-etcd-operator/etcd-operator-b45778765-wpd96" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.661809 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2qzxc\" (UniqueName: \"kubernetes.io/projected/8815abe1-5cb1-4baa-97be-479d29d71055-kube-api-access-2qzxc\") pod \"control-plane-machine-set-operator-78cbb6b69f-nptnl\" (UID: \"8815abe1-5cb1-4baa-97be-479d29d71055\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-nptnl" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.661840 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/c6597a61-a709-458b-9fd4-1c1213fda7c0-cert\") pod \"ingress-canary-zth84\" (UID: \"c6597a61-a709-458b-9fd4-1c1213fda7c0\") " pod="openshift-ingress-canary/ingress-canary-zth84" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.661862 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q2fh4\" (UniqueName: \"kubernetes.io/projected/2b02d314-392f-44f3-a88c-57d8852fbcf9-kube-api-access-q2fh4\") pod \"image-registry-697d97f7c8-54mhl\" (UID: \"2b02d314-392f-44f3-a88c-57d8852fbcf9\") " pod="openshift-image-registry/image-registry-697d97f7c8-54mhl" Nov 26 06:50:24 crc kubenswrapper[4492]: E1126 06:50:24.662160 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 06:50:25.162141533 +0000 UTC m=+121.046029830 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.662307 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f7zr2\" (UniqueName: \"kubernetes.io/projected/7b1d2d57-f6df-48c9-b763-cd42f141b4f8-kube-api-access-f7zr2\") pod \"machine-config-server-p4c46\" (UID: \"7b1d2d57-f6df-48c9-b763-cd42f141b4f8\") " pod="openshift-machine-config-operator/machine-config-server-p4c46" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.662432 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/46b3a5d1-cee1-4c83-b13f-94dcf4dee467-metrics-tls\") pod \"dns-default-s4zp9\" (UID: \"46b3a5d1-cee1-4c83-b13f-94dcf4dee467\") " pod="openshift-dns/dns-default-s4zp9" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.662458 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8rhl2\" (UniqueName: \"kubernetes.io/projected/7c5436ae-c6b0-4c8e-b45f-e580fef03690-kube-api-access-8rhl2\") pod \"collect-profiles-29402325-9jvn6\" (UID: \"7c5436ae-c6b0-4c8e-b45f-e580fef03690\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402325-9jvn6" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.662480 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/2b02d314-392f-44f3-a88c-57d8852fbcf9-installation-pull-secrets\") pod \"image-registry-697d97f7c8-54mhl\" (UID: \"2b02d314-392f-44f3-a88c-57d8852fbcf9\") " pod="openshift-image-registry/image-registry-697d97f7c8-54mhl" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.662621 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dzxq5\" (UniqueName: \"kubernetes.io/projected/3d7ce394-3661-4a68-be77-51bf1e1d5c94-kube-api-access-dzxq5\") pod \"service-ca-9c57cc56f-v9n6p\" (UID: \"3d7ce394-3661-4a68-be77-51bf1e1d5c94\") " pod="openshift-service-ca/service-ca-9c57cc56f-v9n6p" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.662765 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rgxqd\" (UniqueName: \"kubernetes.io/projected/78231c38-8677-4dd0-b845-9a498909e94a-kube-api-access-rgxqd\") pod \"csi-hostpathplugin-8bhxb\" (UID: \"78231c38-8677-4dd0-b845-9a498909e94a\") " pod="hostpath-provisioner/csi-hostpathplugin-8bhxb" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.662792 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tbtrk\" (UniqueName: \"kubernetes.io/projected/2c89a992-675a-4189-9723-1a868ab0e4f5-kube-api-access-tbtrk\") pod \"authentication-operator-69f744f599-vzwh5\" (UID: \"2c89a992-675a-4189-9723-1a868ab0e4f5\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-vzwh5" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.662914 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vnxzq\" (UniqueName: 
\"kubernetes.io/projected/868da700-b5be-4495-b927-afcd1bc2c1cd-kube-api-access-vnxzq\") pod \"package-server-manager-789f6589d5-ftkfp\" (UID: \"868da700-b5be-4495-b927-afcd1bc2c1cd\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-ftkfp" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.662939 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f84c3a91-f74e-4270-ab0a-7c92f151b3fd-serving-cert\") pod \"etcd-operator-b45778765-wpd96\" (UID: \"f84c3a91-f74e-4270-ab0a-7c92f151b3fd\") " pod="openshift-etcd-operator/etcd-operator-b45778765-wpd96" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.662957 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/2b02d314-392f-44f3-a88c-57d8852fbcf9-trusted-ca\") pod \"image-registry-697d97f7c8-54mhl\" (UID: \"2b02d314-392f-44f3-a88c-57d8852fbcf9\") " pod="openshift-image-registry/image-registry-697d97f7c8-54mhl" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.663079 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/288b0000-22a3-4ae5-b07d-46d62037c91e-tmpfs\") pod \"packageserver-d55dfcdfc-gx7xk\" (UID: \"288b0000-22a3-4ae5-b07d-46d62037c91e\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-gx7xk" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.663101 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/288b0000-22a3-4ae5-b07d-46d62037c91e-apiservice-cert\") pod \"packageserver-d55dfcdfc-gx7xk\" (UID: \"288b0000-22a3-4ae5-b07d-46d62037c91e\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-gx7xk" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.663119 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/78231c38-8677-4dd0-b845-9a498909e94a-mountpoint-dir\") pod \"csi-hostpathplugin-8bhxb\" (UID: \"78231c38-8677-4dd0-b845-9a498909e94a\") " pod="hostpath-provisioner/csi-hostpathplugin-8bhxb" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.663218 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/3d7ce394-3661-4a68-be77-51bf1e1d5c94-signing-cabundle\") pod \"service-ca-9c57cc56f-v9n6p\" (UID: \"3d7ce394-3661-4a68-be77-51bf1e1d5c94\") " pod="openshift-service-ca/service-ca-9c57cc56f-v9n6p" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.663245 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/7b1d2d57-f6df-48c9-b763-cd42f141b4f8-certs\") pod \"machine-config-server-p4c46\" (UID: \"7b1d2d57-f6df-48c9-b763-cd42f141b4f8\") " pod="openshift-machine-config-operator/machine-config-server-p4c46" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.663370 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/3d7ce394-3661-4a68-be77-51bf1e1d5c94-signing-key\") pod \"service-ca-9c57cc56f-v9n6p\" (UID: \"3d7ce394-3661-4a68-be77-51bf1e1d5c94\") " pod="openshift-service-ca/service-ca-9c57cc56f-v9n6p" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.663390 4492 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/7c5436ae-c6b0-4c8e-b45f-e580fef03690-config-volume\") pod \"collect-profiles-29402325-9jvn6\" (UID: \"7c5436ae-c6b0-4c8e-b45f-e580fef03690\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402325-9jvn6" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.663408 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/78231c38-8677-4dd0-b845-9a498909e94a-plugins-dir\") pod \"csi-hostpathplugin-8bhxb\" (UID: \"78231c38-8677-4dd0-b845-9a498909e94a\") " pod="hostpath-provisioner/csi-hostpathplugin-8bhxb" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.663536 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2c89a992-675a-4189-9723-1a868ab0e4f5-serving-cert\") pod \"authentication-operator-69f744f599-vzwh5\" (UID: \"2c89a992-675a-4189-9723-1a868ab0e4f5\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-vzwh5" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.663555 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/78231c38-8677-4dd0-b845-9a498909e94a-socket-dir\") pod \"csi-hostpathplugin-8bhxb\" (UID: \"78231c38-8677-4dd0-b845-9a498909e94a\") " pod="hostpath-provisioner/csi-hostpathplugin-8bhxb" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.663558 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/288b0000-22a3-4ae5-b07d-46d62037c91e-tmpfs\") pod \"packageserver-d55dfcdfc-gx7xk\" (UID: \"288b0000-22a3-4ae5-b07d-46d62037c91e\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-gx7xk" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.663687 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9fc580a3-4f35-4c1c-8467-7cec5540712c-config\") pod \"service-ca-operator-777779d784-6wjd8\" (UID: \"9fc580a3-4f35-4c1c-8467-7cec5540712c\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-6wjd8" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.663711 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pqxdt\" (UniqueName: \"kubernetes.io/projected/9fc580a3-4f35-4c1c-8467-7cec5540712c-kube-api-access-pqxdt\") pod \"service-ca-operator-777779d784-6wjd8\" (UID: \"9fc580a3-4f35-4c1c-8467-7cec5540712c\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-6wjd8" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.663852 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/f84c3a91-f74e-4270-ab0a-7c92f151b3fd-etcd-client\") pod \"etcd-operator-b45778765-wpd96\" (UID: \"f84c3a91-f74e-4270-ab0a-7c92f151b3fd\") " pod="openshift-etcd-operator/etcd-operator-b45778765-wpd96" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.663882 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gxzh8\" (UniqueName: \"kubernetes.io/projected/46b3a5d1-cee1-4c83-b13f-94dcf4dee467-kube-api-access-gxzh8\") pod \"dns-default-s4zp9\" (UID: \"46b3a5d1-cee1-4c83-b13f-94dcf4dee467\") " pod="openshift-dns/dns-default-s4zp9" Nov 26 
06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.664046 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/2b02d314-392f-44f3-a88c-57d8852fbcf9-registry-tls\") pod \"image-registry-697d97f7c8-54mhl\" (UID: \"2b02d314-392f-44f3-a88c-57d8852fbcf9\") " pod="openshift-image-registry/image-registry-697d97f7c8-54mhl" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.663689 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/78231c38-8677-4dd0-b845-9a498909e94a-mountpoint-dir\") pod \"csi-hostpathplugin-8bhxb\" (UID: \"78231c38-8677-4dd0-b845-9a498909e94a\") " pod="hostpath-provisioner/csi-hostpathplugin-8bhxb" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.664113 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/78231c38-8677-4dd0-b845-9a498909e94a-socket-dir\") pod \"csi-hostpathplugin-8bhxb\" (UID: \"78231c38-8677-4dd0-b845-9a498909e94a\") " pod="hostpath-provisioner/csi-hostpathplugin-8bhxb" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.664191 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/868da700-b5be-4495-b927-afcd1bc2c1cd-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-ftkfp\" (UID: \"868da700-b5be-4495-b927-afcd1bc2c1cd\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-ftkfp" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.663981 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/78231c38-8677-4dd0-b845-9a498909e94a-plugins-dir\") pod \"csi-hostpathplugin-8bhxb\" (UID: \"78231c38-8677-4dd0-b845-9a498909e94a\") " pod="hostpath-provisioner/csi-hostpathplugin-8bhxb" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.664449 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/2b02d314-392f-44f3-a88c-57d8852fbcf9-trusted-ca\") pod \"image-registry-697d97f7c8-54mhl\" (UID: \"2b02d314-392f-44f3-a88c-57d8852fbcf9\") " pod="openshift-image-registry/image-registry-697d97f7c8-54mhl" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.664579 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/7b1d2d57-f6df-48c9-b763-cd42f141b4f8-node-bootstrap-token\") pod \"machine-config-server-p4c46\" (UID: \"7b1d2d57-f6df-48c9-b763-cd42f141b4f8\") " pod="openshift-machine-config-operator/machine-config-server-p4c46" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.664608 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/2b02d314-392f-44f3-a88c-57d8852fbcf9-ca-trust-extracted\") pod \"image-registry-697d97f7c8-54mhl\" (UID: \"2b02d314-392f-44f3-a88c-57d8852fbcf9\") " pod="openshift-image-registry/image-registry-697d97f7c8-54mhl" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.664729 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/2c89a992-675a-4189-9723-1a868ab0e4f5-trusted-ca-bundle\") pod 
\"authentication-operator-69f744f599-vzwh5\" (UID: \"2c89a992-675a-4189-9723-1a868ab0e4f5\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-vzwh5" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.664760 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/78231c38-8677-4dd0-b845-9a498909e94a-csi-data-dir\") pod \"csi-hostpathplugin-8bhxb\" (UID: \"78231c38-8677-4dd0-b845-9a498909e94a\") " pod="hostpath-provisioner/csi-hostpathplugin-8bhxb" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.664886 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9bmr9\" (UniqueName: \"kubernetes.io/projected/288b0000-22a3-4ae5-b07d-46d62037c91e-kube-api-access-9bmr9\") pod \"packageserver-d55dfcdfc-gx7xk\" (UID: \"288b0000-22a3-4ae5-b07d-46d62037c91e\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-gx7xk" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.664908 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/f84c3a91-f74e-4270-ab0a-7c92f151b3fd-etcd-ca\") pod \"etcd-operator-b45778765-wpd96\" (UID: \"f84c3a91-f74e-4270-ab0a-7c92f151b3fd\") " pod="openshift-etcd-operator/etcd-operator-b45778765-wpd96" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.664926 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/f84c3a91-f74e-4270-ab0a-7c92f151b3fd-etcd-service-ca\") pod \"etcd-operator-b45778765-wpd96\" (UID: \"f84c3a91-f74e-4270-ab0a-7c92f151b3fd\") " pod="openshift-etcd-operator/etcd-operator-b45778765-wpd96" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.665066 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/288b0000-22a3-4ae5-b07d-46d62037c91e-webhook-cert\") pod \"packageserver-d55dfcdfc-gx7xk\" (UID: \"288b0000-22a3-4ae5-b07d-46d62037c91e\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-gx7xk" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.665088 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/2b02d314-392f-44f3-a88c-57d8852fbcf9-ca-trust-extracted\") pod \"image-registry-697d97f7c8-54mhl\" (UID: \"2b02d314-392f-44f3-a88c-57d8852fbcf9\") " pod="openshift-image-registry/image-registry-697d97f7c8-54mhl" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.665089 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/7c5436ae-c6b0-4c8e-b45f-e580fef03690-secret-volume\") pod \"collect-profiles-29402325-9jvn6\" (UID: \"7c5436ae-c6b0-4c8e-b45f-e580fef03690\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402325-9jvn6" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.665189 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5hkt5\" (UniqueName: \"kubernetes.io/projected/c6597a61-a709-458b-9fd4-1c1213fda7c0-kube-api-access-5hkt5\") pod \"ingress-canary-zth84\" (UID: \"c6597a61-a709-458b-9fd4-1c1213fda7c0\") " pod="openshift-ingress-canary/ingress-canary-zth84" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.665233 4492 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/46b3a5d1-cee1-4c83-b13f-94dcf4dee467-config-volume\") pod \"dns-default-s4zp9\" (UID: \"46b3a5d1-cee1-4c83-b13f-94dcf4dee467\") " pod="openshift-dns/dns-default-s4zp9" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.665266 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/8815abe1-5cb1-4baa-97be-479d29d71055-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-nptnl\" (UID: \"8815abe1-5cb1-4baa-97be-479d29d71055\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-nptnl" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.665312 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/2c89a992-675a-4189-9723-1a868ab0e4f5-service-ca-bundle\") pod \"authentication-operator-69f744f599-vzwh5\" (UID: \"2c89a992-675a-4189-9723-1a868ab0e4f5\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-vzwh5" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.665350 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9fc580a3-4f35-4c1c-8467-7cec5540712c-serving-cert\") pod \"service-ca-operator-777779d784-6wjd8\" (UID: \"9fc580a3-4f35-4c1c-8467-7cec5540712c\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-6wjd8" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.665381 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/2b02d314-392f-44f3-a88c-57d8852fbcf9-bound-sa-token\") pod \"image-registry-697d97f7c8-54mhl\" (UID: \"2b02d314-392f-44f3-a88c-57d8852fbcf9\") " pod="openshift-image-registry/image-registry-697d97f7c8-54mhl" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.665428 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/78231c38-8677-4dd0-b845-9a498909e94a-registration-dir\") pod \"csi-hostpathplugin-8bhxb\" (UID: \"78231c38-8677-4dd0-b845-9a498909e94a\") " pod="hostpath-provisioner/csi-hostpathplugin-8bhxb" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.665451 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/78231c38-8677-4dd0-b845-9a498909e94a-csi-data-dir\") pod \"csi-hostpathplugin-8bhxb\" (UID: \"78231c38-8677-4dd0-b845-9a498909e94a\") " pod="hostpath-provisioner/csi-hostpathplugin-8bhxb" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.665476 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2c89a992-675a-4189-9723-1a868ab0e4f5-config\") pod \"authentication-operator-69f744f599-vzwh5\" (UID: \"2c89a992-675a-4189-9723-1a868ab0e4f5\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-vzwh5" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.665511 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g9lzh\" (UniqueName: \"kubernetes.io/projected/f84c3a91-f74e-4270-ab0a-7c92f151b3fd-kube-api-access-g9lzh\") pod \"etcd-operator-b45778765-wpd96\" (UID: 
\"f84c3a91-f74e-4270-ab0a-7c92f151b3fd\") " pod="openshift-etcd-operator/etcd-operator-b45778765-wpd96" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.665809 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/78231c38-8677-4dd0-b845-9a498909e94a-registration-dir\") pod \"csi-hostpathplugin-8bhxb\" (UID: \"78231c38-8677-4dd0-b845-9a498909e94a\") " pod="hostpath-provisioner/csi-hostpathplugin-8bhxb" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.666008 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/2b02d314-392f-44f3-a88c-57d8852fbcf9-registry-certificates\") pod \"image-registry-697d97f7c8-54mhl\" (UID: \"2b02d314-392f-44f3-a88c-57d8852fbcf9\") " pod="openshift-image-registry/image-registry-697d97f7c8-54mhl" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.672126 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-sb6qj"] Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.700297 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-q68m6"] Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.705590 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/7c5436ae-c6b0-4c8e-b45f-e580fef03690-secret-volume\") pod \"collect-profiles-29402325-9jvn6\" (UID: \"7c5436ae-c6b0-4c8e-b45f-e580fef03690\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402325-9jvn6" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.708887 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.712969 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f84c3a91-f74e-4270-ab0a-7c92f151b3fd-config\") pod \"etcd-operator-b45778765-wpd96\" (UID: \"f84c3a91-f74e-4270-ab0a-7c92f151b3fd\") " pod="openshift-etcd-operator/etcd-operator-b45778765-wpd96" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.726228 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q2fh4\" (UniqueName: \"kubernetes.io/projected/2b02d314-392f-44f3-a88c-57d8852fbcf9-kube-api-access-q2fh4\") pod \"image-registry-697d97f7c8-54mhl\" (UID: \"2b02d314-392f-44f3-a88c-57d8852fbcf9\") " pod="openshift-image-registry/image-registry-697d97f7c8-54mhl" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.746991 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2qzxc\" (UniqueName: \"kubernetes.io/projected/8815abe1-5cb1-4baa-97be-479d29d71055-kube-api-access-2qzxc\") pod \"control-plane-machine-set-operator-78cbb6b69f-nptnl\" (UID: \"8815abe1-5cb1-4baa-97be-479d29d71055\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-nptnl" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.763683 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.766529 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: 
\"kubernetes.io/secret/2b02d314-392f-44f3-a88c-57d8852fbcf9-installation-pull-secrets\") pod \"image-registry-697d97f7c8-54mhl\" (UID: \"2b02d314-392f-44f3-a88c-57d8852fbcf9\") " pod="openshift-image-registry/image-registry-697d97f7c8-54mhl" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.766994 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/c6597a61-a709-458b-9fd4-1c1213fda7c0-cert\") pod \"ingress-canary-zth84\" (UID: \"c6597a61-a709-458b-9fd4-1c1213fda7c0\") " pod="openshift-ingress-canary/ingress-canary-zth84" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.767287 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5hkt5\" (UniqueName: \"kubernetes.io/projected/c6597a61-a709-458b-9fd4-1c1213fda7c0-kube-api-access-5hkt5\") pod \"ingress-canary-zth84\" (UID: \"c6597a61-a709-458b-9fd4-1c1213fda7c0\") " pod="openshift-ingress-canary/ingress-canary-zth84" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.767396 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-54mhl\" (UID: \"2b02d314-392f-44f3-a88c-57d8852fbcf9\") " pod="openshift-image-registry/image-registry-697d97f7c8-54mhl" Nov 26 06:50:24 crc kubenswrapper[4492]: E1126 06:50:24.767704 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 06:50:25.26768984 +0000 UTC m=+121.151578139 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-54mhl" (UID: "2b02d314-392f-44f3-a88c-57d8852fbcf9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.768700 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f7zr2\" (UniqueName: \"kubernetes.io/projected/7b1d2d57-f6df-48c9-b763-cd42f141b4f8-kube-api-access-f7zr2\") pod \"machine-config-server-p4c46\" (UID: \"7b1d2d57-f6df-48c9-b763-cd42f141b4f8\") " pod="openshift-machine-config-operator/machine-config-server-p4c46" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.837727 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-c6mkp"] Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.842706 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.847851 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/46b3a5d1-cee1-4c83-b13f-94dcf4dee467-metrics-tls\") pod \"dns-default-s4zp9\" (UID: \"46b3a5d1-cee1-4c83-b13f-94dcf4dee467\") " pod="openshift-dns/dns-default-s4zp9" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.874095 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 06:50:24 crc kubenswrapper[4492]: E1126 06:50:24.874816 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 06:50:25.374800006 +0000 UTC m=+121.258688304 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.893987 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vnxzq\" (UniqueName: \"kubernetes.io/projected/868da700-b5be-4495-b927-afcd1bc2c1cd-kube-api-access-vnxzq\") pod \"package-server-manager-789f6589d5-ftkfp\" (UID: \"868da700-b5be-4495-b927-afcd1bc2c1cd\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-ftkfp" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.905454 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.905642 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.911701 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.920620 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"certs\" (UniqueName: \"kubernetes.io/secret/7b1d2d57-f6df-48c9-b763-cd42f141b4f8-certs\") pod \"machine-config-server-p4c46\" (UID: \"7b1d2d57-f6df-48c9-b763-cd42f141b4f8\") " pod="openshift-machine-config-operator/machine-config-server-p4c46" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.922154 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/288b0000-22a3-4ae5-b07d-46d62037c91e-webhook-cert\") pod \"packageserver-d55dfcdfc-gx7xk\" (UID: \"288b0000-22a3-4ae5-b07d-46d62037c91e\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-gx7xk" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.922682 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f84c3a91-f74e-4270-ab0a-7c92f151b3fd-serving-cert\") pod \"etcd-operator-b45778765-wpd96\" (UID: \"f84c3a91-f74e-4270-ab0a-7c92f151b3fd\") " pod="openshift-etcd-operator/etcd-operator-b45778765-wpd96" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.938925 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.939662 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/3d7ce394-3661-4a68-be77-51bf1e1d5c94-signing-cabundle\") pod \"service-ca-9c57cc56f-v9n6p\" (UID: \"3d7ce394-3661-4a68-be77-51bf1e1d5c94\") " pod="openshift-service-ca/service-ca-9c57cc56f-v9n6p" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.951189 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/288b0000-22a3-4ae5-b07d-46d62037c91e-apiservice-cert\") pod \"packageserver-d55dfcdfc-gx7xk\" (UID: \"288b0000-22a3-4ae5-b07d-46d62037c91e\") " 
pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-gx7xk" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.957616 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.969276 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2c89a992-675a-4189-9723-1a868ab0e4f5-serving-cert\") pod \"authentication-operator-69f744f599-vzwh5\" (UID: \"2c89a992-675a-4189-9723-1a868ab0e4f5\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-vzwh5" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.969483 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.975997 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-54mhl\" (UID: \"2b02d314-392f-44f3-a88c-57d8852fbcf9\") " pod="openshift-image-registry/image-registry-697d97f7c8-54mhl" Nov 26 06:50:24 crc kubenswrapper[4492]: E1126 06:50:24.976285 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 06:50:25.476275318 +0000 UTC m=+121.360163615 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-54mhl" (UID: "2b02d314-392f-44f3-a88c-57d8852fbcf9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.993577 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 26 06:50:24 crc kubenswrapper[4492]: I1126 06:50:24.994604 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/7c5436ae-c6b0-4c8e-b45f-e580fef03690-config-volume\") pod \"collect-profiles-29402325-9jvn6\" (UID: \"7c5436ae-c6b0-4c8e-b45f-e580fef03690\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402325-9jvn6" Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.005816 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/3d7ce394-3661-4a68-be77-51bf1e1d5c94-signing-key\") pod \"service-ca-9c57cc56f-v9n6p\" (UID: \"3d7ce394-3661-4a68-be77-51bf1e1d5c94\") " pod="openshift-service-ca/service-ca-9c57cc56f-v9n6p" Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.006712 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.016724 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9fc580a3-4f35-4c1c-8467-7cec5540712c-config\") pod 
\"service-ca-operator-777779d784-6wjd8\" (UID: \"9fc580a3-4f35-4c1c-8467-7cec5540712c\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-6wjd8" Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.030418 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.040473 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/2b02d314-392f-44f3-a88c-57d8852fbcf9-registry-tls\") pod \"image-registry-697d97f7c8-54mhl\" (UID: \"2b02d314-392f-44f3-a88c-57d8852fbcf9\") " pod="openshift-image-registry/image-registry-697d97f7c8-54mhl" Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.074374 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.088665 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/f84c3a91-f74e-4270-ab0a-7c92f151b3fd-etcd-client\") pod \"etcd-operator-b45778765-wpd96\" (UID: \"f84c3a91-f74e-4270-ab0a-7c92f151b3fd\") " pod="openshift-etcd-operator/etcd-operator-b45778765-wpd96" Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.089405 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-f4fjj"] Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.090768 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 06:50:25 crc kubenswrapper[4492]: E1126 06:50:25.116699 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 06:50:25.616644794 +0000 UTC m=+121.500533092 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.124004 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.139364 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/868da700-b5be-4495-b927-afcd1bc2c1cd-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-ftkfp\" (UID: \"868da700-b5be-4495-b927-afcd1bc2c1cd\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-ftkfp" Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.139896 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gxzh8\" (UniqueName: \"kubernetes.io/projected/46b3a5d1-cee1-4c83-b13f-94dcf4dee467-kube-api-access-gxzh8\") pod \"dns-default-s4zp9\" (UID: \"46b3a5d1-cee1-4c83-b13f-94dcf4dee467\") " pod="openshift-dns/dns-default-s4zp9" Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.159446 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.171916 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/7b1d2d57-f6df-48c9-b763-cd42f141b4f8-node-bootstrap-token\") pod \"machine-config-server-p4c46\" (UID: \"7b1d2d57-f6df-48c9-b763-cd42f141b4f8\") " pod="openshift-machine-config-operator/machine-config-server-p4c46" Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.173923 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8rhl2\" (UniqueName: \"kubernetes.io/projected/7c5436ae-c6b0-4c8e-b45f-e580fef03690-kube-api-access-8rhl2\") pod \"collect-profiles-29402325-9jvn6\" (UID: \"7c5436ae-c6b0-4c8e-b45f-e580fef03690\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402325-9jvn6" Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.178225 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-b65ps" event={"ID":"6498f1f9-685a-4a0f-a108-ad49ed7b7576","Type":"ContainerStarted","Data":"d270368c43e7d1a8b2a65efdd440c94ecf1c0ec693e46cc9ea8090a96021326e"} Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.198931 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-54mhl\" (UID: \"2b02d314-392f-44f3-a88c-57d8852fbcf9\") " pod="openshift-image-registry/image-registry-697d97f7c8-54mhl" Nov 26 06:50:25 crc kubenswrapper[4492]: E1126 06:50:25.199285 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" 
failed. No retries permitted until 2025-11-26 06:50:25.699272316 +0000 UTC m=+121.583160614 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-54mhl" (UID: "2b02d314-392f-44f3-a88c-57d8852fbcf9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.200001 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-dtlxh"] Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.223444 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-c6mkp" event={"ID":"c9b86c64-f8b8-470e-ad5b-723c1122418f","Type":"ContainerStarted","Data":"d338117a4894dd83647b2e21863dcf27e5eaeeca3711d2f03f6f4ed3f1058a21"} Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.224572 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.228850 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/f84c3a91-f74e-4270-ab0a-7c92f151b3fd-etcd-service-ca\") pod \"etcd-operator-b45778765-wpd96\" (UID: \"f84c3a91-f74e-4270-ab0a-7c92f151b3fd\") " pod="openshift-etcd-operator/etcd-operator-b45778765-wpd96" Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.229714 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9bmr9\" (UniqueName: \"kubernetes.io/projected/288b0000-22a3-4ae5-b07d-46d62037c91e-kube-api-access-9bmr9\") pod \"packageserver-d55dfcdfc-gx7xk\" (UID: \"288b0000-22a3-4ae5-b07d-46d62037c91e\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-gx7xk" Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.239104 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.252417 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/f84c3a91-f74e-4270-ab0a-7c92f151b3fd-etcd-ca\") pod \"etcd-operator-b45778765-wpd96\" (UID: \"f84c3a91-f74e-4270-ab0a-7c92f151b3fd\") " pod="openshift-etcd-operator/etcd-operator-b45778765-wpd96" Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.270622 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.288616 4492 generic.go:334] "Generic (PLEG): container finished" podID="774cf983-095e-498f-9d55-6b7c5be37265" containerID="fb30cf6d59b397568bf7d9a387d1ab5cc33d4e7f9ba6ac45b02f8568360d8278" exitCode=0 Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.288874 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-vvl6g" event={"ID":"774cf983-095e-498f-9d55-6b7c5be37265","Type":"ContainerDied","Data":"fb30cf6d59b397568bf7d9a387d1ab5cc33d4e7f9ba6ac45b02f8568360d8278"} Nov 26 06:50:25 crc 
kubenswrapper[4492]: I1126 06:50:25.296166 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/2c89a992-675a-4189-9723-1a868ab0e4f5-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-vzwh5\" (UID: \"2c89a992-675a-4189-9723-1a868ab0e4f5\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-vzwh5" Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.300055 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 06:50:25 crc kubenswrapper[4492]: E1126 06:50:25.304418 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 06:50:25.804396127 +0000 UTC m=+121.688284425 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.310763 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.312442 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-54mhl\" (UID: \"2b02d314-392f-44f3-a88c-57d8852fbcf9\") " pod="openshift-image-registry/image-registry-697d97f7c8-54mhl" Nov 26 06:50:25 crc kubenswrapper[4492]: E1126 06:50:25.313153 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 06:50:25.81313838 +0000 UTC m=+121.697026678 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-54mhl" (UID: "2b02d314-392f-44f3-a88c-57d8852fbcf9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.330232 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-zvw72" event={"ID":"587ccafa-460d-41b6-bced-9a82822fa43c","Type":"ContainerStarted","Data":"5db4dcd44cf9ba53c4100e901147a5da9925da921ddacfa11275e0b5b090fb16"} Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.330280 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-zvw72" event={"ID":"587ccafa-460d-41b6-bced-9a82822fa43c","Type":"ContainerStarted","Data":"0826e1d75bd577b3f91c376f5e695992ac5e2e5e57aac7ed1ebeaff6e9ce41e4"} Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.331548 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-zvw72" Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.333770 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.337444 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/46b3a5d1-cee1-4c83-b13f-94dcf4dee467-config-volume\") pod \"dns-default-s4zp9\" (UID: \"46b3a5d1-cee1-4c83-b13f-94dcf4dee467\") " pod="openshift-dns/dns-default-s4zp9" Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.340163 4492 patch_prober.go:28] interesting pod/downloads-7954f5f757-zvw72 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.11:8080/\": dial tcp 10.217.0.11:8080: connect: connection refused" start-of-body= Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.340213 4492 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-zvw72" podUID="587ccafa-460d-41b6-bced-9a82822fa43c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.11:8080/\": dial tcp 10.217.0.11:8080: connect: connection refused" Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.341146 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/2b02d314-392f-44f3-a88c-57d8852fbcf9-bound-sa-token\") pod \"image-registry-697d97f7c8-54mhl\" (UID: \"2b02d314-392f-44f3-a88c-57d8852fbcf9\") " pod="openshift-image-registry/image-registry-697d97f7c8-54mhl" Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.344940 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-kkhsz" event={"ID":"8fe63b27-ce8d-40a9-96db-fd485ede156a","Type":"ContainerStarted","Data":"439bbd2ef0a018fdc5cd9eb4697595746f43df6a00a8ea4336f322bc1113ea2f"} Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.344975 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-kkhsz" 
event={"ID":"8fe63b27-ce8d-40a9-96db-fd485ede156a","Type":"ContainerStarted","Data":"0eb278e64eb2a3480d1c6ef075c8b043ad9c3aea7612e09901760cfb43db99d0"} Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.346968 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9fc580a3-4f35-4c1c-8467-7cec5540712c-serving-cert\") pod \"service-ca-operator-777779d784-6wjd8\" (UID: \"9fc580a3-4f35-4c1c-8467-7cec5540712c\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-6wjd8" Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.347567 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.349133 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-mstsb" event={"ID":"1394fe3d-57d3-4340-9c97-1d75ac5e8ce4","Type":"ContainerStarted","Data":"c428f4c91cae9f6d108041d010ce2167b55d9315bf267a487a5fe6e9ddc1bc01"} Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.352391 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-6cm4x" event={"ID":"61b9d2cc-8f4e-41b9-a186-c6f1613da80d","Type":"ContainerStarted","Data":"05c9574a0455177c779dfc12b60826cda23493e190a28eb6232a5d2af8b0d402"} Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.359735 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.362563 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/2c89a992-675a-4189-9723-1a868ab0e4f5-service-ca-bundle\") pod \"authentication-operator-69f744f599-vzwh5\" (UID: \"2c89a992-675a-4189-9723-1a868ab0e4f5\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-vzwh5" Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.363777 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/8815abe1-5cb1-4baa-97be-479d29d71055-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-nptnl\" (UID: \"8815abe1-5cb1-4baa-97be-479d29d71055\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-nptnl" Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.368505 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.379070 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2c89a992-675a-4189-9723-1a868ab0e4f5-config\") pod \"authentication-operator-69f744f599-vzwh5\" (UID: \"2c89a992-675a-4189-9723-1a868ab0e4f5\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-vzwh5" Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.387949 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-sb6qj" event={"ID":"05a0ee0a-7b86-490a-8638-8d74ad1446ea","Type":"ContainerStarted","Data":"b422421b8ee71fe8d83cd891c6f7c49840b9118d3cbc1874361670252c3eb44a"} Nov 26 06:50:25 crc 
kubenswrapper[4492]: I1126 06:50:25.389346 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-ftkfp" Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.397487 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-gx7xk" Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.411251 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.425451 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 06:50:25 crc kubenswrapper[4492]: E1126 06:50:25.427050 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 06:50:25.927027187 +0000 UTC m=+121.810915485 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.427742 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-7nxg8" event={"ID":"815ac909-aa66-4d5c-bbaf-1ef88810cb22","Type":"ContainerStarted","Data":"9041784ec521d4f98b64b3e93b04b0d0069ee42dec5e099a2ffa36ba79050567"} Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.427841 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-7nxg8" event={"ID":"815ac909-aa66-4d5c-bbaf-1ef88810cb22","Type":"ContainerStarted","Data":"28e6a0bd81586c96ab2379e03240db04cdf3927196eb390d541180ccab68c214"} Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.428409 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console-operator/console-operator-58897d9998-7nxg8" Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.430277 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/c6597a61-a709-458b-9fd4-1c1213fda7c0-cert\") pod \"ingress-canary-zth84\" (UID: \"c6597a61-a709-458b-9fd4-1c1213fda7c0\") " pod="openshift-ingress-canary/ingress-canary-zth84" Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.444661 4492 patch_prober.go:28] interesting pod/console-operator-58897d9998-7nxg8 container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.6:8443/readyz\": dial tcp 10.217.0.6:8443: connect: connection refused" start-of-body= Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.444711 4492 prober.go:107] "Probe failed" probeType="Readiness" 
pod="openshift-console-operator/console-operator-58897d9998-7nxg8" podUID="815ac909-aa66-4d5c-bbaf-1ef88810cb22" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.6:8443/readyz\": dial tcp 10.217.0.6:8443: connect: connection refused" Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.454826 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.454933 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-8q29r" event={"ID":"85b4355d-1d0f-4cf3-9902-4b68bd36704a","Type":"ContainerStarted","Data":"edc8faad224909941a5e04ebf7834e3525b27e2b544c8183626d8b91df97d56a"} Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.471447 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.505516 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.506440 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-hlq25"] Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.511667 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.512071 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-j8pvq"] Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.513562 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-r6lm8" event={"ID":"198fd913-6670-4880-874e-cce2c186c203","Type":"ContainerStarted","Data":"9a4c7384e0e26607425ba339f3dfc98d2e2a3f33bd2b1be78cb1f4c14a633a9a"} Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.530342 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.530423 4492 generic.go:334] "Generic (PLEG): container finished" podID="abfcd3ff-81af-4eff-91d1-3329b3b437af" containerID="1c14c694690bf76e16ef745fc9c05e6c041035084662883a46d058cdd1942dad" exitCode=0 Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.530499 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-284jz" event={"ID":"abfcd3ff-81af-4eff-91d1-3329b3b437af","Type":"ContainerDied","Data":"1c14c694690bf76e16ef745fc9c05e6c041035084662883a46d058cdd1942dad"} Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.531401 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-54mhl\" (UID: \"2b02d314-392f-44f3-a88c-57d8852fbcf9\") " pod="openshift-image-registry/image-registry-697d97f7c8-54mhl" Nov 26 06:50:25 crc kubenswrapper[4492]: E1126 06:50:25.532494 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2025-11-26 06:50:26.032483353 +0000 UTC m=+121.916371650 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-54mhl" (UID: "2b02d314-392f-44f3-a88c-57d8852fbcf9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.548568 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.551385 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402325-9jvn6" Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.551702 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-txj8k" event={"ID":"9b62bab1-bcc8-45a1-8b79-e6fadfce9e3c","Type":"ContainerStarted","Data":"b1df2626abcfe96a1e1530a704c159d76154917cf647b7a53d700e19d595d448"} Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.551728 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-txj8k" event={"ID":"9b62bab1-bcc8-45a1-8b79-e6fadfce9e3c","Type":"ContainerStarted","Data":"d1116151811ec502855362063b8e95ea39011f6c14b74db5ff2707b9e2737a24"} Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.565426 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-q68m6" event={"ID":"bbde335f-1472-40de-b5ab-9867bc9b44cd","Type":"ContainerStarted","Data":"15fa8a3c54657de16df6976575ff192ff601713e82510dc76be46a6cd3afab17"} Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.565539 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-q68m6" Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.572630 4492 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-q68m6 container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.15:8443/healthz\": dial tcp 10.217.0.15:8443: connect: connection refused" start-of-body= Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.572797 4492 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-q68m6" podUID="bbde335f-1472-40de-b5ab-9867bc9b44cd" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.15:8443/healthz\": dial tcp 10.217.0.15:8443: connect: connection refused" Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.580466 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-ingress/router-default-5444994796-r6lm8" Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.587943 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-879f6c89f-kv6rz" Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.588186 4492 patch_prober.go:28] interesting pod/router-default-5444994796-r6lm8 container/router namespace/openshift-ingress: Startup 
probe status=failure output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" start-of-body= Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.588219 4492 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-r6lm8" podUID="198fd913-6670-4880-874e-cce2c186c203" containerName="router" probeResult="failure" output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.591542 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.594794 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-s4zp9" Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.621367 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.634482 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.635814 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.636276 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-p4c46" Nov 26 06:50:25 crc kubenswrapper[4492]: E1126 06:50:25.637903 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 06:50:26.137887401 +0000 UTC m=+122.021775699 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.645621 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-54mhl\" (UID: \"2b02d314-392f-44f3-a88c-57d8852fbcf9\") " pod="openshift-image-registry/image-registry-697d97f7c8-54mhl" Nov 26 06:50:25 crc kubenswrapper[4492]: E1126 06:50:25.656670 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 06:50:26.156659308 +0000 UTC m=+122.040547606 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-54mhl" (UID: "2b02d314-392f-44f3-a88c-57d8852fbcf9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.668478 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.695441 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.697705 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.700632 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tbtrk\" (UniqueName: \"kubernetes.io/projected/2c89a992-675a-4189-9723-1a868ab0e4f5-kube-api-access-tbtrk\") pod \"authentication-operator-69f744f599-vzwh5\" (UID: \"2c89a992-675a-4189-9723-1a868ab0e4f5\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-vzwh5" Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.708029 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rgxqd\" (UniqueName: \"kubernetes.io/projected/78231c38-8677-4dd0-b845-9a498909e94a-kube-api-access-rgxqd\") pod \"csi-hostpathplugin-8bhxb\" (UID: \"78231c38-8677-4dd0-b845-9a498909e94a\") " pod="hostpath-provisioner/csi-hostpathplugin-8bhxb" Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.724769 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.730700 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-czsvt"] Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.733995 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.742004 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pqxdt\" (UniqueName: \"kubernetes.io/projected/9fc580a3-4f35-4c1c-8467-7cec5540712c-kube-api-access-pqxdt\") pod \"service-ca-operator-777779d784-6wjd8\" (UID: \"9fc580a3-4f35-4c1c-8467-7cec5540712c\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-6wjd8" Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.744665 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g9lzh\" (UniqueName: \"kubernetes.io/projected/f84c3a91-f74e-4270-ab0a-7c92f151b3fd-kube-api-access-g9lzh\") pod \"etcd-operator-b45778765-wpd96\" (UID: \"f84c3a91-f74e-4270-ab0a-7c92f151b3fd\") " pod="openshift-etcd-operator/etcd-operator-b45778765-wpd96" Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.749252 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.750054 4492 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 06:50:25 crc kubenswrapper[4492]: E1126 06:50:25.750683 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 06:50:26.250666231 +0000 UTC m=+122.134554519 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.761734 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dzxq5\" (UniqueName: \"kubernetes.io/projected/3d7ce394-3661-4a68-be77-51bf1e1d5c94-kube-api-access-dzxq5\") pod \"service-ca-9c57cc56f-v9n6p\" (UID: \"3d7ce394-3661-4a68-be77-51bf1e1d5c94\") " pod="openshift-service-ca/service-ca-9c57cc56f-v9n6p" Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.773294 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5hkt5\" (UniqueName: \"kubernetes.io/projected/c6597a61-a709-458b-9fd4-1c1213fda7c0-kube-api-access-5hkt5\") pod \"ingress-canary-zth84\" (UID: \"c6597a61-a709-458b-9fd4-1c1213fda7c0\") " pod="openshift-ingress-canary/ingress-canary-zth84" Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.802689 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.810231 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-nptnl" Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.819922 4492 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.820089 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-8bhxb" Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.843740 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-c2wr2"] Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.852996 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-54mhl\" (UID: \"2b02d314-392f-44f3-a88c-57d8852fbcf9\") " pod="openshift-image-registry/image-registry-697d97f7c8-54mhl" Nov 26 06:50:25 crc kubenswrapper[4492]: E1126 06:50:25.853303 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 06:50:26.353290343 +0000 UTC m=+122.237178642 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-54mhl" (UID: "2b02d314-392f-44f3-a88c-57d8852fbcf9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.957494 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 06:50:25 crc kubenswrapper[4492]: E1126 06:50:25.958241 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 06:50:26.458219207 +0000 UTC m=+122.342107505 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:50:25 crc kubenswrapper[4492]: W1126 06:50:25.963329 4492 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3e6813b0_5667_42dc_89e2_7c684448700c.slice/crio-04186d43161bfb60e7463298162418c7422493d172724c06c7387fe54b5b6171 WatchSource:0}: Error finding container 04186d43161bfb60e7463298162418c7422493d172724c06c7387fe54b5b6171: Status 404 returned error can't find the container with id 04186d43161bfb60e7463298162418c7422493d172724c06c7387fe54b5b6171 Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.971386 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.979907 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-wpd96" Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.987148 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Nov 26 06:50:25 crc kubenswrapper[4492]: I1126 06:50:25.996240 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-6wjd8" Nov 26 06:50:26 crc kubenswrapper[4492]: I1126 06:50:26.014606 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Nov 26 06:50:26 crc kubenswrapper[4492]: I1126 06:50:26.022236 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-vzwh5" Nov 26 06:50:26 crc kubenswrapper[4492]: I1126 06:50:26.036986 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Nov 26 06:50:26 crc kubenswrapper[4492]: I1126 06:50:26.037249 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-v9n6p" Nov 26 06:50:26 crc kubenswrapper[4492]: I1126 06:50:26.064207 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-54mhl\" (UID: \"2b02d314-392f-44f3-a88c-57d8852fbcf9\") " pod="openshift-image-registry/image-registry-697d97f7c8-54mhl" Nov 26 06:50:26 crc kubenswrapper[4492]: E1126 06:50:26.064503 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 06:50:26.56449291 +0000 UTC m=+122.448381208 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-54mhl" (UID: "2b02d314-392f-44f3-a88c-57d8852fbcf9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:50:26 crc kubenswrapper[4492]: I1126 06:50:26.071072 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-558db77b4-2d74v" Nov 26 06:50:26 crc kubenswrapper[4492]: I1126 06:50:26.076723 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Nov 26 06:50:26 crc kubenswrapper[4492]: I1126 06:50:26.087411 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-zth84" Nov 26 06:50:26 crc kubenswrapper[4492]: I1126 06:50:26.166734 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 06:50:26 crc kubenswrapper[4492]: E1126 06:50:26.167117 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 06:50:26.667103135 +0000 UTC m=+122.550991434 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:50:26 crc kubenswrapper[4492]: I1126 06:50:26.273712 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-54mhl\" (UID: \"2b02d314-392f-44f3-a88c-57d8852fbcf9\") " pod="openshift-image-registry/image-registry-697d97f7c8-54mhl" Nov 26 06:50:26 crc kubenswrapper[4492]: E1126 06:50:26.273992 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 06:50:26.77397374 +0000 UTC m=+122.657862038 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-54mhl" (UID: "2b02d314-392f-44f3-a88c-57d8852fbcf9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:50:26 crc kubenswrapper[4492]: I1126 06:50:26.323417 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-xrjz8"] Nov 26 06:50:26 crc kubenswrapper[4492]: I1126 06:50:26.377627 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 06:50:26 crc kubenswrapper[4492]: E1126 06:50:26.377951 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 06:50:26.877927881 +0000 UTC m=+122.761816180 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:50:26 crc kubenswrapper[4492]: I1126 06:50:26.378063 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-54mhl\" (UID: \"2b02d314-392f-44f3-a88c-57d8852fbcf9\") " pod="openshift-image-registry/image-registry-697d97f7c8-54mhl" Nov 26 06:50:26 crc kubenswrapper[4492]: E1126 06:50:26.378451 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 06:50:26.878439664 +0000 UTC m=+122.762327963 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-54mhl" (UID: "2b02d314-392f-44f3-a88c-57d8852fbcf9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:50:26 crc kubenswrapper[4492]: I1126 06:50:26.400088 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-gx7xk"] Nov 26 06:50:26 crc kubenswrapper[4492]: I1126 06:50:26.422998 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-kfm7d"] Nov 26 06:50:26 crc kubenswrapper[4492]: I1126 06:50:26.484778 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 06:50:26 crc kubenswrapper[4492]: E1126 06:50:26.485104 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 06:50:26.985085235 +0000 UTC m=+122.868973534 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:50:26 crc kubenswrapper[4492]: I1126 06:50:26.485411 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-54mhl\" (UID: \"2b02d314-392f-44f3-a88c-57d8852fbcf9\") " pod="openshift-image-registry/image-registry-697d97f7c8-54mhl" Nov 26 06:50:26 crc kubenswrapper[4492]: E1126 06:50:26.485757 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 06:50:26.98574737 +0000 UTC m=+122.869635668 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-54mhl" (UID: "2b02d314-392f-44f3-a88c-57d8852fbcf9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:50:26 crc kubenswrapper[4492]: I1126 06:50:26.585140 4492 patch_prober.go:28] interesting pod/router-default-5444994796-r6lm8 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 26 06:50:26 crc kubenswrapper[4492]: [-]has-synced failed: reason withheld Nov 26 06:50:26 crc kubenswrapper[4492]: [+]process-running ok Nov 26 06:50:26 crc kubenswrapper[4492]: healthz check failed Nov 26 06:50:26 crc kubenswrapper[4492]: I1126 06:50:26.585207 4492 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-r6lm8" podUID="198fd913-6670-4880-874e-cce2c186c203" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 26 06:50:26 crc kubenswrapper[4492]: I1126 06:50:26.587121 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 06:50:26 crc kubenswrapper[4492]: E1126 06:50:26.587450 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 06:50:27.087433619 +0000 UTC m=+122.971321917 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:50:26 crc kubenswrapper[4492]: I1126 06:50:26.622859 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402325-9jvn6"] Nov 26 06:50:26 crc kubenswrapper[4492]: I1126 06:50:26.630722 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-dtlxh" event={"ID":"17d0a1a5-425a-424e-bbb7-b8a91d7fb3c4","Type":"ContainerStarted","Data":"7a4c3384b3460f1dcc58fbf926d3370d3bc45ecf0ceea60bd39044469938d7aa"} Nov 26 06:50:26 crc kubenswrapper[4492]: I1126 06:50:26.630779 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-dtlxh" event={"ID":"17d0a1a5-425a-424e-bbb7-b8a91d7fb3c4","Type":"ContainerStarted","Data":"da0ca3b69389f93f060a9c80e686df0853207aaf992ca84a7e84878d615f1ccf"} Nov 26 06:50:26 crc kubenswrapper[4492]: I1126 06:50:26.653084 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-gx7xk" event={"ID":"288b0000-22a3-4ae5-b07d-46d62037c91e","Type":"ContainerStarted","Data":"38800f7c3b5026597a531f735d5272dbbb27f84cc9f10835d4c5c048196add5e"} Nov 26 06:50:26 crc kubenswrapper[4492]: I1126 06:50:26.689410 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-54mhl\" (UID: \"2b02d314-392f-44f3-a88c-57d8852fbcf9\") " pod="openshift-image-registry/image-registry-697d97f7c8-54mhl" Nov 26 06:50:26 crc kubenswrapper[4492]: E1126 06:50:26.689889 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 06:50:27.18987068 +0000 UTC m=+123.073758977 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-54mhl" (UID: "2b02d314-392f-44f3-a88c-57d8852fbcf9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:50:26 crc kubenswrapper[4492]: I1126 06:50:26.698017 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-f4fjj" event={"ID":"53620b74-a029-497b-99e5-ee35b5f45b7d","Type":"ContainerStarted","Data":"551769526ed2ae9bfb85cbf97e673954d3a32de9494482bc9f4365b39d5aaf22"} Nov 26 06:50:26 crc kubenswrapper[4492]: I1126 06:50:26.698081 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-f4fjj" event={"ID":"53620b74-a029-497b-99e5-ee35b5f45b7d","Type":"ContainerStarted","Data":"f42bb76d569e4c4b9d2dd47c3146c1b71a49ff249089ac5b794e8f061b8694d7"} Nov 26 06:50:26 crc kubenswrapper[4492]: I1126 06:50:26.698094 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-f4fjj" event={"ID":"53620b74-a029-497b-99e5-ee35b5f45b7d","Type":"ContainerStarted","Data":"75bc7cf605239afc727a214b7bbb3965d8868aaac0e1fe4600c1d3dbe13abae5"} Nov 26 06:50:26 crc kubenswrapper[4492]: I1126 06:50:26.750813 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-hlq25" event={"ID":"0c3ee07d-c654-4e14-afbd-fa5de3145b4d","Type":"ContainerStarted","Data":"862deec5f133f49f020dcc507ed7e671e272b1ebf75224d84c8e5b1857158a63"} Nov 26 06:50:26 crc kubenswrapper[4492]: I1126 06:50:26.750893 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-hlq25" event={"ID":"0c3ee07d-c654-4e14-afbd-fa5de3145b4d","Type":"ContainerStarted","Data":"e443c1282a6a3997abedd67540624b2d019ab52a8ff38bb3ef56fc549bdd82d9"} Nov 26 06:50:26 crc kubenswrapper[4492]: I1126 06:50:26.761525 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-ftkfp"] Nov 26 06:50:26 crc kubenswrapper[4492]: I1126 06:50:26.769508 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-s4zp9"] Nov 26 06:50:26 crc kubenswrapper[4492]: I1126 06:50:26.784345 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-vvl6g" event={"ID":"774cf983-095e-498f-9d55-6b7c5be37265","Type":"ContainerStarted","Data":"09088b80b2865c81b616237daacc8b469e1546f6b9fa68b0988ff64a3bf2515a"} Nov 26 06:50:26 crc kubenswrapper[4492]: I1126 06:50:26.784385 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-config-operator/openshift-config-operator-7777fb866f-vvl6g" Nov 26 06:50:26 crc kubenswrapper[4492]: I1126 06:50:26.791485 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 06:50:26 crc kubenswrapper[4492]: E1126 
06:50:26.792535 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 06:50:27.292518446 +0000 UTC m=+123.176406744 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:50:26 crc kubenswrapper[4492]: I1126 06:50:26.807005 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-czsvt" event={"ID":"ad352c45-ea79-4ecd-a971-cd8d0ab2c046","Type":"ContainerStarted","Data":"573f298979c122ea8096d9571b57da91c58dc703bbc6818c10237b08ac46b192"} Nov 26 06:50:26 crc kubenswrapper[4492]: I1126 06:50:26.807044 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-czsvt" event={"ID":"ad352c45-ea79-4ecd-a971-cd8d0ab2c046","Type":"ContainerStarted","Data":"011110ab639ea367b112e2bccdb410ed6cc97e1fcf53bbd8e268f10a4b93c817"} Nov 26 06:50:26 crc kubenswrapper[4492]: I1126 06:50:26.807409 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-czsvt" Nov 26 06:50:26 crc kubenswrapper[4492]: I1126 06:50:26.812364 4492 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-czsvt container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.21:8443/healthz\": dial tcp 10.217.0.21:8443: connect: connection refused" start-of-body= Nov 26 06:50:26 crc kubenswrapper[4492]: I1126 06:50:26.812421 4492 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-czsvt" podUID="ad352c45-ea79-4ecd-a971-cd8d0ab2c046" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.21:8443/healthz\": dial tcp 10.217.0.21:8443: connect: connection refused" Nov 26 06:50:26 crc kubenswrapper[4492]: W1126 06:50:26.812630 4492 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod46b3a5d1_cee1_4c83_b13f_94dcf4dee467.slice/crio-f3d24e53d4602667761774a81efd461617e1b40c69c649cd16336ddba9ffd101 WatchSource:0}: Error finding container f3d24e53d4602667761774a81efd461617e1b40c69c649cd16336ddba9ffd101: Status 404 returned error can't find the container with id f3d24e53d4602667761774a81efd461617e1b40c69c649cd16336ddba9ffd101 Nov 26 06:50:26 crc kubenswrapper[4492]: I1126 06:50:26.860342 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-c6mkp" event={"ID":"c9b86c64-f8b8-470e-ad5b-723c1122418f","Type":"ContainerStarted","Data":"22ce3a13f798ea2e3373bf55ebcf9cb994389b8967464122bd7ba7c1916ded37"} Nov 26 06:50:26 crc kubenswrapper[4492]: I1126 06:50:26.897406 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-54mhl\" (UID: \"2b02d314-392f-44f3-a88c-57d8852fbcf9\") " pod="openshift-image-registry/image-registry-697d97f7c8-54mhl" Nov 26 06:50:26 crc kubenswrapper[4492]: E1126 06:50:26.898695 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 06:50:27.398680338 +0000 UTC m=+123.282568636 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-54mhl" (UID: "2b02d314-392f-44f3-a88c-57d8852fbcf9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:50:26 crc kubenswrapper[4492]: I1126 06:50:26.956660 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-8q29r" podStartSLOduration=98.956630071 podStartE2EDuration="1m38.956630071s" podCreationTimestamp="2025-11-26 06:48:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:50:26.951944293 +0000 UTC m=+122.835832580" watchObservedRunningTime="2025-11-26 06:50:26.956630071 +0000 UTC m=+122.840518369" Nov 26 06:50:26 crc kubenswrapper[4492]: I1126 06:50:26.998852 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 06:50:27 crc kubenswrapper[4492]: E1126 06:50:27.000508 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 06:50:27.500479399 +0000 UTC m=+123.384367707 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:50:27 crc kubenswrapper[4492]: I1126 06:50:27.004379 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-284jz" event={"ID":"abfcd3ff-81af-4eff-91d1-3329b3b437af","Type":"ContainerStarted","Data":"6bbaa8f1313d34b0eb84479e25941f54d738ffcf29b7c284ffe0be6ff2fbd90a"} Nov 26 06:50:27 crc kubenswrapper[4492]: I1126 06:50:27.070913 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-xrjz8" event={"ID":"0c56a31a-563f-420c-8ab8-583dc3f757f6","Type":"ContainerStarted","Data":"70b64fffed96825102ee62c8551130ac72d42fe4f66c6bb4b01a1503adc005d2"} Nov 26 06:50:27 crc kubenswrapper[4492]: I1126 06:50:27.092771 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-p4c46" event={"ID":"7b1d2d57-f6df-48c9-b763-cd42f141b4f8","Type":"ContainerStarted","Data":"ea9d7e276b156f678087179a09f2a6e5693a085128672fd935ae4744f5641dd6"} Nov 26 06:50:27 crc kubenswrapper[4492]: I1126 06:50:27.094605 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-c2wr2" event={"ID":"3e6813b0-5667-42dc-89e2-7c684448700c","Type":"ContainerStarted","Data":"04186d43161bfb60e7463298162418c7422493d172724c06c7387fe54b5b6171"} Nov 26 06:50:27 crc kubenswrapper[4492]: I1126 06:50:27.095407 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-sb6qj" event={"ID":"05a0ee0a-7b86-490a-8638-8d74ad1446ea","Type":"ContainerStarted","Data":"b33f01f8ad4b127a9d7935c9e6fe92cea5dc7760d052ae577d69d26322c11c2d"} Nov 26 06:50:27 crc kubenswrapper[4492]: I1126 06:50:27.096080 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-sb6qj" Nov 26 06:50:27 crc kubenswrapper[4492]: I1126 06:50:27.100292 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-r6lm8" event={"ID":"198fd913-6670-4880-874e-cce2c186c203","Type":"ContainerStarted","Data":"400881467dd89811da9443ca6ecd402aca897999e76c40f9fa8ed1eae5d6e95a"} Nov 26 06:50:27 crc kubenswrapper[4492]: E1126 06:50:27.102318 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 06:50:27.602300631 +0000 UTC m=+123.486188929 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-54mhl" (UID: "2b02d314-392f-44f3-a88c-57d8852fbcf9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:50:27 crc kubenswrapper[4492]: I1126 06:50:27.103404 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-54mhl\" (UID: \"2b02d314-392f-44f3-a88c-57d8852fbcf9\") " pod="openshift-image-registry/image-registry-697d97f7c8-54mhl" Nov 26 06:50:27 crc kubenswrapper[4492]: I1126 06:50:27.117359 4492 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-sb6qj container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.17:8080/healthz\": dial tcp 10.217.0.17:8080: connect: connection refused" start-of-body= Nov 26 06:50:27 crc kubenswrapper[4492]: I1126 06:50:27.117397 4492 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-sb6qj" podUID="05a0ee0a-7b86-490a-8638-8d74ad1446ea" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.17:8080/healthz\": dial tcp 10.217.0.17:8080: connect: connection refused" Nov 26 06:50:27 crc kubenswrapper[4492]: I1126 06:50:27.164969 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-kfm7d" event={"ID":"ca48c342-a03e-4ed6-9b9e-de6592300188","Type":"ContainerStarted","Data":"a8ae7e058f84fc83e1cbc551a392d4417c0031484deb2e2a40e5e3155479d479"} Nov 26 06:50:27 crc kubenswrapper[4492]: I1126 06:50:27.167882 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-z6czh" podStartSLOduration=99.16786071 podStartE2EDuration="1m39.16786071s" podCreationTimestamp="2025-11-26 06:48:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:50:27.03187705 +0000 UTC m=+122.915765348" watchObservedRunningTime="2025-11-26 06:50:27.16786071 +0000 UTC m=+123.051749008" Nov 26 06:50:27 crc kubenswrapper[4492]: I1126 06:50:27.197444 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-q68m6" event={"ID":"bbde335f-1472-40de-b5ab-9867bc9b44cd","Type":"ContainerStarted","Data":"c0e4daa9e87b1936b345be448726e97324b0e3652af3f3c2719b8d995f7f499e"} Nov 26 06:50:27 crc kubenswrapper[4492]: I1126 06:50:27.205343 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 06:50:27 crc kubenswrapper[4492]: E1126 06:50:27.206548 4492 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 06:50:27.706530422 +0000 UTC m=+123.590418720 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:50:27 crc kubenswrapper[4492]: I1126 06:50:27.218821 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-q68m6" Nov 26 06:50:27 crc kubenswrapper[4492]: I1126 06:50:27.243625 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-mstsb" event={"ID":"1394fe3d-57d3-4340-9c97-1d75ac5e8ce4","Type":"ContainerStarted","Data":"3f3052ed4180f9e84ad0265aa8c85c56792adf470f7e4372e7cbd30db4c93d3e"} Nov 26 06:50:27 crc kubenswrapper[4492]: I1126 06:50:27.274924 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-zth84"] Nov 26 06:50:27 crc kubenswrapper[4492]: I1126 06:50:27.298526 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-j8pvq" event={"ID":"eada1141-ae25-4c7c-8493-1c12594dfa9e","Type":"ContainerStarted","Data":"5b8139392b5afccaca63e0842c727d4b4159c59107eb59d6aa94f0e6b7705d74"} Nov 26 06:50:27 crc kubenswrapper[4492]: I1126 06:50:27.298603 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-j8pvq" event={"ID":"eada1141-ae25-4c7c-8493-1c12594dfa9e","Type":"ContainerStarted","Data":"0031b9953504e8a384a5032ff04b59d3598b426df3217ea67ffaf07726eae2de"} Nov 26 06:50:27 crc kubenswrapper[4492]: I1126 06:50:27.303279 4492 patch_prober.go:28] interesting pod/downloads-7954f5f757-zvw72 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.11:8080/\": dial tcp 10.217.0.11:8080: connect: connection refused" start-of-body= Nov 26 06:50:27 crc kubenswrapper[4492]: I1126 06:50:27.303348 4492 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-zvw72" podUID="587ccafa-460d-41b6-bced-9a82822fa43c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.11:8080/\": dial tcp 10.217.0.11:8080: connect: connection refused" Nov 26 06:50:27 crc kubenswrapper[4492]: I1126 06:50:27.311343 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-54mhl\" (UID: \"2b02d314-392f-44f3-a88c-57d8852fbcf9\") " pod="openshift-image-registry/image-registry-697d97f7c8-54mhl" Nov 26 06:50:27 crc kubenswrapper[4492]: E1126 06:50:27.312641 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: 
nodeName:}" failed. No retries permitted until 2025-11-26 06:50:27.812622792 +0000 UTC m=+123.696511090 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-54mhl" (UID: "2b02d314-392f-44f3-a88c-57d8852fbcf9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:50:27 crc kubenswrapper[4492]: I1126 06:50:27.326412 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-nptnl"] Nov 26 06:50:27 crc kubenswrapper[4492]: I1126 06:50:27.396185 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-txj8k" podStartSLOduration=99.396124499 podStartE2EDuration="1m39.396124499s" podCreationTimestamp="2025-11-26 06:48:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:50:27.342286986 +0000 UTC m=+123.226175284" watchObservedRunningTime="2025-11-26 06:50:27.396124499 +0000 UTC m=+123.280012797" Nov 26 06:50:27 crc kubenswrapper[4492]: I1126 06:50:27.428864 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 06:50:27 crc kubenswrapper[4492]: E1126 06:50:27.430851 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 06:50:27.930824387 +0000 UTC m=+123.814712685 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:50:27 crc kubenswrapper[4492]: I1126 06:50:27.434417 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-vzwh5"] Nov 26 06:50:27 crc kubenswrapper[4492]: I1126 06:50:27.486440 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-8bhxb"] Nov 26 06:50:27 crc kubenswrapper[4492]: I1126 06:50:27.491581 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/machine-api-operator-5694c8668f-vl9xd" podStartSLOduration=99.491561239 podStartE2EDuration="1m39.491561239s" podCreationTimestamp="2025-11-26 06:48:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:50:27.485705182 +0000 UTC m=+123.369593480" watchObservedRunningTime="2025-11-26 06:50:27.491561239 +0000 UTC m=+123.375449538" Nov 26 06:50:27 crc kubenswrapper[4492]: I1126 06:50:27.533031 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-54mhl\" (UID: \"2b02d314-392f-44f3-a88c-57d8852fbcf9\") " pod="openshift-image-registry/image-registry-697d97f7c8-54mhl" Nov 26 06:50:27 crc kubenswrapper[4492]: E1126 06:50:27.533439 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 06:50:28.033427259 +0000 UTC m=+123.917315557 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-54mhl" (UID: "2b02d314-392f-44f3-a88c-57d8852fbcf9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:50:27 crc kubenswrapper[4492]: I1126 06:50:27.588243 4492 patch_prober.go:28] interesting pod/router-default-5444994796-r6lm8 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 26 06:50:27 crc kubenswrapper[4492]: [-]has-synced failed: reason withheld Nov 26 06:50:27 crc kubenswrapper[4492]: [+]process-running ok Nov 26 06:50:27 crc kubenswrapper[4492]: healthz check failed Nov 26 06:50:27 crc kubenswrapper[4492]: I1126 06:50:27.588333 4492 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-r6lm8" podUID="198fd913-6670-4880-874e-cce2c186c203" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 26 06:50:27 crc kubenswrapper[4492]: I1126 06:50:27.614664 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns-operator/dns-operator-744455d44c-b65ps" podStartSLOduration=99.614632488 podStartE2EDuration="1m39.614632488s" podCreationTimestamp="2025-11-26 06:48:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:50:27.607508777 +0000 UTC m=+123.491397076" watchObservedRunningTime="2025-11-26 06:50:27.614632488 +0000 UTC m=+123.498520786" Nov 26 06:50:27 crc kubenswrapper[4492]: I1126 06:50:27.637797 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 06:50:27 crc kubenswrapper[4492]: E1126 06:50:27.638383 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 06:50:28.138367175 +0000 UTC m=+124.022255472 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:50:27 crc kubenswrapper[4492]: I1126 06:50:27.716728 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-q68m6" podStartSLOduration=99.716706025 podStartE2EDuration="1m39.716706025s" podCreationTimestamp="2025-11-26 06:48:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:50:27.705722578 +0000 UTC m=+123.589610876" watchObservedRunningTime="2025-11-26 06:50:27.716706025 +0000 UTC m=+123.600594323" Nov 26 06:50:27 crc kubenswrapper[4492]: I1126 06:50:27.719223 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-6wjd8"] Nov 26 06:50:27 crc kubenswrapper[4492]: I1126 06:50:27.740221 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-54mhl\" (UID: \"2b02d314-392f-44f3-a88c-57d8852fbcf9\") " pod="openshift-image-registry/image-registry-697d97f7c8-54mhl" Nov 26 06:50:27 crc kubenswrapper[4492]: E1126 06:50:27.740566 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 06:50:28.240555197 +0000 UTC m=+124.124443495 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-54mhl" (UID: "2b02d314-392f-44f3-a88c-57d8852fbcf9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:50:27 crc kubenswrapper[4492]: I1126 06:50:27.770785 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-wpd96"] Nov 26 06:50:27 crc kubenswrapper[4492]: W1126 06:50:27.778088 4492 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9fc580a3_4f35_4c1c_8467_7cec5540712c.slice/crio-58a4d15d1cfee08829b08bb9cafc8bbe123153aea9da0cef823cf3ee70199aad WatchSource:0}: Error finding container 58a4d15d1cfee08829b08bb9cafc8bbe123153aea9da0cef823cf3ee70199aad: Status 404 returned error can't find the container with id 58a4d15d1cfee08829b08bb9cafc8bbe123153aea9da0cef823cf3ee70199aad Nov 26 06:50:27 crc kubenswrapper[4492]: I1126 06:50:27.820162 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-6cm4x" podStartSLOduration=99.820146952 podStartE2EDuration="1m39.820146952s" podCreationTimestamp="2025-11-26 06:48:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:50:27.819104713 +0000 UTC m=+123.702993011" watchObservedRunningTime="2025-11-26 06:50:27.820146952 +0000 UTC m=+123.704035250" Nov 26 06:50:27 crc kubenswrapper[4492]: I1126 06:50:27.841883 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 06:50:27 crc kubenswrapper[4492]: E1126 06:50:27.841999 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 06:50:28.34197809 +0000 UTC m=+124.225866388 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:50:27 crc kubenswrapper[4492]: I1126 06:50:27.846219 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-54mhl\" (UID: \"2b02d314-392f-44f3-a88c-57d8852fbcf9\") " pod="openshift-image-registry/image-registry-697d97f7c8-54mhl" Nov 26 06:50:27 crc kubenswrapper[4492]: E1126 06:50:27.846537 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 06:50:28.34652623 +0000 UTC m=+124.230414528 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-54mhl" (UID: "2b02d314-392f-44f3-a88c-57d8852fbcf9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:50:27 crc kubenswrapper[4492]: I1126 06:50:27.915603 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-dtlxh" podStartSLOduration=99.915586268 podStartE2EDuration="1m39.915586268s" podCreationTimestamp="2025-11-26 06:48:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:50:27.913573435 +0000 UTC m=+123.797461723" watchObservedRunningTime="2025-11-26 06:50:27.915586268 +0000 UTC m=+123.799474566" Nov 26 06:50:27 crc kubenswrapper[4492]: I1126 06:50:27.935259 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-v9n6p"] Nov 26 06:50:27 crc kubenswrapper[4492]: I1126 06:50:27.947533 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 06:50:27 crc kubenswrapper[4492]: E1126 06:50:27.947914 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 06:50:28.447901003 +0000 UTC m=+124.331789302 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:50:28 crc kubenswrapper[4492]: I1126 06:50:28.036715 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-kkhsz" podStartSLOduration=99.998290455 podStartE2EDuration="1m39.998290455s" podCreationTimestamp="2025-11-26 06:48:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:50:27.98649415 +0000 UTC m=+123.870382448" watchObservedRunningTime="2025-11-26 06:50:27.998290455 +0000 UTC m=+123.882178743" Nov 26 06:50:28 crc kubenswrapper[4492]: I1126 06:50:28.049242 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-54mhl\" (UID: \"2b02d314-392f-44f3-a88c-57d8852fbcf9\") " pod="openshift-image-registry/image-registry-697d97f7c8-54mhl" Nov 26 06:50:28 crc kubenswrapper[4492]: E1126 06:50:28.060452 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 06:50:28.560433421 +0000 UTC m=+124.444321719 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-54mhl" (UID: "2b02d314-392f-44f3-a88c-57d8852fbcf9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:50:28 crc kubenswrapper[4492]: I1126 06:50:28.150533 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 06:50:28 crc kubenswrapper[4492]: E1126 06:50:28.150938 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 06:50:28.65092188 +0000 UTC m=+124.534810179 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:50:28 crc kubenswrapper[4492]: I1126 06:50:28.236557 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console-operator/console-operator-58897d9998-7nxg8" Nov 26 06:50:28 crc kubenswrapper[4492]: I1126 06:50:28.237202 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-config-operator/openshift-config-operator-7777fb866f-vvl6g" podStartSLOduration=100.237191468 podStartE2EDuration="1m40.237191468s" podCreationTimestamp="2025-11-26 06:48:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:50:28.145434665 +0000 UTC m=+124.029322963" watchObservedRunningTime="2025-11-26 06:50:28.237191468 +0000 UTC m=+124.121079757" Nov 26 06:50:28 crc kubenswrapper[4492]: I1126 06:50:28.239288 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-f4fjj" podStartSLOduration=100.239282981 podStartE2EDuration="1m40.239282981s" podCreationTimestamp="2025-11-26 06:48:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:50:28.237916541 +0000 UTC m=+124.121804840" watchObservedRunningTime="2025-11-26 06:50:28.239282981 +0000 UTC m=+124.123171279" Nov 26 06:50:28 crc kubenswrapper[4492]: I1126 06:50:28.254156 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-54mhl\" (UID: \"2b02d314-392f-44f3-a88c-57d8852fbcf9\") " pod="openshift-image-registry/image-registry-697d97f7c8-54mhl" Nov 26 06:50:28 crc kubenswrapper[4492]: E1126 06:50:28.254504 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 06:50:28.754491188 +0000 UTC m=+124.638379486 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-54mhl" (UID: "2b02d314-392f-44f3-a88c-57d8852fbcf9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:50:28 crc kubenswrapper[4492]: I1126 06:50:28.339402 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-kfm7d" event={"ID":"ca48c342-a03e-4ed6-9b9e-de6592300188","Type":"ContainerStarted","Data":"55e240ed03dae6488759f6d37b15fd637edaaa3e21fc3f0780316812edce5d4f"} Nov 26 06:50:28 crc kubenswrapper[4492]: I1126 06:50:28.356729 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-8bhxb" event={"ID":"78231c38-8677-4dd0-b845-9a498909e94a","Type":"ContainerStarted","Data":"1b59422c2256206d7f82eb56209b22e3f935f2ed1eebb1d618517b732a253ee8"} Nov 26 06:50:28 crc kubenswrapper[4492]: I1126 06:50:28.362266 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 06:50:28 crc kubenswrapper[4492]: E1126 06:50:28.362826 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 06:50:28.862810356 +0000 UTC m=+124.746698654 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:50:28 crc kubenswrapper[4492]: I1126 06:50:28.377395 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-wpd96" event={"ID":"f84c3a91-f74e-4270-ab0a-7c92f151b3fd","Type":"ContainerStarted","Data":"0e8a73fcaeb21d1defdceb13c3c7cc948cfc0d979a3ca4ccc842fa4b58032bbd"} Nov 26 06:50:28 crc kubenswrapper[4492]: I1126 06:50:28.418761 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-gx7xk" event={"ID":"288b0000-22a3-4ae5-b07d-46d62037c91e","Type":"ContainerStarted","Data":"9ae599ad0cd1239bdbe7be10adecba06931f5ba4331fbe800176029cd7404136"} Nov 26 06:50:28 crc kubenswrapper[4492]: I1126 06:50:28.420566 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-gx7xk" Nov 26 06:50:28 crc kubenswrapper[4492]: I1126 06:50:28.450965 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-p4c46" event={"ID":"7b1d2d57-f6df-48c9-b763-cd42f141b4f8","Type":"ContainerStarted","Data":"ceeaf41c5488c91511e5f012249257b1db72c1151de8e00ae238c1537a58eb58"} Nov 26 06:50:28 crc kubenswrapper[4492]: I1126 06:50:28.455208 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-nptnl" event={"ID":"8815abe1-5cb1-4baa-97be-479d29d71055","Type":"ContainerStarted","Data":"d68877c67f0aaa273ed8a0536a6498546bc445337eca7220e781b1b4c1fc1a59"} Nov 26 06:50:28 crc kubenswrapper[4492]: I1126 06:50:28.455247 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-nptnl" event={"ID":"8815abe1-5cb1-4baa-97be-479d29d71055","Type":"ContainerStarted","Data":"3e968017d91ddc45c84431113eea12bfa14373eebbd003063c1c7dc00f9cad4c"} Nov 26 06:50:28 crc kubenswrapper[4492]: I1126 06:50:28.463973 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-54mhl\" (UID: \"2b02d314-392f-44f3-a88c-57d8852fbcf9\") " pod="openshift-image-registry/image-registry-697d97f7c8-54mhl" Nov 26 06:50:28 crc kubenswrapper[4492]: E1126 06:50:28.464267 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 06:50:28.964255793 +0000 UTC m=+124.848144080 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-54mhl" (UID: "2b02d314-392f-44f3-a88c-57d8852fbcf9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:50:28 crc kubenswrapper[4492]: I1126 06:50:28.467653 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-v9n6p" event={"ID":"3d7ce394-3661-4a68-be77-51bf1e1d5c94","Type":"ContainerStarted","Data":"ecdaa104195e6fa4bdc03d6f78030285946e02c2062accb4bd8aa404f07f2eba"} Nov 26 06:50:28 crc kubenswrapper[4492]: I1126 06:50:28.469214 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402325-9jvn6" event={"ID":"7c5436ae-c6b0-4c8e-b45f-e580fef03690","Type":"ContainerStarted","Data":"a3fb4226b66e93e9f4b8e5ab19a9f686ae4bb121be382ad5f9a8949862737785"} Nov 26 06:50:28 crc kubenswrapper[4492]: I1126 06:50:28.469241 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402325-9jvn6" event={"ID":"7c5436ae-c6b0-4c8e-b45f-e580fef03690","Type":"ContainerStarted","Data":"0ffa890cb3f00f0f6f2dd924099dc61dd2c6643071f1b58b08a63e79dfb2e897"} Nov 26 06:50:28 crc kubenswrapper[4492]: I1126 06:50:28.487056 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-558db77b4-2d74v" podStartSLOduration=100.487030374 podStartE2EDuration="1m40.487030374s" podCreationTimestamp="2025-11-26 06:48:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:50:28.484593372 +0000 UTC m=+124.368481670" watchObservedRunningTime="2025-11-26 06:50:28.487030374 +0000 UTC m=+124.370918662" Nov 26 06:50:28 crc kubenswrapper[4492]: I1126 06:50:28.518392 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-s4zp9" event={"ID":"46b3a5d1-cee1-4c83-b13f-94dcf4dee467","Type":"ContainerStarted","Data":"45b6d66ccd0f185f6e17e797cf5291029acacb14fac3f33cd89b108ee1d6fa90"} Nov 26 06:50:28 crc kubenswrapper[4492]: I1126 06:50:28.518451 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-s4zp9" event={"ID":"46b3a5d1-cee1-4c83-b13f-94dcf4dee467","Type":"ContainerStarted","Data":"f3d24e53d4602667761774a81efd461617e1b40c69c649cd16336ddba9ffd101"} Nov 26 06:50:28 crc kubenswrapper[4492]: I1126 06:50:28.551782 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-hlq25" event={"ID":"0c3ee07d-c654-4e14-afbd-fa5de3145b4d","Type":"ContainerStarted","Data":"98769e416a9eb178c45aa93d55c15f3433e599261027a64c778f87ca7b7abe9f"} Nov 26 06:50:28 crc kubenswrapper[4492]: I1126 06:50:28.566472 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 06:50:28 crc kubenswrapper[4492]: E1126 06:50:28.566953 4492 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 06:50:29.066913227 +0000 UTC m=+124.950801525 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:50:28 crc kubenswrapper[4492]: I1126 06:50:28.567612 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-54mhl\" (UID: \"2b02d314-392f-44f3-a88c-57d8852fbcf9\") " pod="openshift-image-registry/image-registry-697d97f7c8-54mhl" Nov 26 06:50:28 crc kubenswrapper[4492]: E1126 06:50:28.568193 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 06:50:29.068167174 +0000 UTC m=+124.952070249 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-54mhl" (UID: "2b02d314-392f-44f3-a88c-57d8852fbcf9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:50:28 crc kubenswrapper[4492]: I1126 06:50:28.580850 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-zth84" event={"ID":"c6597a61-a709-458b-9fd4-1c1213fda7c0","Type":"ContainerStarted","Data":"7ae1ab7e44e7f019db836b04bfc69e0bd959fc8ab106ae39988b6b9db1169cc0"} Nov 26 06:50:28 crc kubenswrapper[4492]: I1126 06:50:28.580897 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-zth84" event={"ID":"c6597a61-a709-458b-9fd4-1c1213fda7c0","Type":"ContainerStarted","Data":"0effa4f1933f714729edc9e12988201eba8f96c74bf47470a98f9e2a67527397"} Nov 26 06:50:28 crc kubenswrapper[4492]: I1126 06:50:28.604745 4492 patch_prober.go:28] interesting pod/router-default-5444994796-r6lm8 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 26 06:50:28 crc kubenswrapper[4492]: [-]has-synced failed: reason withheld Nov 26 06:50:28 crc kubenswrapper[4492]: [+]process-running ok Nov 26 06:50:28 crc kubenswrapper[4492]: healthz check failed Nov 26 06:50:28 crc kubenswrapper[4492]: I1126 06:50:28.604801 4492 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-r6lm8" podUID="198fd913-6670-4880-874e-cce2c186c203" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 26 06:50:28 crc kubenswrapper[4492]: I1126 06:50:28.617007 4492 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-879f6c89f-kv6rz" podStartSLOduration=100.616983779 podStartE2EDuration="1m40.616983779s" podCreationTimestamp="2025-11-26 06:48:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:50:28.616233098 +0000 UTC m=+124.500121396" watchObservedRunningTime="2025-11-26 06:50:28.616983779 +0000 UTC m=+124.500872066" Nov 26 06:50:28 crc kubenswrapper[4492]: I1126 06:50:28.625962 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-vzwh5" event={"ID":"2c89a992-675a-4189-9723-1a868ab0e4f5","Type":"ContainerStarted","Data":"4d0bbca277f62fa81954b225521cb6fcf348431e5f1881947fc9fb29980402ff"} Nov 26 06:50:28 crc kubenswrapper[4492]: I1126 06:50:28.626006 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-vzwh5" event={"ID":"2c89a992-675a-4189-9723-1a868ab0e4f5","Type":"ContainerStarted","Data":"ab5da1bdde00d4625a50dff32a3e70f56bc57e01d356bd23bf8d88ec2512de9b"} Nov 26 06:50:28 crc kubenswrapper[4492]: I1126 06:50:28.630021 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-ftkfp" event={"ID":"868da700-b5be-4495-b927-afcd1bc2c1cd","Type":"ContainerStarted","Data":"a706b7d92ce782afc1ac6f45bf0fd713a3f3fdb69cfa544049bbe3dbcd196339"} Nov 26 06:50:28 crc kubenswrapper[4492]: I1126 06:50:28.630072 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-ftkfp" event={"ID":"868da700-b5be-4495-b927-afcd1bc2c1cd","Type":"ContainerStarted","Data":"b13689bce486bf1bc1d33f2c30cf2f41c7af3855b320c81c40d0d93a61f3def9"} Nov 26 06:50:28 crc kubenswrapper[4492]: I1126 06:50:28.630084 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-ftkfp" event={"ID":"868da700-b5be-4495-b927-afcd1bc2c1cd","Type":"ContainerStarted","Data":"93c05ad26d9618380efb393463b30e11f510df643b7d38d4c1abcdf4f350bc2f"} Nov 26 06:50:28 crc kubenswrapper[4492]: I1126 06:50:28.630700 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-ftkfp" Nov 26 06:50:28 crc kubenswrapper[4492]: I1126 06:50:28.653042 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-284jz" event={"ID":"abfcd3ff-81af-4eff-91d1-3329b3b437af","Type":"ContainerStarted","Data":"ac9648de581d136c30b55f3ce8e2861aa8f4efa3bdbc9757b356faf64c3be1a3"} Nov 26 06:50:28 crc kubenswrapper[4492]: I1126 06:50:28.673794 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 06:50:28 crc kubenswrapper[4492]: E1126 06:50:28.674913 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2025-11-26 06:50:29.174894399 +0000 UTC m=+125.058782686 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:50:28 crc kubenswrapper[4492]: I1126 06:50:28.676631 4492 generic.go:334] "Generic (PLEG): container finished" podID="3e6813b0-5667-42dc-89e2-7c684448700c" containerID="368818d578aa7d67ee5ece4f8e7aa9e0f10bed341a6716822960c37dc27b3c85" exitCode=0 Nov 26 06:50:28 crc kubenswrapper[4492]: I1126 06:50:28.676748 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-c2wr2" event={"ID":"3e6813b0-5667-42dc-89e2-7c684448700c","Type":"ContainerDied","Data":"368818d578aa7d67ee5ece4f8e7aa9e0f10bed341a6716822960c37dc27b3c85"} Nov 26 06:50:28 crc kubenswrapper[4492]: I1126 06:50:28.697130 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-6wjd8" event={"ID":"9fc580a3-4f35-4c1c-8467-7cec5540712c","Type":"ContainerStarted","Data":"58a4d15d1cfee08829b08bb9cafc8bbe123153aea9da0cef823cf3ee70199aad"} Nov 26 06:50:28 crc kubenswrapper[4492]: I1126 06:50:28.724795 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-62jrz" podStartSLOduration=100.724779804 podStartE2EDuration="1m40.724779804s" podCreationTimestamp="2025-11-26 06:48:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:50:28.681064868 +0000 UTC m=+124.564953165" watchObservedRunningTime="2025-11-26 06:50:28.724779804 +0000 UTC m=+124.608668101" Nov 26 06:50:28 crc kubenswrapper[4492]: I1126 06:50:28.726326 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-c6mkp" podStartSLOduration=100.726319718 podStartE2EDuration="1m40.726319718s" podCreationTimestamp="2025-11-26 06:48:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:50:28.723563878 +0000 UTC m=+124.607452165" watchObservedRunningTime="2025-11-26 06:50:28.726319718 +0000 UTC m=+124.610208016" Nov 26 06:50:28 crc kubenswrapper[4492]: I1126 06:50:28.733619 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-xrjz8" event={"ID":"0c56a31a-563f-420c-8ab8-583dc3f757f6","Type":"ContainerStarted","Data":"f283d4eb6af52a178da3fa6008ee776f0cb4b4c73e758af14781788ee95a3c4a"} Nov 26 06:50:28 crc kubenswrapper[4492]: I1126 06:50:28.733656 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-xrjz8" event={"ID":"0c56a31a-563f-420c-8ab8-583dc3f757f6","Type":"ContainerStarted","Data":"0cacfb8c877f6a2896ad7058f3a96c2b98b15ad000ed86be2a1b34674d4095dc"} Nov 26 06:50:28 crc kubenswrapper[4492]: I1126 06:50:28.734745 4492 patch_prober.go:28] interesting 
pod/marketplace-operator-79b997595-sb6qj container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.17:8080/healthz\": dial tcp 10.217.0.17:8080: connect: connection refused" start-of-body= Nov 26 06:50:28 crc kubenswrapper[4492]: I1126 06:50:28.734799 4492 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-sb6qj" podUID="05a0ee0a-7b86-490a-8638-8d74ad1446ea" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.17:8080/healthz\": dial tcp 10.217.0.17:8080: connect: connection refused" Nov 26 06:50:28 crc kubenswrapper[4492]: I1126 06:50:28.735145 4492 patch_prober.go:28] interesting pod/downloads-7954f5f757-zvw72 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.11:8080/\": dial tcp 10.217.0.11:8080: connect: connection refused" start-of-body= Nov 26 06:50:28 crc kubenswrapper[4492]: I1126 06:50:28.735195 4492 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-zvw72" podUID="587ccafa-460d-41b6-bced-9a82822fa43c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.11:8080/\": dial tcp 10.217.0.11:8080: connect: connection refused" Nov 26 06:50:28 crc kubenswrapper[4492]: I1126 06:50:28.757570 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-czsvt" Nov 26 06:50:28 crc kubenswrapper[4492]: I1126 06:50:28.777093 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress/router-default-5444994796-r6lm8" podStartSLOduration=100.777067212 podStartE2EDuration="1m40.777067212s" podCreationTimestamp="2025-11-26 06:48:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:50:28.776913654 +0000 UTC m=+124.660801952" watchObservedRunningTime="2025-11-26 06:50:28.777067212 +0000 UTC m=+124.660955510" Nov 26 06:50:28 crc kubenswrapper[4492]: I1126 06:50:28.777687 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-54mhl\" (UID: \"2b02d314-392f-44f3-a88c-57d8852fbcf9\") " pod="openshift-image-registry/image-registry-697d97f7c8-54mhl" Nov 26 06:50:28 crc kubenswrapper[4492]: E1126 06:50:28.777969 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 06:50:29.277953199 +0000 UTC m=+125.161841496 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-54mhl" (UID: "2b02d314-392f-44f3-a88c-57d8852fbcf9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:50:28 crc kubenswrapper[4492]: I1126 06:50:28.924469 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 06:50:28 crc kubenswrapper[4492]: E1126 06:50:28.927666 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 06:50:29.427644576 +0000 UTC m=+125.311532875 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:50:29 crc kubenswrapper[4492]: I1126 06:50:29.028058 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-54mhl\" (UID: \"2b02d314-392f-44f3-a88c-57d8852fbcf9\") " pod="openshift-image-registry/image-registry-697d97f7c8-54mhl" Nov 26 06:50:29 crc kubenswrapper[4492]: E1126 06:50:29.028464 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 06:50:29.528449519 +0000 UTC m=+125.412337816 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-54mhl" (UID: "2b02d314-392f-44f3-a88c-57d8852fbcf9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 06:50:29 crc kubenswrapper[4492]: I1126 06:50:29.090290 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console-operator/console-operator-58897d9998-7nxg8" podStartSLOduration=101.090272513 podStartE2EDuration="1m41.090272513s" podCreationTimestamp="2025-11-26 06:48:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:50:28.930344852 +0000 UTC m=+124.814233150" watchObservedRunningTime="2025-11-26 06:50:29.090272513 +0000 UTC m=+124.974160811"
Nov 26 06:50:29 crc kubenswrapper[4492]: I1126 06:50:29.130422 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 26 06:50:29 crc kubenswrapper[4492]: E1126 06:50:29.131228 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 06:50:29.631201641 +0000 UTC m=+125.515089939 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 06:50:29 crc kubenswrapper[4492]: I1126 06:50:29.176021 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-f9d7485db-v92zj" podStartSLOduration=101.176006855 podStartE2EDuration="1m41.176006855s" podCreationTimestamp="2025-11-26 06:48:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:50:29.09185136 +0000 UTC m=+124.975739659" watchObservedRunningTime="2025-11-26 06:50:29.176006855 +0000 UTC m=+125.059895153"
Nov 26 06:50:29 crc kubenswrapper[4492]: I1126 06:50:29.211298 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-d925t"]
Nov 26 06:50:29 crc kubenswrapper[4492]: I1126 06:50:29.212261 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-d925t"
Nov 26 06:50:29 crc kubenswrapper[4492]: I1126 06:50:29.214766 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-czsvt" podStartSLOduration=101.214754522 podStartE2EDuration="1m41.214754522s" podCreationTimestamp="2025-11-26 06:48:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:50:29.212408332 +0000 UTC m=+125.096296630" watchObservedRunningTime="2025-11-26 06:50:29.214754522 +0000 UTC m=+125.098642820"
Nov 26 06:50:29 crc kubenswrapper[4492]: I1126 06:50:29.218544 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl"
Nov 26 06:50:29 crc kubenswrapper[4492]: I1126 06:50:29.232140 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-54mhl\" (UID: \"2b02d314-392f-44f3-a88c-57d8852fbcf9\") " pod="openshift-image-registry/image-registry-697d97f7c8-54mhl"
Nov 26 06:50:29 crc kubenswrapper[4492]: E1126 06:50:29.232515 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 06:50:29.732499951 +0000 UTC m=+125.616388249 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-54mhl" (UID: "2b02d314-392f-44f3-a88c-57d8852fbcf9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 06:50:29 crc kubenswrapper[4492]: I1126 06:50:29.290643 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-d925t"]
Nov 26 06:50:29 crc kubenswrapper[4492]: I1126 06:50:29.292424 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-config-operator/openshift-config-operator-7777fb866f-vvl6g"
Nov 26 06:50:29 crc kubenswrapper[4492]: I1126 06:50:29.333483 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 26 06:50:29 crc kubenswrapper[4492]: E1126 06:50:29.333689 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 06:50:29.833643649 +0000 UTC m=+125.717531948 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 06:50:29 crc kubenswrapper[4492]: I1126 06:50:29.333746 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a236508b-a76f-4029-b748-7bfdbe412825-catalog-content\") pod \"community-operators-d925t\" (UID: \"a236508b-a76f-4029-b748-7bfdbe412825\") " pod="openshift-marketplace/community-operators-d925t"
Nov 26 06:50:29 crc kubenswrapper[4492]: I1126 06:50:29.333815 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tsddt\" (UniqueName: \"kubernetes.io/projected/a236508b-a76f-4029-b748-7bfdbe412825-kube-api-access-tsddt\") pod \"community-operators-d925t\" (UID: \"a236508b-a76f-4029-b748-7bfdbe412825\") " pod="openshift-marketplace/community-operators-d925t"
Nov 26 06:50:29 crc kubenswrapper[4492]: I1126 06:50:29.333896 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-54mhl\" (UID: \"2b02d314-392f-44f3-a88c-57d8852fbcf9\") " pod="openshift-image-registry/image-registry-697d97f7c8-54mhl"
Nov 26 06:50:29 crc kubenswrapper[4492]: I1126 06:50:29.333976 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a236508b-a76f-4029-b748-7bfdbe412825-utilities\") pod \"community-operators-d925t\" (UID: \"a236508b-a76f-4029-b748-7bfdbe412825\") " pod="openshift-marketplace/community-operators-d925t"
Nov 26 06:50:29 crc kubenswrapper[4492]: E1126 06:50:29.334257 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 06:50:29.834238167 +0000 UTC m=+125.718126465 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-54mhl" (UID: "2b02d314-392f-44f3-a88c-57d8852fbcf9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 06:50:29 crc kubenswrapper[4492]: I1126 06:50:29.399334 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/downloads-7954f5f757-zvw72" podStartSLOduration=101.399313085 podStartE2EDuration="1m41.399313085s" podCreationTimestamp="2025-11-26 06:48:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:50:29.329796989 +0000 UTC m=+125.213685286" watchObservedRunningTime="2025-11-26 06:50:29.399313085 +0000 UTC m=+125.283201383"
Nov 26 06:50:29 crc kubenswrapper[4492]: I1126 06:50:29.420708 4492 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-gx7xk container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.41:5443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Nov 26 06:50:29 crc kubenswrapper[4492]: I1126 06:50:29.420766 4492 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-gx7xk" podUID="288b0000-22a3-4ae5-b07d-46d62037c91e" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.41:5443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Nov 26 06:50:29 crc kubenswrapper[4492]: I1126 06:50:29.435586 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 26 06:50:29 crc kubenswrapper[4492]: I1126 06:50:29.435843 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a236508b-a76f-4029-b748-7bfdbe412825-catalog-content\") pod \"community-operators-d925t\" (UID: \"a236508b-a76f-4029-b748-7bfdbe412825\") " pod="openshift-marketplace/community-operators-d925t"
Nov 26 06:50:29 crc kubenswrapper[4492]: I1126 06:50:29.435872 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tsddt\" (UniqueName: \"kubernetes.io/projected/a236508b-a76f-4029-b748-7bfdbe412825-kube-api-access-tsddt\") pod \"community-operators-d925t\" (UID: \"a236508b-a76f-4029-b748-7bfdbe412825\") " pod="openshift-marketplace/community-operators-d925t"
Nov 26 06:50:29 crc kubenswrapper[4492]: I1126 06:50:29.435919 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a236508b-a76f-4029-b748-7bfdbe412825-utilities\") pod \"community-operators-d925t\" (UID: \"a236508b-a76f-4029-b748-7bfdbe412825\") " pod="openshift-marketplace/community-operators-d925t"
Nov 26 06:50:29 crc kubenswrapper[4492]: I1126 06:50:29.436368 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a236508b-a76f-4029-b748-7bfdbe412825-utilities\") pod \"community-operators-d925t\" (UID: \"a236508b-a76f-4029-b748-7bfdbe412825\") " pod="openshift-marketplace/community-operators-d925t"
Nov 26 06:50:29 crc kubenswrapper[4492]: E1126 06:50:29.436444 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 06:50:29.936428503 +0000 UTC m=+125.820316802 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 06:50:29 crc kubenswrapper[4492]: I1126 06:50:29.436639 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a236508b-a76f-4029-b748-7bfdbe412825-catalog-content\") pod \"community-operators-d925t\" (UID: \"a236508b-a76f-4029-b748-7bfdbe412825\") " pod="openshift-marketplace/community-operators-d925t"
Nov 26 06:50:29 crc kubenswrapper[4492]: I1126 06:50:29.453961 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-g2gzv" podStartSLOduration=101.453948317 podStartE2EDuration="1m41.453948317s" podCreationTimestamp="2025-11-26 06:48:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:50:29.404996237 +0000 UTC m=+125.288884536" watchObservedRunningTime="2025-11-26 06:50:29.453948317 +0000 UTC m=+125.337836615"
Nov 26 06:50:29 crc kubenswrapper[4492]: I1126 06:50:29.454055 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-sb6qj" podStartSLOduration=101.45405079 podStartE2EDuration="1m41.45405079s" podCreationTimestamp="2025-11-26 06:48:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:50:29.451721432 +0000 UTC m=+125.335609730" watchObservedRunningTime="2025-11-26 06:50:29.45405079 +0000 UTC m=+125.337939088"
Nov 26 06:50:29 crc kubenswrapper[4492]: I1126 06:50:29.495578 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tsddt\" (UniqueName: \"kubernetes.io/projected/a236508b-a76f-4029-b748-7bfdbe412825-kube-api-access-tsddt\") pod \"community-operators-d925t\" (UID: \"a236508b-a76f-4029-b748-7bfdbe412825\") " pod="openshift-marketplace/community-operators-d925t"
Nov 26 06:50:29 crc kubenswrapper[4492]: I1126 06:50:29.536905 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-54mhl\" (UID: \"2b02d314-392f-44f3-a88c-57d8852fbcf9\") " pod="openshift-image-registry/image-registry-697d97f7c8-54mhl"
Nov 26 06:50:29 crc kubenswrapper[4492]: E1126 06:50:29.537433 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 06:50:30.037413394 +0000 UTC m=+125.921301692 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-54mhl" (UID: "2b02d314-392f-44f3-a88c-57d8852fbcf9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 06:50:29 crc kubenswrapper[4492]: I1126 06:50:29.537736 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-d925t"
Nov 26 06:50:29 crc kubenswrapper[4492]: I1126 06:50:29.566473 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-ftkfp" podStartSLOduration=101.566459204 podStartE2EDuration="1m41.566459204s" podCreationTimestamp="2025-11-26 06:48:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:50:29.505685502 +0000 UTC m=+125.389573800" watchObservedRunningTime="2025-11-26 06:50:29.566459204 +0000 UTC m=+125.450347503"
Nov 26 06:50:29 crc kubenswrapper[4492]: I1126 06:50:29.568672 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-99qpv"]
Nov 26 06:50:29 crc kubenswrapper[4492]: I1126 06:50:29.569539 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-99qpv"
Nov 26 06:50:29 crc kubenswrapper[4492]: I1126 06:50:29.583334 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver/apiserver-76f77b778f-284jz" podStartSLOduration=101.583310731 podStartE2EDuration="1m41.583310731s" podCreationTimestamp="2025-11-26 06:48:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:50:29.570165491 +0000 UTC m=+125.454053789" watchObservedRunningTime="2025-11-26 06:50:29.583310731 +0000 UTC m=+125.467199030"
Nov 26 06:50:29 crc kubenswrapper[4492]: I1126 06:50:29.583754 4492 patch_prober.go:28] interesting pod/router-default-5444994796-r6lm8 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 26 06:50:29 crc kubenswrapper[4492]: [-]has-synced failed: reason withheld
Nov 26 06:50:29 crc kubenswrapper[4492]: [+]process-running ok
Nov 26 06:50:29 crc kubenswrapper[4492]: healthz check failed
Nov 26 06:50:29 crc kubenswrapper[4492]: I1126 06:50:29.583849 4492 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-r6lm8" podUID="198fd913-6670-4880-874e-cce2c186c203" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 26 06:50:29 crc kubenswrapper[4492]: I1126 06:50:29.591495 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-99qpv"]
Nov 26 06:50:29 crc kubenswrapper[4492]: I1126 06:50:29.637774 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 26 06:50:29 crc kubenswrapper[4492]: E1126 06:50:29.638090 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 06:50:30.138057634 +0000 UTC m=+126.021945932 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 06:50:29 crc kubenswrapper[4492]: I1126 06:50:29.639054 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-54mhl\" (UID: \"2b02d314-392f-44f3-a88c-57d8852fbcf9\") " pod="openshift-image-registry/image-registry-697d97f7c8-54mhl"
Nov 26 06:50:29 crc kubenswrapper[4492]: E1126 06:50:29.639417 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 06:50:30.139403524 +0000 UTC m=+126.023291822 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-54mhl" (UID: "2b02d314-392f-44f3-a88c-57d8852fbcf9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 06:50:29 crc kubenswrapper[4492]: I1126 06:50:29.650260 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-hlq25" podStartSLOduration=101.650249372 podStartE2EDuration="1m41.650249372s" podCreationTimestamp="2025-11-26 06:48:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:50:29.617337194 +0000 UTC m=+125.501225493" watchObservedRunningTime="2025-11-26 06:50:29.650249372 +0000 UTC m=+125.534137670"
Nov 26 06:50:29 crc kubenswrapper[4492]: I1126 06:50:29.669244 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca-operator/service-ca-operator-777779d784-6wjd8" podStartSLOduration=101.669235272 podStartE2EDuration="1m41.669235272s" podCreationTimestamp="2025-11-26 06:48:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:50:29.667208332 +0000 UTC m=+125.551096629" watchObservedRunningTime="2025-11-26 06:50:29.669235272 +0000 UTC m=+125.553123570"
Nov 26 06:50:29 crc kubenswrapper[4492]: I1126 06:50:29.738275 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-kfm7d" event={"ID":"ca48c342-a03e-4ed6-9b9e-de6592300188","Type":"ContainerStarted","Data":"5fcdd9779e4ec4a71e8e0a6bafe0e4df8293b792278de8bb3d3eeec93057b80a"}
Nov 26 06:50:29 crc kubenswrapper[4492]: I1126 06:50:29.739365 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-xrjz8" podStartSLOduration=101.739345144 podStartE2EDuration="1m41.739345144s" podCreationTimestamp="2025-11-26 06:48:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:50:29.737027166 +0000 UTC m=+125.620915464" watchObservedRunningTime="2025-11-26 06:50:29.739345144 +0000 UTC m=+125.623233442"
Nov 26 06:50:29 crc kubenswrapper[4492]: I1126 06:50:29.739889 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 26 06:50:29 crc kubenswrapper[4492]: I1126 06:50:29.740202 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0abbeecb-4a32-445b-af18-2a6135721e74-utilities\") pod \"community-operators-99qpv\" (UID: \"0abbeecb-4a32-445b-af18-2a6135721e74\") " pod="openshift-marketplace/community-operators-99qpv"
Nov 26 06:50:29 crc kubenswrapper[4492]: I1126 06:50:29.740308 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xsdmv\" (UniqueName: \"kubernetes.io/projected/0abbeecb-4a32-445b-af18-2a6135721e74-kube-api-access-xsdmv\") pod \"community-operators-99qpv\" (UID: \"0abbeecb-4a32-445b-af18-2a6135721e74\") " pod="openshift-marketplace/community-operators-99qpv"
Nov 26 06:50:29 crc kubenswrapper[4492]: I1126 06:50:29.740404 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0abbeecb-4a32-445b-af18-2a6135721e74-catalog-content\") pod \"community-operators-99qpv\" (UID: \"0abbeecb-4a32-445b-af18-2a6135721e74\") " pod="openshift-marketplace/community-operators-99qpv"
Nov 26 06:50:29 crc kubenswrapper[4492]: E1126 06:50:29.740588 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 06:50:30.240574214 +0000 UTC m=+126.124462513 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 06:50:29 crc kubenswrapper[4492]: I1126 06:50:29.744609 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-wpd96" event={"ID":"f84c3a91-f74e-4270-ab0a-7c92f151b3fd","Type":"ContainerStarted","Data":"a556e0b567a24ff0c07d06bedf994c6ec1fa94d91bdef3a639578fb93890d52c"}
Nov 26 06:50:29 crc kubenswrapper[4492]: I1126 06:50:29.747422 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-s4zp9" event={"ID":"46b3a5d1-cee1-4c83-b13f-94dcf4dee467","Type":"ContainerStarted","Data":"b803deda3b8f274495e8257fed4d733de4ba1dc7d0aa05c53498afbe8aac0d09"}
Nov 26 06:50:29 crc kubenswrapper[4492]: I1126 06:50:29.747856 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-dns/dns-default-s4zp9"
Nov 26 06:50:29 crc kubenswrapper[4492]: I1126 06:50:29.749652 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-c2wr2" event={"ID":"3e6813b0-5667-42dc-89e2-7c684448700c","Type":"ContainerStarted","Data":"88a879505f75a339ec9e9765b26347b8d9d12d4930340e5b664a2aa476afe798"}
Nov 26 06:50:29 crc kubenswrapper[4492]: I1126 06:50:29.750715 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-v9n6p" event={"ID":"3d7ce394-3661-4a68-be77-51bf1e1d5c94","Type":"ContainerStarted","Data":"9740616d654fbdcb406ebaf3bb658250dada506f25077b7bc5050e00405ea34f"}
Nov 26 06:50:29 crc kubenswrapper[4492]: I1126 06:50:29.751878 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-6wjd8" event={"ID":"9fc580a3-4f35-4c1c-8467-7cec5540712c","Type":"ContainerStarted","Data":"0f96948b0720e52a48056b832028a313d7c0c55fb71446b563338a05e9d8c72e"}
Nov 26 06:50:29 crc kubenswrapper[4492]: I1126 06:50:29.753657 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-8bhxb" event={"ID":"78231c38-8677-4dd0-b845-9a498909e94a","Type":"ContainerStarted","Data":"2f532d78a3cd8aa12c9667558638d9dff6a31333ac60387a07d96c79b398d645"}
Nov 26 06:50:29 crc kubenswrapper[4492]: I1126 06:50:29.756079 4492 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-sb6qj container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.17:8080/healthz\": dial tcp 10.217.0.17:8080: connect: connection refused" start-of-body=
Nov 26 06:50:29 crc kubenswrapper[4492]: I1126 06:50:29.756530 4492 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-sb6qj" podUID="05a0ee0a-7b86-490a-8638-8d74ad1446ea" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.17:8080/healthz\": dial tcp 10.217.0.17:8080: connect: connection refused"
Nov 26 06:50:29 crc kubenswrapper[4492]: I1126 06:50:29.769327 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-gx7xk"
Nov 26 06:50:29 crc kubenswrapper[4492]: I1126 06:50:29.812620 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-canary/ingress-canary-zth84" podStartSLOduration=8.812534755 podStartE2EDuration="8.812534755s" podCreationTimestamp="2025-11-26 06:50:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:50:29.811972397 +0000 UTC m=+125.695860696" watchObservedRunningTime="2025-11-26 06:50:29.812534755 +0000 UTC m=+125.696423053"
Nov 26 06:50:29 crc kubenswrapper[4492]: I1126 06:50:29.844082 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xsdmv\" (UniqueName: \"kubernetes.io/projected/0abbeecb-4a32-445b-af18-2a6135721e74-kube-api-access-xsdmv\") pod \"community-operators-99qpv\" (UID: \"0abbeecb-4a32-445b-af18-2a6135721e74\") " pod="openshift-marketplace/community-operators-99qpv"
Nov 26 06:50:29 crc kubenswrapper[4492]: I1126 06:50:29.844321 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-54mhl\" (UID: \"2b02d314-392f-44f3-a88c-57d8852fbcf9\") " pod="openshift-image-registry/image-registry-697d97f7c8-54mhl"
Nov 26 06:50:29 crc kubenswrapper[4492]: I1126 06:50:29.844420 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0abbeecb-4a32-445b-af18-2a6135721e74-catalog-content\") pod \"community-operators-99qpv\" (UID: \"0abbeecb-4a32-445b-af18-2a6135721e74\") " pod="openshift-marketplace/community-operators-99qpv"
Nov 26 06:50:29 crc kubenswrapper[4492]: I1126 06:50:29.844934 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0abbeecb-4a32-445b-af18-2a6135721e74-utilities\") pod \"community-operators-99qpv\" (UID: \"0abbeecb-4a32-445b-af18-2a6135721e74\") " pod="openshift-marketplace/community-operators-99qpv"
Nov 26 06:50:29 crc kubenswrapper[4492]: E1126 06:50:29.848421 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 06:50:30.348409441 +0000 UTC m=+126.232297740 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-54mhl" (UID: "2b02d314-392f-44f3-a88c-57d8852fbcf9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 06:50:29 crc kubenswrapper[4492]: I1126 06:50:29.848790 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0abbeecb-4a32-445b-af18-2a6135721e74-catalog-content\") pod \"community-operators-99qpv\" (UID: \"0abbeecb-4a32-445b-af18-2a6135721e74\") " pod="openshift-marketplace/community-operators-99qpv"
Nov 26 06:50:29 crc kubenswrapper[4492]: I1126 06:50:29.850500 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0abbeecb-4a32-445b-af18-2a6135721e74-utilities\") pod \"community-operators-99qpv\" (UID: \"0abbeecb-4a32-445b-af18-2a6135721e74\") " pod="openshift-marketplace/community-operators-99qpv"
Nov 26 06:50:29 crc kubenswrapper[4492]: I1126 06:50:29.863373 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-gx7xk" podStartSLOduration=101.863356559 podStartE2EDuration="1m41.863356559s" podCreationTimestamp="2025-11-26 06:48:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:50:29.860757493 +0000 UTC m=+125.744645791" watchObservedRunningTime="2025-11-26 06:50:29.863356559 +0000 UTC m=+125.747244858"
Nov 26 06:50:29 crc kubenswrapper[4492]: I1126 06:50:29.889372 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xsdmv\" (UniqueName: \"kubernetes.io/projected/0abbeecb-4a32-445b-af18-2a6135721e74-kube-api-access-xsdmv\") pod \"community-operators-99qpv\" (UID: \"0abbeecb-4a32-445b-af18-2a6135721e74\") " pod="openshift-marketplace/community-operators-99qpv"
Nov 26 06:50:29 crc kubenswrapper[4492]: I1126 06:50:29.893446 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-99qpv"
Nov 26 06:50:29 crc kubenswrapper[4492]: I1126 06:50:29.944996 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-kt8n4"]
Nov 26 06:50:29 crc kubenswrapper[4492]: I1126 06:50:29.946587 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 26 06:50:29 crc kubenswrapper[4492]: E1126 06:50:29.946926 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 06:50:30.446911905 +0000 UTC m=+126.330800204 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 06:50:29 crc kubenswrapper[4492]: I1126 06:50:29.947006 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-kt8n4"
Nov 26 06:50:29 crc kubenswrapper[4492]: I1126 06:50:29.960553 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g"
Nov 26 06:50:30 crc kubenswrapper[4492]: I1126 06:50:30.003218 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication-operator/authentication-operator-69f744f599-vzwh5" podStartSLOduration=102.003205196 podStartE2EDuration="1m42.003205196s" podCreationTimestamp="2025-11-26 06:48:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:50:29.993788474 +0000 UTC m=+125.877676771" watchObservedRunningTime="2025-11-26 06:50:30.003205196 +0000 UTC m=+125.887093494"
Nov 26 06:50:30 crc kubenswrapper[4492]: I1126 06:50:30.009867 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-kt8n4"]
Nov 26 06:50:30 crc kubenswrapper[4492]: I1126 06:50:30.050008 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jrwlm\" (UniqueName: \"kubernetes.io/projected/817798d6-9eae-4ee2-9b2f-53c54772866c-kube-api-access-jrwlm\") pod \"certified-operators-kt8n4\" (UID: \"817798d6-9eae-4ee2-9b2f-53c54772866c\") " pod="openshift-marketplace/certified-operators-kt8n4"
Nov 26 06:50:30 crc kubenswrapper[4492]: I1126 06:50:30.050077 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-54mhl\" (UID: \"2b02d314-392f-44f3-a88c-57d8852fbcf9\") " pod="openshift-image-registry/image-registry-697d97f7c8-54mhl"
Nov 26 06:50:30 crc kubenswrapper[4492]: I1126 06:50:30.050105 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/817798d6-9eae-4ee2-9b2f-53c54772866c-catalog-content\") pod \"certified-operators-kt8n4\" (UID: \"817798d6-9eae-4ee2-9b2f-53c54772866c\") " pod="openshift-marketplace/certified-operators-kt8n4"
Nov 26 06:50:30 crc kubenswrapper[4492]: I1126 06:50:30.050195 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/817798d6-9eae-4ee2-9b2f-53c54772866c-utilities\") pod \"certified-operators-kt8n4\" (UID: \"817798d6-9eae-4ee2-9b2f-53c54772866c\") " pod="openshift-marketplace/certified-operators-kt8n4"
Nov 26 06:50:30 crc kubenswrapper[4492]: E1126 06:50:30.050492 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 06:50:30.550479051 +0000 UTC m=+126.434367349 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-54mhl" (UID: "2b02d314-392f-44f3-a88c-57d8852fbcf9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 06:50:30 crc kubenswrapper[4492]: I1126 06:50:30.087196 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29402325-9jvn6" podStartSLOduration=102.087161716 podStartE2EDuration="1m42.087161716s" podCreationTimestamp="2025-11-26 06:48:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:50:30.036633233 +0000 UTC m=+125.920521532" watchObservedRunningTime="2025-11-26 06:50:30.087161716 +0000 UTC m=+125.971050004"
Nov 26 06:50:30 crc kubenswrapper[4492]: I1126 06:50:30.088495 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-mstsb" podStartSLOduration=102.088489121 podStartE2EDuration="1m42.088489121s" podCreationTimestamp="2025-11-26 06:48:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:50:30.081576908 +0000 UTC m=+125.965465206" watchObservedRunningTime="2025-11-26 06:50:30.088489121 +0000 UTC m=+125.972377419"
Nov 26 06:50:30 crc kubenswrapper[4492]: I1126 06:50:30.151939 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 26 06:50:30 crc kubenswrapper[4492]: I1126 06:50:30.152259 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-server-p4c46" podStartSLOduration=9.152247234 podStartE2EDuration="9.152247234s" podCreationTimestamp="2025-11-26 06:50:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:50:30.120580617 +0000 UTC m=+126.004468914" watchObservedRunningTime="2025-11-26 06:50:30.152247234 +0000 UTC m=+126.036135521"
Nov 26 06:50:30 crc kubenswrapper[4492]: I1126 06:50:30.152337 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jrwlm\" (UniqueName: \"kubernetes.io/projected/817798d6-9eae-4ee2-9b2f-53c54772866c-kube-api-access-jrwlm\") pod \"certified-operators-kt8n4\" (UID: \"817798d6-9eae-4ee2-9b2f-53c54772866c\") " pod="openshift-marketplace/certified-operators-kt8n4"
Nov 26 06:50:30 crc kubenswrapper[4492]: I1126 06:50:30.152400 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/817798d6-9eae-4ee2-9b2f-53c54772866c-catalog-content\") pod \"certified-operators-kt8n4\" (UID: \"817798d6-9eae-4ee2-9b2f-53c54772866c\") " pod="openshift-marketplace/certified-operators-kt8n4"
Nov 26 06:50:30 crc kubenswrapper[4492]: I1126 06:50:30.152518 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/817798d6-9eae-4ee2-9b2f-53c54772866c-utilities\") pod \"certified-operators-kt8n4\" (UID: \"817798d6-9eae-4ee2-9b2f-53c54772866c\") " pod="openshift-marketplace/certified-operators-kt8n4"
Nov 26 06:50:30 crc kubenswrapper[4492]: I1126 06:50:30.152864 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/817798d6-9eae-4ee2-9b2f-53c54772866c-utilities\") pod \"certified-operators-kt8n4\" (UID: \"817798d6-9eae-4ee2-9b2f-53c54772866c\") " pod="openshift-marketplace/certified-operators-kt8n4"
Nov 26 06:50:30 crc kubenswrapper[4492]: E1126 06:50:30.152938 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 06:50:30.65291582 +0000 UTC m=+126.536804118 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 06:50:30 crc kubenswrapper[4492]: I1126 06:50:30.153368 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/817798d6-9eae-4ee2-9b2f-53c54772866c-catalog-content\") pod \"certified-operators-kt8n4\" (UID: \"817798d6-9eae-4ee2-9b2f-53c54772866c\") " pod="openshift-marketplace/certified-operators-kt8n4"
Nov 26 06:50:30 crc kubenswrapper[4492]: I1126 06:50:30.154794 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-nptnl" podStartSLOduration=102.154787348 podStartE2EDuration="1m42.154787348s" podCreationTimestamp="2025-11-26 06:48:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:50:30.145064511 +0000 UTC m=+126.028952809" watchObservedRunningTime="2025-11-26 06:50:30.154787348 +0000 UTC m=+126.038675646"
Nov 26 06:50:30 crc kubenswrapper[4492]: I1126 06:50:30.156604 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-l62r5"]
Nov 26 06:50:30 crc kubenswrapper[4492]: I1126 06:50:30.157829 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-l62r5"
Nov 26 06:50:30 crc kubenswrapper[4492]: I1126 06:50:30.182529 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-l62r5"]
Nov 26 06:50:30 crc kubenswrapper[4492]: I1126 06:50:30.211918 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jrwlm\" (UniqueName: \"kubernetes.io/projected/817798d6-9eae-4ee2-9b2f-53c54772866c-kube-api-access-jrwlm\") pod \"certified-operators-kt8n4\" (UID: \"817798d6-9eae-4ee2-9b2f-53c54772866c\") " pod="openshift-marketplace/certified-operators-kt8n4"
Nov 26 06:50:30 crc kubenswrapper[4492]: I1126 06:50:30.254249 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ec884242-662f-4387-b710-0d6c601a41c4-utilities\") pod \"certified-operators-l62r5\" (UID: \"ec884242-662f-4387-b710-0d6c601a41c4\") " pod="openshift-marketplace/certified-operators-l62r5"
Nov 26 06:50:30 crc kubenswrapper[4492]: I1126 06:50:30.254350 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cthkd\" (UniqueName: \"kubernetes.io/projected/ec884242-662f-4387-b710-0d6c601a41c4-kube-api-access-cthkd\") pod \"certified-operators-l62r5\" (UID: \"ec884242-662f-4387-b710-0d6c601a41c4\") " pod="openshift-marketplace/certified-operators-l62r5"
Nov 26 06:50:30 crc kubenswrapper[4492]: I1126 06:50:30.254462 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-54mhl\" (UID: \"2b02d314-392f-44f3-a88c-57d8852fbcf9\") " pod="openshift-image-registry/image-registry-697d97f7c8-54mhl"
Nov 26 06:50:30 crc kubenswrapper[4492]: I1126 06:50:30.254499 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ec884242-662f-4387-b710-0d6c601a41c4-catalog-content\") pod \"certified-operators-l62r5\" (UID: \"ec884242-662f-4387-b710-0d6c601a41c4\") " pod="openshift-marketplace/certified-operators-l62r5"
Nov 26 06:50:30 crc kubenswrapper[4492]: E1126 06:50:30.255078 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 06:50:30.755057375 +0000 UTC m=+126.638945673 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-54mhl" (UID: "2b02d314-392f-44f3-a88c-57d8852fbcf9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 06:50:30 crc kubenswrapper[4492]: I1126 06:50:30.314417 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-j8pvq" podStartSLOduration=102.314392482 podStartE2EDuration="1m42.314392482s" podCreationTimestamp="2025-11-26 06:48:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:50:30.291657676 +0000 UTC m=+126.175545974" watchObservedRunningTime="2025-11-26 06:50:30.314392482 +0000 UTC m=+126.198280780"
Nov 26 06:50:30 crc kubenswrapper[4492]: I1126 06:50:30.318013 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-kt8n4"
Nov 26 06:50:30 crc kubenswrapper[4492]: I1126 06:50:30.355269 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 26 06:50:30 crc kubenswrapper[4492]: I1126 06:50:30.355621 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ec884242-662f-4387-b710-0d6c601a41c4-catalog-content\") pod \"certified-operators-l62r5\" (UID: \"ec884242-662f-4387-b710-0d6c601a41c4\") " pod="openshift-marketplace/certified-operators-l62r5"
Nov 26 06:50:30 crc kubenswrapper[4492]: I1126 06:50:30.355711 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ec884242-662f-4387-b710-0d6c601a41c4-utilities\") pod \"certified-operators-l62r5\" (UID: \"ec884242-662f-4387-b710-0d6c601a41c4\") " pod="openshift-marketplace/certified-operators-l62r5"
Nov 26 06:50:30 crc kubenswrapper[4492]: I1126 06:50:30.355753 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cthkd\" (UniqueName: \"kubernetes.io/projected/ec884242-662f-4387-b710-0d6c601a41c4-kube-api-access-cthkd\") pod \"certified-operators-l62r5\" (UID: \"ec884242-662f-4387-b710-0d6c601a41c4\") " pod="openshift-marketplace/certified-operators-l62r5"
Nov 26 06:50:30 crc kubenswrapper[4492]: E1126 06:50:30.356043 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 06:50:30.856019172 +0000 UTC m=+126.739907460 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 06:50:30 crc kubenswrapper[4492]: I1126 06:50:30.356386 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ec884242-662f-4387-b710-0d6c601a41c4-catalog-content\") pod \"certified-operators-l62r5\" (UID: \"ec884242-662f-4387-b710-0d6c601a41c4\") " pod="openshift-marketplace/certified-operators-l62r5"
Nov 26 06:50:30 crc kubenswrapper[4492]: I1126 06:50:30.356822 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ec884242-662f-4387-b710-0d6c601a41c4-utilities\") pod \"certified-operators-l62r5\" (UID: \"ec884242-662f-4387-b710-0d6c601a41c4\") " pod="openshift-marketplace/certified-operators-l62r5"
Nov 26 06:50:30 crc kubenswrapper[4492]: I1126 06:50:30.357734 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca/service-ca-9c57cc56f-v9n6p" podStartSLOduration=102.357723946 podStartE2EDuration="1m42.357723946s" podCreationTimestamp="2025-11-26 06:48:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:50:30.356675355 +0000 UTC m=+126.240563654" watchObservedRunningTime="2025-11-26 06:50:30.357723946 +0000 UTC m=+126.241612235"
Nov 26 06:50:30 crc kubenswrapper[4492]: I1126 06:50:30.375106 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"]
Nov 26 06:50:30 crc kubenswrapper[4492]: I1126 06:50:30.376019 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 26 06:50:30 crc kubenswrapper[4492]: I1126 06:50:30.377001 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-d925t"]
Nov 26 06:50:30 crc kubenswrapper[4492]: I1126 06:50:30.382230 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager"/"kube-root-ca.crt"
Nov 26 06:50:30 crc kubenswrapper[4492]: I1126 06:50:30.382410 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager"/"installer-sa-dockercfg-kjl2n"
Nov 26 06:50:30 crc kubenswrapper[4492]: I1126 06:50:30.388271 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cthkd\" (UniqueName: \"kubernetes.io/projected/ec884242-662f-4387-b710-0d6c601a41c4-kube-api-access-cthkd\") pod \"certified-operators-l62r5\" (UID: \"ec884242-662f-4387-b710-0d6c601a41c4\") " pod="openshift-marketplace/certified-operators-l62r5"
Nov 26 06:50:30 crc kubenswrapper[4492]: I1126 06:50:30.403818 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"]
Nov 26 06:50:30 crc kubenswrapper[4492]: I1126 06:50:30.417130 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/dns-default-s4zp9" podStartSLOduration=9.41712088 podStartE2EDuration="9.41712088s" podCreationTimestamp="2025-11-26 06:50:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:50:30.414206892 +0000 UTC m=+126.298095180" watchObservedRunningTime="2025-11-26 06:50:30.41712088 +0000 UTC m=+126.301009168"
Nov 26 06:50:30 crc kubenswrapper[4492]: I1126 06:50:30.462664 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/261335b4-4d32-402e-8c7f-19d07db47a2a-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"261335b4-4d32-402e-8c7f-19d07db47a2a\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 26 06:50:30 crc kubenswrapper[4492]: I1126 06:50:30.462709 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/261335b4-4d32-402e-8c7f-19d07db47a2a-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"261335b4-4d32-402e-8c7f-19d07db47a2a\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 26 06:50:30 crc kubenswrapper[4492]: I1126 06:50:30.462797 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-54mhl\" (UID: \"2b02d314-392f-44f3-a88c-57d8852fbcf9\") " pod="openshift-image-registry/image-registry-697d97f7c8-54mhl"
Nov 26 06:50:30 crc kubenswrapper[4492]: E1126 06:50:30.463274 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 06:50:30.963255664 +0000 UTC m=+126.847143963 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-54mhl" (UID: "2b02d314-392f-44f3-a88c-57d8852fbcf9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 06:50:30 crc kubenswrapper[4492]: I1126 06:50:30.493671 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-l62r5"
Nov 26 06:50:30 crc kubenswrapper[4492]: I1126 06:50:30.526191 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-c2wr2" podStartSLOduration=102.526157576 podStartE2EDuration="1m42.526157576s" podCreationTimestamp="2025-11-26 06:48:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:50:30.525391076 +0000 UTC m=+126.409279374" watchObservedRunningTime="2025-11-26 06:50:30.526157576 +0000 UTC m=+126.410045874"
Nov 26 06:50:30 crc kubenswrapper[4492]: I1126 06:50:30.527350 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd-operator/etcd-operator-b45778765-wpd96" podStartSLOduration=102.527343135 podStartE2EDuration="1m42.527343135s" podCreationTimestamp="2025-11-26 06:48:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:50:30.493713669 +0000 UTC m=+126.377601967" watchObservedRunningTime="2025-11-26 06:50:30.527343135 +0000 UTC m=+126.411231423"
Nov 26 06:50:30 crc kubenswrapper[4492]: I1126 06:50:30.564967 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 26 06:50:30 crc kubenswrapper[4492]: I1126 06:50:30.565353 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/261335b4-4d32-402e-8c7f-19d07db47a2a-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"261335b4-4d32-402e-8c7f-19d07db47a2a\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 26 06:50:30 crc kubenswrapper[4492]: I1126 06:50:30.565375 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/261335b4-4d32-402e-8c7f-19d07db47a2a-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"261335b4-4d32-402e-8c7f-19d07db47a2a\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 26 06:50:30 crc kubenswrapper[4492]: I1126 06:50:30.565453 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/261335b4-4d32-402e-8c7f-19d07db47a2a-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"261335b4-4d32-402e-8c7f-19d07db47a2a\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 26 06:50:30 crc kubenswrapper[4492]: E1126 06:50:30.565515 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 06:50:31.0655044 +0000 UTC m=+126.949392698 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 06:50:31.0655044 +0000 UTC m=+126.949392698 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:50:30 crc kubenswrapper[4492]: I1126 06:50:30.567599 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-admission-controller-857f4d67dd-kfm7d" podStartSLOduration=102.567582076 podStartE2EDuration="1m42.567582076s" podCreationTimestamp="2025-11-26 06:48:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:50:30.564527625 +0000 UTC m=+126.448415922" watchObservedRunningTime="2025-11-26 06:50:30.567582076 +0000 UTC m=+126.451470374" Nov 26 06:50:30 crc kubenswrapper[4492]: I1126 06:50:30.587305 4492 patch_prober.go:28] interesting pod/router-default-5444994796-r6lm8 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 26 06:50:30 crc kubenswrapper[4492]: [-]has-synced failed: reason withheld Nov 26 06:50:30 crc kubenswrapper[4492]: [+]process-running ok Nov 26 06:50:30 crc kubenswrapper[4492]: healthz check failed Nov 26 06:50:30 crc kubenswrapper[4492]: I1126 06:50:30.587335 4492 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-r6lm8" podUID="198fd913-6670-4880-874e-cce2c186c203" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 26 06:50:30 crc kubenswrapper[4492]: I1126 06:50:30.593896 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/261335b4-4d32-402e-8c7f-19d07db47a2a-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"261335b4-4d32-402e-8c7f-19d07db47a2a\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 26 06:50:30 crc kubenswrapper[4492]: I1126 06:50:30.669263 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-54mhl\" (UID: \"2b02d314-392f-44f3-a88c-57d8852fbcf9\") " pod="openshift-image-registry/image-registry-697d97f7c8-54mhl" Nov 26 06:50:30 crc kubenswrapper[4492]: E1126 06:50:30.670046 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 06:50:31.17002089 +0000 UTC m=+127.053909177 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-54mhl" (UID: "2b02d314-392f-44f3-a88c-57d8852fbcf9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:50:30 crc kubenswrapper[4492]: I1126 06:50:30.715498 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 26 06:50:30 crc kubenswrapper[4492]: I1126 06:50:30.721454 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-99qpv"] Nov 26 06:50:30 crc kubenswrapper[4492]: I1126 06:50:30.770384 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 06:50:30 crc kubenswrapper[4492]: E1126 06:50:30.771131 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 06:50:31.271112761 +0000 UTC m=+127.155001059 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:50:30 crc kubenswrapper[4492]: I1126 06:50:30.811314 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-8bhxb" event={"ID":"78231c38-8677-4dd0-b845-9a498909e94a","Type":"ContainerStarted","Data":"43ac81209f8b31f481943db6ac3ae184b453196a3a9c12e78e9041c3d6177f27"} Nov 26 06:50:30 crc kubenswrapper[4492]: I1126 06:50:30.811357 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-8bhxb" event={"ID":"78231c38-8677-4dd0-b845-9a498909e94a","Type":"ContainerStarted","Data":"f50ccf058350e83e5f4b592405616ad3fdc5d7f289d6f9ec0f98dbb2de9fb9be"} Nov 26 06:50:30 crc kubenswrapper[4492]: I1126 06:50:30.818730 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-d925t" event={"ID":"a236508b-a76f-4029-b748-7bfdbe412825","Type":"ContainerStarted","Data":"538746f3b4832d5f46de80cfa530424b8b962bbe48e5bef5ead3f491ef212b1c"} Nov 26 06:50:30 crc kubenswrapper[4492]: I1126 06:50:30.818758 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-d925t" event={"ID":"a236508b-a76f-4029-b748-7bfdbe412825","Type":"ContainerStarted","Data":"2d76cf5f0e0acfed3f18147816d606c978c42fa9a9fb1271577f6d96b44b5918"} Nov 26 06:50:30 crc kubenswrapper[4492]: I1126 06:50:30.827679 4492 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 26 06:50:30 crc 
kubenswrapper[4492]: I1126 06:50:30.830328 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-99qpv" event={"ID":"0abbeecb-4a32-445b-af18-2a6135721e74","Type":"ContainerStarted","Data":"9e4a811354ca8974df2de17bcafc50d113ba5e53c6924a8d3c3365df5b867eba"} Nov 26 06:50:30 crc kubenswrapper[4492]: I1126 06:50:30.872474 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-54mhl\" (UID: \"2b02d314-392f-44f3-a88c-57d8852fbcf9\") " pod="openshift-image-registry/image-registry-697d97f7c8-54mhl" Nov 26 06:50:30 crc kubenswrapper[4492]: E1126 06:50:30.872849 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 06:50:31.372834396 +0000 UTC m=+127.256722695 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-54mhl" (UID: "2b02d314-392f-44f3-a88c-57d8852fbcf9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:50:30 crc kubenswrapper[4492]: I1126 06:50:30.978269 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 06:50:30 crc kubenswrapper[4492]: E1126 06:50:30.978425 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 06:50:31.478393385 +0000 UTC m=+127.362281683 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:50:30 crc kubenswrapper[4492]: I1126 06:50:30.978875 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-54mhl\" (UID: \"2b02d314-392f-44f3-a88c-57d8852fbcf9\") " pod="openshift-image-registry/image-registry-697d97f7c8-54mhl" Nov 26 06:50:30 crc kubenswrapper[4492]: E1126 06:50:30.981474 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2025-11-26 06:50:31.481452035 +0000 UTC m=+127.365340323 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-54mhl" (UID: "2b02d314-392f-44f3-a88c-57d8852fbcf9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:50:31 crc kubenswrapper[4492]: I1126 06:50:31.080714 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 06:50:31 crc kubenswrapper[4492]: E1126 06:50:31.081525 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 06:50:31.581501998 +0000 UTC m=+127.465390296 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:50:31 crc kubenswrapper[4492]: I1126 06:50:31.097535 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-kt8n4"] Nov 26 06:50:31 crc kubenswrapper[4492]: I1126 06:50:31.184312 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-54mhl\" (UID: \"2b02d314-392f-44f3-a88c-57d8852fbcf9\") " pod="openshift-image-registry/image-registry-697d97f7c8-54mhl" Nov 26 06:50:31 crc kubenswrapper[4492]: E1126 06:50:31.184703 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 06:50:31.684683888 +0000 UTC m=+127.568572186 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-54mhl" (UID: "2b02d314-392f-44f3-a88c-57d8852fbcf9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:50:31 crc kubenswrapper[4492]: I1126 06:50:31.285101 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 06:50:31 crc kubenswrapper[4492]: E1126 06:50:31.285752 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 06:50:31.785733961 +0000 UTC m=+127.669622260 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:50:31 crc kubenswrapper[4492]: I1126 06:50:31.392834 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-l62r5"] Nov 26 06:50:31 crc kubenswrapper[4492]: I1126 06:50:31.393820 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-54mhl\" (UID: \"2b02d314-392f-44f3-a88c-57d8852fbcf9\") " pod="openshift-image-registry/image-registry-697d97f7c8-54mhl" Nov 26 06:50:31 crc kubenswrapper[4492]: E1126 06:50:31.394238 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 06:50:31.894224461 +0000 UTC m=+127.778112750 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-54mhl" (UID: "2b02d314-392f-44f3-a88c-57d8852fbcf9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:50:31 crc kubenswrapper[4492]: I1126 06:50:31.495338 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 06:50:31 crc kubenswrapper[4492]: E1126 06:50:31.496030 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 06:50:31.996000989 +0000 UTC m=+127.879889278 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:50:31 crc kubenswrapper[4492]: I1126 06:50:31.529485 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Nov 26 06:50:31 crc kubenswrapper[4492]: W1126 06:50:31.534364 4492 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-pod261335b4_4d32_402e_8c7f_19d07db47a2a.slice/crio-cf660d5b53887806d4e1d0713eeab5bfa8c9f060a86a674120b4315e38319b11 WatchSource:0}: Error finding container cf660d5b53887806d4e1d0713eeab5bfa8c9f060a86a674120b4315e38319b11: Status 404 returned error can't find the container with id cf660d5b53887806d4e1d0713eeab5bfa8c9f060a86a674120b4315e38319b11 Nov 26 06:50:31 crc kubenswrapper[4492]: I1126 06:50:31.583778 4492 patch_prober.go:28] interesting pod/router-default-5444994796-r6lm8 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 26 06:50:31 crc kubenswrapper[4492]: [-]has-synced failed: reason withheld Nov 26 06:50:31 crc kubenswrapper[4492]: [+]process-running ok Nov 26 06:50:31 crc kubenswrapper[4492]: healthz check failed Nov 26 06:50:31 crc kubenswrapper[4492]: I1126 06:50:31.583836 4492 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-r6lm8" podUID="198fd913-6670-4880-874e-cce2c186c203" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 26 06:50:31 crc kubenswrapper[4492]: I1126 06:50:31.597701 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-54mhl\" 
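The router's startup probe above keeps failing with HTTP 500, and kubelet logs the start of the response body: a list of named sub-checks where "[-]" marks a failure and "[+]" a pass, ending in "healthz check failed". The following Go sketch shows how such an aggregated healthz endpoint can be built; it is illustrative only, and the check names and handler shape are assumptions, not the router's actual implementation.

    // Minimal healthz-style aggregator in the spirit of the probe output above.
    // Hypothetical check names; "reason withheld" mirrors the logged body.
    package main

    import (
        "fmt"
        "net/http"
    )

    type check struct {
        name string
        run  func() error // nil error means the check passes
    }

    func healthz(checks []check) http.HandlerFunc {
        return func(w http.ResponseWriter, r *http.Request) {
            body, failed := "", false
            for _, c := range checks {
                if err := c.run(); err != nil {
                    failed = true
                    body += fmt.Sprintf("[-]%s failed: reason withheld\n", c.name)
                } else {
                    body += fmt.Sprintf("[+]%s ok\n", c.name)
                }
            }
            if failed {
                // The kubelet prober sees this as "statuscode: 500".
                w.WriteHeader(http.StatusInternalServerError)
                body += "healthz check failed\n"
            }
            fmt.Fprint(w, body)
        }
    }

    func main() {
        checks := []check{
            {"backend-http", func() error { return fmt.Errorf("not ready") }},
            {"has-synced", func() error { return fmt.Errorf("not synced") }},
            {"process-running", func() error { return nil }},
        }
        http.Handle("/healthz", healthz(checks))
        _ = http.ListenAndServe(":1936", nil)
    }

Until every sub-check passes, the startup probe fails and kubelet withholds readiness, which is why the same probe failure repeats each second in the entries that follow.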
(UID: \"2b02d314-392f-44f3-a88c-57d8852fbcf9\") " pod="openshift-image-registry/image-registry-697d97f7c8-54mhl" Nov 26 06:50:31 crc kubenswrapper[4492]: E1126 06:50:31.598138 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 06:50:32.098117096 +0000 UTC m=+127.982005384 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-54mhl" (UID: "2b02d314-392f-44f3-a88c-57d8852fbcf9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:50:31 crc kubenswrapper[4492]: I1126 06:50:31.698528 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 06:50:31 crc kubenswrapper[4492]: E1126 06:50:31.698789 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 06:50:32.198727653 +0000 UTC m=+128.082615951 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:50:31 crc kubenswrapper[4492]: I1126 06:50:31.699286 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-54mhl\" (UID: \"2b02d314-392f-44f3-a88c-57d8852fbcf9\") " pod="openshift-image-registry/image-registry-697d97f7c8-54mhl" Nov 26 06:50:31 crc kubenswrapper[4492]: E1126 06:50:31.699797 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 06:50:32.199783778 +0000 UTC m=+128.083672076 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-54mhl" (UID: "2b02d314-392f-44f3-a88c-57d8852fbcf9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:50:31 crc kubenswrapper[4492]: I1126 06:50:31.800283 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 06:50:31 crc kubenswrapper[4492]: E1126 06:50:31.800564 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 06:50:32.300531753 +0000 UTC m=+128.184420051 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:50:31 crc kubenswrapper[4492]: I1126 06:50:31.800831 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-54mhl\" (UID: \"2b02d314-392f-44f3-a88c-57d8852fbcf9\") " pod="openshift-image-registry/image-registry-697d97f7c8-54mhl" Nov 26 06:50:31 crc kubenswrapper[4492]: E1126 06:50:31.801104 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 06:50:32.301095934 +0000 UTC m=+128.184984232 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-54mhl" (UID: "2b02d314-392f-44f3-a88c-57d8852fbcf9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 06:50:31 crc kubenswrapper[4492]: I1126 06:50:31.847459 4492 generic.go:334] "Generic (PLEG): container finished" podID="0abbeecb-4a32-445b-af18-2a6135721e74" containerID="bfdd759c09088880615aef318f9fec25ef423cf8fc1acd20c74e0a3b5886a399" exitCode=0 Nov 26 06:50:31 crc kubenswrapper[4492]: I1126 06:50:31.847530 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-99qpv" event={"ID":"0abbeecb-4a32-445b-af18-2a6135721e74","Type":"ContainerDied","Data":"bfdd759c09088880615aef318f9fec25ef423cf8fc1acd20c74e0a3b5886a399"} Nov 26 06:50:31 crc kubenswrapper[4492]: I1126 06:50:31.853780 4492 generic.go:334] "Generic (PLEG): container finished" podID="817798d6-9eae-4ee2-9b2f-53c54772866c" containerID="6429f1b91aa1ecc22354e8662c6099830fd1c2e2f5217a363dc116336d15634c" exitCode=0 Nov 26 06:50:31 crc kubenswrapper[4492]: I1126 06:50:31.853832 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kt8n4" event={"ID":"817798d6-9eae-4ee2-9b2f-53c54772866c","Type":"ContainerDied","Data":"6429f1b91aa1ecc22354e8662c6099830fd1c2e2f5217a363dc116336d15634c"} Nov 26 06:50:31 crc kubenswrapper[4492]: I1126 06:50:31.853900 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kt8n4" event={"ID":"817798d6-9eae-4ee2-9b2f-53c54772866c","Type":"ContainerStarted","Data":"08fabe7574a10f96ee9bb717176c9ca0384a01b34de8877259113e0fd29d111b"} Nov 26 06:50:31 crc kubenswrapper[4492]: I1126 06:50:31.858392 4492 generic.go:334] "Generic (PLEG): container finished" podID="ec884242-662f-4387-b710-0d6c601a41c4" containerID="8480ae27267dfea4dc40407b2e90b8d2987df8738a7cbcc4895caf48fda222e7" exitCode=0 Nov 26 06:50:31 crc kubenswrapper[4492]: I1126 06:50:31.858525 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-l62r5" event={"ID":"ec884242-662f-4387-b710-0d6c601a41c4","Type":"ContainerDied","Data":"8480ae27267dfea4dc40407b2e90b8d2987df8738a7cbcc4895caf48fda222e7"} Nov 26 06:50:31 crc kubenswrapper[4492]: I1126 06:50:31.858576 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-l62r5" event={"ID":"ec884242-662f-4387-b710-0d6c601a41c4","Type":"ContainerStarted","Data":"f80bb35757fe5321e47e84f8daad7e8eccf1ec0eec1cbd56feb284094a540202"} Nov 26 06:50:31 crc kubenswrapper[4492]: I1126 06:50:31.860424 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"261335b4-4d32-402e-8c7f-19d07db47a2a","Type":"ContainerStarted","Data":"cf660d5b53887806d4e1d0713eeab5bfa8c9f060a86a674120b4315e38319b11"} Nov 26 06:50:31 crc kubenswrapper[4492]: I1126 06:50:31.871869 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-8bhxb" event={"ID":"78231c38-8677-4dd0-b845-9a498909e94a","Type":"ContainerStarted","Data":"fba8daacb05477a7c667d6f589b3a75ecf9fab5cb1777b24da01fd7939241693"} Nov 26 06:50:31 crc kubenswrapper[4492]: I1126 06:50:31.874663 
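The repeating "No retries permitted until ... (durationBeforeRetry 500ms)" errors above show how kubelet's nestedpendingoperations layer gates failed volume operations: after a failure, a retry deadline is recorded, and the reconciler's frequent re-attempts are rejected until that deadline passes. The Go sketch below is a simplified illustration of that gating under the assumption of a fixed 500ms delay, as seen in this log; the real kubelet code (nestedpendingoperations.go with its exponential backoff) is more involved, and all names here are illustrative.

    // Simplified retry gate modeled on the log's "durationBeforeRetry 500ms".
    package main

    import (
        "errors"
        "fmt"
        "time"
    )

    type retryGate struct {
        lastErrorTime       time.Time
        durationBeforeRetry time.Duration
    }

    // allowed reports whether a new attempt may start at time now.
    func (g *retryGate) allowed(now time.Time) bool {
        return now.After(g.lastErrorTime.Add(g.durationBeforeRetry))
    }

    // recordError notes the failure and sets the earliest permitted retry.
    func (g *retryGate) recordError(now time.Time) {
        g.lastErrorTime = now
        g.durationBeforeRetry = 500 * time.Millisecond
    }

    func main() {
        g := &retryGate{}
        // Stand-in for MountDevice while the CSI driver is not yet registered.
        mount := func() error { return errors.New("driver not found in the list of registered CSI drivers") }
        for i := 0; i < 4; i++ {
            now := time.Now()
            if !g.allowed(now) {
                fmt.Printf("no retries permitted until %v\n", g.lastErrorTime.Add(g.durationBeforeRetry))
                time.Sleep(g.durationBeforeRetry)
                continue
            }
            if err := mount(); err != nil {
                g.recordError(now)
                fmt.Println("operation failed:", err)
            }
        }
    }

This is why the mount and unmount attempts for pvc-657094db recur at roughly 500ms intervals: each attempt fails fast because the kubevirt.io.hostpath-provisioner driver has not registered yet, arms the gate, and is retried as soon as the deadline expires.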
Nov 26 06:50:31 crc kubenswrapper[4492]: I1126 06:50:31.874663 4492 generic.go:334] "Generic (PLEG): container finished" podID="a236508b-a76f-4029-b748-7bfdbe412825" containerID="538746f3b4832d5f46de80cfa530424b8b962bbe48e5bef5ead3f491ef212b1c" exitCode=0
Nov 26 06:50:31 crc kubenswrapper[4492]: I1126 06:50:31.874727 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-d925t" event={"ID":"a236508b-a76f-4029-b748-7bfdbe412825","Type":"ContainerDied","Data":"538746f3b4832d5f46de80cfa530424b8b962bbe48e5bef5ead3f491ef212b1c"}
Nov 26 06:50:31 crc kubenswrapper[4492]: I1126 06:50:31.903912 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 26 06:50:31 crc kubenswrapper[4492]: E1126 06:50:31.904297 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 06:50:32.40428046 +0000 UTC m=+128.288168758 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 06:50:31 crc kubenswrapper[4492]: I1126 06:50:31.949941 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-7rl4j"]
Nov 26 06:50:31 crc kubenswrapper[4492]: I1126 06:50:31.951377 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-7rl4j"
Nov 26 06:50:31 crc kubenswrapper[4492]: I1126 06:50:31.954722 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb"
Nov 26 06:50:31 crc kubenswrapper[4492]: I1126 06:50:31.959278 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="hostpath-provisioner/csi-hostpathplugin-8bhxb" podStartSLOduration=10.959267754 podStartE2EDuration="10.959267754s" podCreationTimestamp="2025-11-26 06:50:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:50:31.955845531 +0000 UTC m=+127.839733829" watchObservedRunningTime="2025-11-26 06:50:31.959267754 +0000 UTC m=+127.843156042"
Nov 26 06:50:31 crc kubenswrapper[4492]: I1126 06:50:31.972489 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-7rl4j"]
Nov 26 06:50:32 crc kubenswrapper[4492]: I1126 06:50:32.006431 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-54mhl\" (UID: \"2b02d314-392f-44f3-a88c-57d8852fbcf9\") " pod="openshift-image-registry/image-registry-697d97f7c8-54mhl"
Nov 26 06:50:32 crc kubenswrapper[4492]: E1126 06:50:32.006739 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 06:50:32.506727248 +0000 UTC m=+128.390615546 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-54mhl" (UID: "2b02d314-392f-44f3-a88c-57d8852fbcf9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 06:50:32 crc kubenswrapper[4492]: I1126 06:50:32.107377 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 26 06:50:32 crc kubenswrapper[4492]: E1126 06:50:32.107567 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 06:50:32.607539975 +0000 UTC m=+128.491428274 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 06:50:32 crc kubenswrapper[4492]: I1126 06:50:32.107763 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/abb41535-15fd-41da-9e54-d68ec23a99be-utilities\") pod \"redhat-marketplace-7rl4j\" (UID: \"abb41535-15fd-41da-9e54-d68ec23a99be\") " pod="openshift-marketplace/redhat-marketplace-7rl4j"
Nov 26 06:50:32 crc kubenswrapper[4492]: I1126 06:50:32.107802 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xnrrj\" (UniqueName: \"kubernetes.io/projected/abb41535-15fd-41da-9e54-d68ec23a99be-kube-api-access-xnrrj\") pod \"redhat-marketplace-7rl4j\" (UID: \"abb41535-15fd-41da-9e54-d68ec23a99be\") " pod="openshift-marketplace/redhat-marketplace-7rl4j"
Nov 26 06:50:32 crc kubenswrapper[4492]: I1126 06:50:32.107851 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-54mhl\" (UID: \"2b02d314-392f-44f3-a88c-57d8852fbcf9\") " pod="openshift-image-registry/image-registry-697d97f7c8-54mhl"
Nov 26 06:50:32 crc kubenswrapper[4492]: I1126 06:50:32.107876 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/abb41535-15fd-41da-9e54-d68ec23a99be-catalog-content\") pod \"redhat-marketplace-7rl4j\" (UID: \"abb41535-15fd-41da-9e54-d68ec23a99be\") " pod="openshift-marketplace/redhat-marketplace-7rl4j"
Nov 26 06:50:32 crc kubenswrapper[4492]: E1126 06:50:32.108188 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 06:50:32.608167395 +0000 UTC m=+128.492055693 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-54mhl" (UID: "2b02d314-392f-44f3-a88c-57d8852fbcf9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 06:50:32 crc kubenswrapper[4492]: I1126 06:50:32.208836 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 26 06:50:32 crc kubenswrapper[4492]: I1126 06:50:32.209248 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/abb41535-15fd-41da-9e54-d68ec23a99be-utilities\") pod \"redhat-marketplace-7rl4j\" (UID: \"abb41535-15fd-41da-9e54-d68ec23a99be\") " pod="openshift-marketplace/redhat-marketplace-7rl4j"
Nov 26 06:50:32 crc kubenswrapper[4492]: I1126 06:50:32.209275 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xnrrj\" (UniqueName: \"kubernetes.io/projected/abb41535-15fd-41da-9e54-d68ec23a99be-kube-api-access-xnrrj\") pod \"redhat-marketplace-7rl4j\" (UID: \"abb41535-15fd-41da-9e54-d68ec23a99be\") " pod="openshift-marketplace/redhat-marketplace-7rl4j"
Nov 26 06:50:32 crc kubenswrapper[4492]: I1126 06:50:32.209352 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/abb41535-15fd-41da-9e54-d68ec23a99be-catalog-content\") pod \"redhat-marketplace-7rl4j\" (UID: \"abb41535-15fd-41da-9e54-d68ec23a99be\") " pod="openshift-marketplace/redhat-marketplace-7rl4j"
Nov 26 06:50:32 crc kubenswrapper[4492]: I1126 06:50:32.209761 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/abb41535-15fd-41da-9e54-d68ec23a99be-catalog-content\") pod \"redhat-marketplace-7rl4j\" (UID: \"abb41535-15fd-41da-9e54-d68ec23a99be\") " pod="openshift-marketplace/redhat-marketplace-7rl4j"
Nov 26 06:50:32 crc kubenswrapper[4492]: E1126 06:50:32.209826 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 06:50:32.709813998 +0000 UTC m=+128.593702297 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 06:50:32 crc kubenswrapper[4492]: I1126 06:50:32.210011 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/abb41535-15fd-41da-9e54-d68ec23a99be-utilities\") pod \"redhat-marketplace-7rl4j\" (UID: \"abb41535-15fd-41da-9e54-d68ec23a99be\") " pod="openshift-marketplace/redhat-marketplace-7rl4j"
Nov 26 06:50:32 crc kubenswrapper[4492]: I1126 06:50:32.230132 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xnrrj\" (UniqueName: \"kubernetes.io/projected/abb41535-15fd-41da-9e54-d68ec23a99be-kube-api-access-xnrrj\") pod \"redhat-marketplace-7rl4j\" (UID: \"abb41535-15fd-41da-9e54-d68ec23a99be\") " pod="openshift-marketplace/redhat-marketplace-7rl4j"
Nov 26 06:50:32 crc kubenswrapper[4492]: I1126 06:50:32.276866 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-7rl4j"
Nov 26 06:50:32 crc kubenswrapper[4492]: I1126 06:50:32.311527 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-54mhl\" (UID: \"2b02d314-392f-44f3-a88c-57d8852fbcf9\") " pod="openshift-image-registry/image-registry-697d97f7c8-54mhl"
Nov 26 06:50:32 crc kubenswrapper[4492]: E1126 06:50:32.311976 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 06:50:32.811958639 +0000 UTC m=+128.695846927 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-54mhl" (UID: "2b02d314-392f-44f3-a88c-57d8852fbcf9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 06:50:32 crc kubenswrapper[4492]: I1126 06:50:32.350827 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-7gl7t"]
Nov 26 06:50:32 crc kubenswrapper[4492]: I1126 06:50:32.353643 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-7gl7t"
Nov 26 06:50:32 crc kubenswrapper[4492]: I1126 06:50:32.353844 4492 plugin_watcher.go:194] "Adding socket path or updating timestamp to desired state cache" path="/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock"
Nov 26 06:50:32 crc kubenswrapper[4492]: I1126 06:50:32.360194 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-7gl7t"]
Nov 26 06:50:32 crc kubenswrapper[4492]: I1126 06:50:32.413818 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 26 06:50:32 crc kubenswrapper[4492]: E1126 06:50:32.413915 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 06:50:32.913885941 +0000 UTC m=+128.797774230 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 06:50:32 crc kubenswrapper[4492]: I1126 06:50:32.414213 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-54mhl\" (UID: \"2b02d314-392f-44f3-a88c-57d8852fbcf9\") " pod="openshift-image-registry/image-registry-697d97f7c8-54mhl"
Nov 26 06:50:32 crc kubenswrapper[4492]: E1126 06:50:32.414750 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 06:50:32.91473635 +0000 UTC m=+128.798624648 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-54mhl" (UID: "2b02d314-392f-44f3-a88c-57d8852fbcf9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 06:50:32 crc kubenswrapper[4492]: I1126 06:50:32.517786 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 26 06:50:32 crc kubenswrapper[4492]: E1126 06:50:32.518020 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 06:50:33.018001707 +0000 UTC m=+128.901890005 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 06:50:32 crc kubenswrapper[4492]: I1126 06:50:32.518363 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9g6nm\" (UniqueName: \"kubernetes.io/projected/bff9a13a-5b4a-4789-ab6d-bff2ad038798-kube-api-access-9g6nm\") pod \"redhat-marketplace-7gl7t\" (UID: \"bff9a13a-5b4a-4789-ab6d-bff2ad038798\") " pod="openshift-marketplace/redhat-marketplace-7gl7t"
Nov 26 06:50:32 crc kubenswrapper[4492]: I1126 06:50:32.518399 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bff9a13a-5b4a-4789-ab6d-bff2ad038798-catalog-content\") pod \"redhat-marketplace-7gl7t\" (UID: \"bff9a13a-5b4a-4789-ab6d-bff2ad038798\") " pod="openshift-marketplace/redhat-marketplace-7gl7t"
Nov 26 06:50:32 crc kubenswrapper[4492]: I1126 06:50:32.518445 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bff9a13a-5b4a-4789-ab6d-bff2ad038798-utilities\") pod \"redhat-marketplace-7gl7t\" (UID: \"bff9a13a-5b4a-4789-ab6d-bff2ad038798\") " pod="openshift-marketplace/redhat-marketplace-7gl7t"
Nov 26 06:50:32 crc kubenswrapper[4492]: I1126 06:50:32.518507 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-54mhl\" (UID: \"2b02d314-392f-44f3-a88c-57d8852fbcf9\") " pod="openshift-image-registry/image-registry-697d97f7c8-54mhl"
Nov 26 06:50:32 crc kubenswrapper[4492]: E1126 06:50:32.519805 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 06:50:33.019782274 +0000 UTC m=+128.903670572 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-54mhl" (UID: "2b02d314-392f-44f3-a88c-57d8852fbcf9") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 06:50:32 crc kubenswrapper[4492]: I1126 06:50:32.545942 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-7rl4j"]
Nov 26 06:50:32 crc kubenswrapper[4492]: I1126 06:50:32.565141 4492 reconciler.go:161] "OperationExecutor.RegisterPlugin started" plugin={"SocketPath":"/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock","Timestamp":"2025-11-26T06:50:32.353857992Z","Handler":null,"Name":""}
Nov 26 06:50:32 crc kubenswrapper[4492]: I1126 06:50:32.568934 4492 csi_plugin.go:100] kubernetes.io/csi: Trying to validate a new CSI Driver with name: kubevirt.io.hostpath-provisioner endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock versions: 1.0.0
Nov 26 06:50:32 crc kubenswrapper[4492]: I1126 06:50:32.569056 4492 csi_plugin.go:113] kubernetes.io/csi: Register new plugin with name: kubevirt.io.hostpath-provisioner at endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock
Nov 26 06:50:32 crc kubenswrapper[4492]: I1126 06:50:32.595681 4492 patch_prober.go:28] interesting pod/router-default-5444994796-r6lm8 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 26 06:50:32 crc kubenswrapper[4492]: [-]has-synced failed: reason withheld
Nov 26 06:50:32 crc kubenswrapper[4492]: [+]process-running ok
Nov 26 06:50:32 crc kubenswrapper[4492]: healthz check failed
Nov 26 06:50:32 crc kubenswrapper[4492]: I1126 06:50:32.596131 4492 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-r6lm8" podUID="198fd913-6670-4880-874e-cce2c186c203" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 26 06:50:32 crc kubenswrapper[4492]: I1126 06:50:32.619352 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 26 06:50:32 crc kubenswrapper[4492]: I1126 06:50:32.621432 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9g6nm\" (UniqueName: \"kubernetes.io/projected/bff9a13a-5b4a-4789-ab6d-bff2ad038798-kube-api-access-9g6nm\") pod \"redhat-marketplace-7gl7t\" (UID: \"bff9a13a-5b4a-4789-ab6d-bff2ad038798\") " pod="openshift-marketplace/redhat-marketplace-7gl7t"
\"kubernetes.io/empty-dir/bff9a13a-5b4a-4789-ab6d-bff2ad038798-catalog-content\") pod \"redhat-marketplace-7gl7t\" (UID: \"bff9a13a-5b4a-4789-ab6d-bff2ad038798\") " pod="openshift-marketplace/redhat-marketplace-7gl7t" Nov 26 06:50:32 crc kubenswrapper[4492]: I1126 06:50:32.621564 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bff9a13a-5b4a-4789-ab6d-bff2ad038798-utilities\") pod \"redhat-marketplace-7gl7t\" (UID: \"bff9a13a-5b4a-4789-ab6d-bff2ad038798\") " pod="openshift-marketplace/redhat-marketplace-7gl7t" Nov 26 06:50:32 crc kubenswrapper[4492]: I1126 06:50:32.622166 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bff9a13a-5b4a-4789-ab6d-bff2ad038798-catalog-content\") pod \"redhat-marketplace-7gl7t\" (UID: \"bff9a13a-5b4a-4789-ab6d-bff2ad038798\") " pod="openshift-marketplace/redhat-marketplace-7gl7t" Nov 26 06:50:32 crc kubenswrapper[4492]: I1126 06:50:32.622585 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bff9a13a-5b4a-4789-ab6d-bff2ad038798-utilities\") pod \"redhat-marketplace-7gl7t\" (UID: \"bff9a13a-5b4a-4789-ab6d-bff2ad038798\") " pod="openshift-marketplace/redhat-marketplace-7gl7t" Nov 26 06:50:32 crc kubenswrapper[4492]: I1126 06:50:32.643877 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9g6nm\" (UniqueName: \"kubernetes.io/projected/bff9a13a-5b4a-4789-ab6d-bff2ad038798-kube-api-access-9g6nm\") pod \"redhat-marketplace-7gl7t\" (UID: \"bff9a13a-5b4a-4789-ab6d-bff2ad038798\") " pod="openshift-marketplace/redhat-marketplace-7gl7t" Nov 26 06:50:32 crc kubenswrapper[4492]: I1126 06:50:32.653810 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue "" Nov 26 06:50:32 crc kubenswrapper[4492]: I1126 06:50:32.685200 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-7gl7t" Nov 26 06:50:32 crc kubenswrapper[4492]: I1126 06:50:32.723677 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-54mhl\" (UID: \"2b02d314-392f-44f3-a88c-57d8852fbcf9\") " pod="openshift-image-registry/image-registry-697d97f7c8-54mhl" Nov 26 06:50:32 crc kubenswrapper[4492]: I1126 06:50:32.742082 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-m6grh"] Nov 26 06:50:32 crc kubenswrapper[4492]: I1126 06:50:32.743377 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-m6grh" Nov 26 06:50:32 crc kubenswrapper[4492]: I1126 06:50:32.745384 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Nov 26 06:50:32 crc kubenswrapper[4492]: I1126 06:50:32.751924 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-m6grh"] Nov 26 06:50:32 crc kubenswrapper[4492]: I1126 06:50:32.774905 4492 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Nov 26 06:50:32 crc kubenswrapper[4492]: I1126 06:50:32.774966 4492 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-54mhl\" (UID: \"2b02d314-392f-44f3-a88c-57d8852fbcf9\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount\"" pod="openshift-image-registry/image-registry-697d97f7c8-54mhl" Nov 26 06:50:32 crc kubenswrapper[4492]: I1126 06:50:32.799716 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-54mhl\" (UID: \"2b02d314-392f-44f3-a88c-57d8852fbcf9\") " pod="openshift-image-registry/image-registry-697d97f7c8-54mhl" Nov 26 06:50:32 crc kubenswrapper[4492]: I1126 06:50:32.824444 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/72328da8-8a37-4bd5-b1f5-c26ce6aefd5a-catalog-content\") pod \"redhat-operators-m6grh\" (UID: \"72328da8-8a37-4bd5-b1f5-c26ce6aefd5a\") " pod="openshift-marketplace/redhat-operators-m6grh" Nov 26 06:50:32 crc kubenswrapper[4492]: I1126 06:50:32.824698 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/72328da8-8a37-4bd5-b1f5-c26ce6aefd5a-utilities\") pod \"redhat-operators-m6grh\" (UID: \"72328da8-8a37-4bd5-b1f5-c26ce6aefd5a\") " pod="openshift-marketplace/redhat-operators-m6grh" Nov 26 06:50:32 crc kubenswrapper[4492]: I1126 06:50:32.824885 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pxbgt\" (UniqueName: \"kubernetes.io/projected/72328da8-8a37-4bd5-b1f5-c26ce6aefd5a-kube-api-access-pxbgt\") pod \"redhat-operators-m6grh\" (UID: \"72328da8-8a37-4bd5-b1f5-c26ce6aefd5a\") " pod="openshift-marketplace/redhat-operators-m6grh" Nov 26 06:50:32 crc kubenswrapper[4492]: I1126 06:50:32.876007 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Nov 26 06:50:32 crc kubenswrapper[4492]: I1126 06:50:32.894433 4492 util.go:30] "No sandbox for pod can be found. 
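The csi_attacher.go:380 entry above shows why MountDevice "succeeds" immediately after registration: before staging a volume, kubelet asks the node plugin for its capabilities, and a driver that does not advertise STAGE_UNSTAGE_VOLUME (as with this hostpath provisioner) gets NodeStageVolume skipped, going straight to SetUp/NodePublishVolume. A minimal sketch of that capability check follows; it assumes the container-storage-interface spec's Go bindings and is a simplification of the real attacher logic.

    // Sketch of the STAGE_UNSTAGE_VOLUME check behind "Skipping MountDevice...".
    package main

    import (
        "fmt"

        csi "github.com/container-storage-interface/spec/lib/go/csi"
    )

    // supportsStageUnstage scans a NodeGetCapabilities response for the
    // STAGE_UNSTAGE_VOLUME RPC capability.
    func supportsStageUnstage(resp *csi.NodeGetCapabilitiesResponse) bool {
        for _, c := range resp.GetCapabilities() {
            rpc := c.GetRpc()
            if rpc != nil && rpc.GetType() == csi.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME {
                return true
            }
        }
        return false
    }

    func main() {
        // A driver reporting no node capabilities, like the hostpath
        // provisioner in this log: staging is skipped entirely.
        resp := &csi.NodeGetCapabilitiesResponse{}
        if !supportsStageUnstage(resp) {
            fmt.Println("STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...")
        }
    }

With staging skipped, operation_generator.go:580 records the device mount path under the driver's globalmount directory and MountVolume.SetUp completes, so the image-registry pod's sandbox can finally be created in the entries below.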
Nov 26 06:50:32 crc kubenswrapper[4492]: I1126 06:50:32.894433 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-54mhl"
Nov 26 06:50:32 crc kubenswrapper[4492]: I1126 06:50:32.916621 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7rl4j" event={"ID":"abb41535-15fd-41da-9e54-d68ec23a99be","Type":"ContainerStarted","Data":"af83b13765b0ea434c7438ed702407c9903056625a5ddcef837f8607f4286c70"}
Nov 26 06:50:32 crc kubenswrapper[4492]: I1126 06:50:32.916926 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7rl4j" event={"ID":"abb41535-15fd-41da-9e54-d68ec23a99be","Type":"ContainerStarted","Data":"4646573b5aebfb4bd1e281cf66d449172c5e3c9b7309bd3dd908fc152e78ce23"}
Nov 26 06:50:32 crc kubenswrapper[4492]: I1126 06:50:32.928448 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/72328da8-8a37-4bd5-b1f5-c26ce6aefd5a-utilities\") pod \"redhat-operators-m6grh\" (UID: \"72328da8-8a37-4bd5-b1f5-c26ce6aefd5a\") " pod="openshift-marketplace/redhat-operators-m6grh"
Nov 26 06:50:32 crc kubenswrapper[4492]: I1126 06:50:32.928537 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pxbgt\" (UniqueName: \"kubernetes.io/projected/72328da8-8a37-4bd5-b1f5-c26ce6aefd5a-kube-api-access-pxbgt\") pod \"redhat-operators-m6grh\" (UID: \"72328da8-8a37-4bd5-b1f5-c26ce6aefd5a\") " pod="openshift-marketplace/redhat-operators-m6grh"
Nov 26 06:50:32 crc kubenswrapper[4492]: I1126 06:50:32.928569 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/72328da8-8a37-4bd5-b1f5-c26ce6aefd5a-catalog-content\") pod \"redhat-operators-m6grh\" (UID: \"72328da8-8a37-4bd5-b1f5-c26ce6aefd5a\") " pod="openshift-marketplace/redhat-operators-m6grh"
Nov 26 06:50:32 crc kubenswrapper[4492]: I1126 06:50:32.928987 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/72328da8-8a37-4bd5-b1f5-c26ce6aefd5a-catalog-content\") pod \"redhat-operators-m6grh\" (UID: \"72328da8-8a37-4bd5-b1f5-c26ce6aefd5a\") " pod="openshift-marketplace/redhat-operators-m6grh"
Nov 26 06:50:32 crc kubenswrapper[4492]: I1126 06:50:32.929087 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/72328da8-8a37-4bd5-b1f5-c26ce6aefd5a-utilities\") pod \"redhat-operators-m6grh\" (UID: \"72328da8-8a37-4bd5-b1f5-c26ce6aefd5a\") " pod="openshift-marketplace/redhat-operators-m6grh"
Nov 26 06:50:32 crc kubenswrapper[4492]: I1126 06:50:32.929699 4492 generic.go:334] "Generic (PLEG): container finished" podID="261335b4-4d32-402e-8c7f-19d07db47a2a" containerID="6cad0e813177b806a723dd49fbe7a0ba7103dd50d9e01a115fab78dfd1c501cd" exitCode=0
Nov 26 06:50:32 crc kubenswrapper[4492]: I1126 06:50:32.929785 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"261335b4-4d32-402e-8c7f-19d07db47a2a","Type":"ContainerDied","Data":"6cad0e813177b806a723dd49fbe7a0ba7103dd50d9e01a115fab78dfd1c501cd"}
Nov 26 06:50:32 crc kubenswrapper[4492]: I1126 06:50:32.942155 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-qkl8m"]
Nov 26 06:50:32 crc kubenswrapper[4492]: I1126 06:50:32.943293 4492 generic.go:334] "Generic (PLEG): container finished" podID="7c5436ae-c6b0-4c8e-b45f-e580fef03690" containerID="a3fb4226b66e93e9f4b8e5ab19a9f686ae4bb121be382ad5f9a8949862737785" exitCode=0
Nov 26 06:50:32 crc kubenswrapper[4492]: I1126 06:50:32.947567 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402325-9jvn6" event={"ID":"7c5436ae-c6b0-4c8e-b45f-e580fef03690","Type":"ContainerDied","Data":"a3fb4226b66e93e9f4b8e5ab19a9f686ae4bb121be382ad5f9a8949862737785"}
Nov 26 06:50:32 crc kubenswrapper[4492]: I1126 06:50:32.947595 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-qkl8m"]
Nov 26 06:50:32 crc kubenswrapper[4492]: I1126 06:50:32.947710 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-qkl8m"
Nov 26 06:50:32 crc kubenswrapper[4492]: I1126 06:50:32.962956 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pxbgt\" (UniqueName: \"kubernetes.io/projected/72328da8-8a37-4bd5-b1f5-c26ce6aefd5a-kube-api-access-pxbgt\") pod \"redhat-operators-m6grh\" (UID: \"72328da8-8a37-4bd5-b1f5-c26ce6aefd5a\") " pod="openshift-marketplace/redhat-operators-m6grh"
Nov 26 06:50:33 crc kubenswrapper[4492]: I1126 06:50:33.030846 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/21a88fce-0f39-4a48-a0cd-61014b0a6ea1-utilities\") pod \"redhat-operators-qkl8m\" (UID: \"21a88fce-0f39-4a48-a0cd-61014b0a6ea1\") " pod="openshift-marketplace/redhat-operators-qkl8m"
Nov 26 06:50:33 crc kubenswrapper[4492]: I1126 06:50:33.030915 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fhb29\" (UniqueName: \"kubernetes.io/projected/21a88fce-0f39-4a48-a0cd-61014b0a6ea1-kube-api-access-fhb29\") pod \"redhat-operators-qkl8m\" (UID: \"21a88fce-0f39-4a48-a0cd-61014b0a6ea1\") " pod="openshift-marketplace/redhat-operators-qkl8m"
Nov 26 06:50:33 crc kubenswrapper[4492]: I1126 06:50:33.030939 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/21a88fce-0f39-4a48-a0cd-61014b0a6ea1-catalog-content\") pod \"redhat-operators-qkl8m\" (UID: \"21a88fce-0f39-4a48-a0cd-61014b0a6ea1\") " pod="openshift-marketplace/redhat-operators-qkl8m"
Nov 26 06:50:33 crc kubenswrapper[4492]: I1126 06:50:33.048919 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-apiserver/apiserver-76f77b778f-284jz"
Nov 26 06:50:33 crc kubenswrapper[4492]: I1126 06:50:33.049351 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-apiserver/apiserver-76f77b778f-284jz"
Nov 26 06:50:33 crc kubenswrapper[4492]: I1126 06:50:33.057382 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-apiserver/apiserver-76f77b778f-284jz"
Nov 26 06:50:33 crc kubenswrapper[4492]: I1126 06:50:33.080512 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-m6grh"
Nov 26 06:50:33 crc kubenswrapper[4492]: I1126 06:50:33.132931 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/21a88fce-0f39-4a48-a0cd-61014b0a6ea1-utilities\") pod \"redhat-operators-qkl8m\" (UID: \"21a88fce-0f39-4a48-a0cd-61014b0a6ea1\") " pod="openshift-marketplace/redhat-operators-qkl8m"
Nov 26 06:50:33 crc kubenswrapper[4492]: I1126 06:50:33.133055 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fhb29\" (UniqueName: \"kubernetes.io/projected/21a88fce-0f39-4a48-a0cd-61014b0a6ea1-kube-api-access-fhb29\") pod \"redhat-operators-qkl8m\" (UID: \"21a88fce-0f39-4a48-a0cd-61014b0a6ea1\") " pod="openshift-marketplace/redhat-operators-qkl8m"
Nov 26 06:50:33 crc kubenswrapper[4492]: I1126 06:50:33.133198 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/21a88fce-0f39-4a48-a0cd-61014b0a6ea1-catalog-content\") pod \"redhat-operators-qkl8m\" (UID: \"21a88fce-0f39-4a48-a0cd-61014b0a6ea1\") " pod="openshift-marketplace/redhat-operators-qkl8m"
Nov 26 06:50:33 crc kubenswrapper[4492]: I1126 06:50:33.133446 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/21a88fce-0f39-4a48-a0cd-61014b0a6ea1-utilities\") pod \"redhat-operators-qkl8m\" (UID: \"21a88fce-0f39-4a48-a0cd-61014b0a6ea1\") " pod="openshift-marketplace/redhat-operators-qkl8m"
Nov 26 06:50:33 crc kubenswrapper[4492]: I1126 06:50:33.134167 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/21a88fce-0f39-4a48-a0cd-61014b0a6ea1-catalog-content\") pod \"redhat-operators-qkl8m\" (UID: \"21a88fce-0f39-4a48-a0cd-61014b0a6ea1\") " pod="openshift-marketplace/redhat-operators-qkl8m"
Nov 26 06:50:33 crc kubenswrapper[4492]: I1126 06:50:33.154627 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fhb29\" (UniqueName: \"kubernetes.io/projected/21a88fce-0f39-4a48-a0cd-61014b0a6ea1-kube-api-access-fhb29\") pod \"redhat-operators-qkl8m\" (UID: \"21a88fce-0f39-4a48-a0cd-61014b0a6ea1\") " pod="openshift-marketplace/redhat-operators-qkl8m"
Nov 26 06:50:33 crc kubenswrapper[4492]: I1126 06:50:33.244459 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-f9d7485db-v92zj"
Nov 26 06:50:33 crc kubenswrapper[4492]: I1126 06:50:33.244761 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-f9d7485db-v92zj"
Nov 26 06:50:33 crc kubenswrapper[4492]: I1126 06:50:33.246989 4492 patch_prober.go:28] interesting pod/console-f9d7485db-v92zj container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.8:8443/health\": dial tcp 10.217.0.8:8443: connect: connection refused" start-of-body=
Nov 26 06:50:33 crc kubenswrapper[4492]: I1126 06:50:33.247049 4492 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-v92zj" podUID="ab14021b-87d7-43d0-9357-e8739e2d7dd1" containerName="console" probeResult="failure" output="Get \"https://10.217.0.8:8443/health\": dial tcp 10.217.0.8:8443: connect: connection refused"
Nov 26 06:50:33 crc kubenswrapper[4492]: I1126 06:50:33.319267 4492 util.go:30] "No sandbox for pod can be
found. Need to start a new one" pod="openshift-marketplace/redhat-operators-qkl8m" Nov 26 06:50:33 crc kubenswrapper[4492]: I1126 06:50:33.323442 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-7gl7t"] Nov 26 06:50:33 crc kubenswrapper[4492]: I1126 06:50:33.385529 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-54mhl"] Nov 26 06:50:33 crc kubenswrapper[4492]: W1126 06:50:33.472499 4492 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2b02d314_392f_44f3_a88c_57d8852fbcf9.slice/crio-c60b9310417f8d93196e36ee8e19450f7df7c5499d7ad5d69169eafac2b527b5 WatchSource:0}: Error finding container c60b9310417f8d93196e36ee8e19450f7df7c5499d7ad5d69169eafac2b527b5: Status 404 returned error can't find the container with id c60b9310417f8d93196e36ee8e19450f7df7c5499d7ad5d69169eafac2b527b5 Nov 26 06:50:33 crc kubenswrapper[4492]: I1126 06:50:33.601160 4492 patch_prober.go:28] interesting pod/router-default-5444994796-r6lm8 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 26 06:50:33 crc kubenswrapper[4492]: [-]has-synced failed: reason withheld Nov 26 06:50:33 crc kubenswrapper[4492]: [+]process-running ok Nov 26 06:50:33 crc kubenswrapper[4492]: healthz check failed Nov 26 06:50:33 crc kubenswrapper[4492]: I1126 06:50:33.601245 4492 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-r6lm8" podUID="198fd913-6670-4880-874e-cce2c186c203" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 26 06:50:33 crc kubenswrapper[4492]: I1126 06:50:33.607612 4492 patch_prober.go:28] interesting pod/downloads-7954f5f757-zvw72 container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.11:8080/\": dial tcp 10.217.0.11:8080: connect: connection refused" start-of-body= Nov 26 06:50:33 crc kubenswrapper[4492]: I1126 06:50:33.607845 4492 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-zvw72" podUID="587ccafa-460d-41b6-bced-9a82822fa43c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.11:8080/\": dial tcp 10.217.0.11:8080: connect: connection refused" Nov 26 06:50:33 crc kubenswrapper[4492]: I1126 06:50:33.607782 4492 patch_prober.go:28] interesting pod/downloads-7954f5f757-zvw72 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.11:8080/\": dial tcp 10.217.0.11:8080: connect: connection refused" start-of-body= Nov 26 06:50:33 crc kubenswrapper[4492]: I1126 06:50:33.608002 4492 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-zvw72" podUID="587ccafa-460d-41b6-bced-9a82822fa43c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.11:8080/\": dial tcp 10.217.0.11:8080: connect: connection refused" Nov 26 06:50:33 crc kubenswrapper[4492]: I1126 06:50:33.643461 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-m6grh"] Nov 26 06:50:33 crc kubenswrapper[4492]: W1126 06:50:33.671101 4492 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod72328da8_8a37_4bd5_b1f5_c26ce6aefd5a.slice/crio-a0b7baaf880f7af08352c243ca5deb9d6aa28462e86dc0e101a2f0692cbdf059 WatchSource:0}: Error finding container a0b7baaf880f7af08352c243ca5deb9d6aa28462e86dc0e101a2f0692cbdf059: Status 404 returned error can't find the container with id a0b7baaf880f7af08352c243ca5deb9d6aa28462e86dc0e101a2f0692cbdf059 Nov 26 06:50:33 crc kubenswrapper[4492]: I1126 06:50:33.929597 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-qkl8m"] Nov 26 06:50:33 crc kubenswrapper[4492]: I1126 06:50:33.970469 4492 generic.go:334] "Generic (PLEG): container finished" podID="bff9a13a-5b4a-4789-ab6d-bff2ad038798" containerID="404895fa5a739b7cbe56ddfd34dab03c089ff198b5fcd82f9d0d7142e1426c3a" exitCode=0 Nov 26 06:50:33 crc kubenswrapper[4492]: I1126 06:50:33.970645 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7gl7t" event={"ID":"bff9a13a-5b4a-4789-ab6d-bff2ad038798","Type":"ContainerDied","Data":"404895fa5a739b7cbe56ddfd34dab03c089ff198b5fcd82f9d0d7142e1426c3a"} Nov 26 06:50:33 crc kubenswrapper[4492]: I1126 06:50:33.970682 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7gl7t" event={"ID":"bff9a13a-5b4a-4789-ab6d-bff2ad038798","Type":"ContainerStarted","Data":"c66af40222a1ffbc2650500d0edb317cf2d1a52177e290461780745ae64e519e"} Nov 26 06:50:33 crc kubenswrapper[4492]: I1126 06:50:33.980160 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-m6grh" event={"ID":"72328da8-8a37-4bd5-b1f5-c26ce6aefd5a","Type":"ContainerStarted","Data":"2e4a6c0023bb2f1646081d397dec6df189d1f6911645a090ff830f92f3375357"} Nov 26 06:50:33 crc kubenswrapper[4492]: I1126 06:50:33.980229 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-m6grh" event={"ID":"72328da8-8a37-4bd5-b1f5-c26ce6aefd5a","Type":"ContainerStarted","Data":"a0b7baaf880f7af08352c243ca5deb9d6aa28462e86dc0e101a2f0692cbdf059"} Nov 26 06:50:33 crc kubenswrapper[4492]: I1126 06:50:33.985509 4492 generic.go:334] "Generic (PLEG): container finished" podID="abb41535-15fd-41da-9e54-d68ec23a99be" containerID="af83b13765b0ea434c7438ed702407c9903056625a5ddcef837f8607f4286c70" exitCode=0 Nov 26 06:50:33 crc kubenswrapper[4492]: I1126 06:50:33.985562 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7rl4j" event={"ID":"abb41535-15fd-41da-9e54-d68ec23a99be","Type":"ContainerDied","Data":"af83b13765b0ea434c7438ed702407c9903056625a5ddcef837f8607f4286c70"} Nov 26 06:50:33 crc kubenswrapper[4492]: I1126 06:50:33.990900 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qkl8m" event={"ID":"21a88fce-0f39-4a48-a0cd-61014b0a6ea1","Type":"ContainerStarted","Data":"a73794f730de375c8b01d984b30d7c1d964a54b79ebc7515b8e653405bcd8f45"} Nov 26 06:50:33 crc kubenswrapper[4492]: I1126 06:50:33.996961 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-54mhl" event={"ID":"2b02d314-392f-44f3-a88c-57d8852fbcf9","Type":"ContainerStarted","Data":"3f9af8c9d91a796d84df803f461901a2a386515abce2b0866d100c06c289daf3"} Nov 26 06:50:33 crc kubenswrapper[4492]: I1126 06:50:33.996993 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-54mhl" 
event={"ID":"2b02d314-392f-44f3-a88c-57d8852fbcf9","Type":"ContainerStarted","Data":"c60b9310417f8d93196e36ee8e19450f7df7c5499d7ad5d69169eafac2b527b5"} Nov 26 06:50:33 crc kubenswrapper[4492]: I1126 06:50:33.997222 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-697d97f7c8-54mhl" Nov 26 06:50:34 crc kubenswrapper[4492]: I1126 06:50:34.004097 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-apiserver/apiserver-76f77b778f-284jz" Nov 26 06:50:34 crc kubenswrapper[4492]: I1126 06:50:34.075817 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-sb6qj" Nov 26 06:50:34 crc kubenswrapper[4492]: I1126 06:50:34.146257 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-697d97f7c8-54mhl" podStartSLOduration=106.146235606 podStartE2EDuration="1m46.146235606s" podCreationTimestamp="2025-11-26 06:48:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:50:34.108490323 +0000 UTC m=+129.992378621" watchObservedRunningTime="2025-11-26 06:50:34.146235606 +0000 UTC m=+130.030123894" Nov 26 06:50:34 crc kubenswrapper[4492]: I1126 06:50:34.205707 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Nov 26 06:50:34 crc kubenswrapper[4492]: I1126 06:50:34.206456 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 26 06:50:34 crc kubenswrapper[4492]: I1126 06:50:34.211190 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Nov 26 06:50:34 crc kubenswrapper[4492]: I1126 06:50:34.211382 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Nov 26 06:50:34 crc kubenswrapper[4492]: I1126 06:50:34.219253 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Nov 26 06:50:34 crc kubenswrapper[4492]: I1126 06:50:34.272061 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/bef0c3ab-34e2-4c7c-972f-cec18fb94b57-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"bef0c3ab-34e2-4c7c-972f-cec18fb94b57\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 26 06:50:34 crc kubenswrapper[4492]: I1126 06:50:34.272195 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/bef0c3ab-34e2-4c7c-972f-cec18fb94b57-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"bef0c3ab-34e2-4c7c-972f-cec18fb94b57\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 26 06:50:34 crc kubenswrapper[4492]: I1126 06:50:34.382642 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/bef0c3ab-34e2-4c7c-972f-cec18fb94b57-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"bef0c3ab-34e2-4c7c-972f-cec18fb94b57\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 26 06:50:34 crc kubenswrapper[4492]: I1126 06:50:34.382723 4492 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/bef0c3ab-34e2-4c7c-972f-cec18fb94b57-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"bef0c3ab-34e2-4c7c-972f-cec18fb94b57\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 26 06:50:34 crc kubenswrapper[4492]: I1126 06:50:34.383143 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/bef0c3ab-34e2-4c7c-972f-cec18fb94b57-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"bef0c3ab-34e2-4c7c-972f-cec18fb94b57\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 26 06:50:34 crc kubenswrapper[4492]: I1126 06:50:34.405871 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/bef0c3ab-34e2-4c7c-972f-cec18fb94b57-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"bef0c3ab-34e2-4c7c-972f-cec18fb94b57\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 26 06:50:34 crc kubenswrapper[4492]: I1126 06:50:34.491529 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f668bae-612b-4b75-9490-919e737c6a3b" path="/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes" Nov 26 06:50:34 crc kubenswrapper[4492]: I1126 06:50:34.523597 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 26 06:50:34 crc kubenswrapper[4492]: I1126 06:50:34.565520 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-c2wr2" Nov 26 06:50:34 crc kubenswrapper[4492]: I1126 06:50:34.565646 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-c2wr2" Nov 26 06:50:34 crc kubenswrapper[4492]: I1126 06:50:34.566797 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402325-9jvn6" Nov 26 06:50:34 crc kubenswrapper[4492]: I1126 06:50:34.574561 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-c2wr2" Nov 26 06:50:34 crc kubenswrapper[4492]: I1126 06:50:34.582905 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ingress/router-default-5444994796-r6lm8" Nov 26 06:50:34 crc kubenswrapper[4492]: I1126 06:50:34.586854 4492 patch_prober.go:28] interesting pod/router-default-5444994796-r6lm8 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 26 06:50:34 crc kubenswrapper[4492]: [-]has-synced failed: reason withheld Nov 26 06:50:34 crc kubenswrapper[4492]: [+]process-running ok Nov 26 06:50:34 crc kubenswrapper[4492]: healthz check failed Nov 26 06:50:34 crc kubenswrapper[4492]: I1126 06:50:34.587091 4492 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-r6lm8" podUID="198fd913-6670-4880-874e-cce2c186c203" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 26 06:50:34 crc kubenswrapper[4492]: I1126 06:50:34.692600 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/7c5436ae-c6b0-4c8e-b45f-e580fef03690-config-volume\") pod \"7c5436ae-c6b0-4c8e-b45f-e580fef03690\" (UID: \"7c5436ae-c6b0-4c8e-b45f-e580fef03690\") " Nov 26 06:50:34 crc kubenswrapper[4492]: I1126 06:50:34.692638 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/7c5436ae-c6b0-4c8e-b45f-e580fef03690-secret-volume\") pod \"7c5436ae-c6b0-4c8e-b45f-e580fef03690\" (UID: \"7c5436ae-c6b0-4c8e-b45f-e580fef03690\") " Nov 26 06:50:34 crc kubenswrapper[4492]: I1126 06:50:34.692700 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8rhl2\" (UniqueName: \"kubernetes.io/projected/7c5436ae-c6b0-4c8e-b45f-e580fef03690-kube-api-access-8rhl2\") pod \"7c5436ae-c6b0-4c8e-b45f-e580fef03690\" (UID: \"7c5436ae-c6b0-4c8e-b45f-e580fef03690\") " Nov 26 06:50:34 crc kubenswrapper[4492]: I1126 06:50:34.694254 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7c5436ae-c6b0-4c8e-b45f-e580fef03690-config-volume" (OuterVolumeSpecName: "config-volume") pod "7c5436ae-c6b0-4c8e-b45f-e580fef03690" (UID: "7c5436ae-c6b0-4c8e-b45f-e580fef03690"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:50:34 crc kubenswrapper[4492]: I1126 06:50:34.712738 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7c5436ae-c6b0-4c8e-b45f-e580fef03690-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "7c5436ae-c6b0-4c8e-b45f-e580fef03690" (UID: "7c5436ae-c6b0-4c8e-b45f-e580fef03690"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:50:34 crc kubenswrapper[4492]: I1126 06:50:34.717339 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7c5436ae-c6b0-4c8e-b45f-e580fef03690-kube-api-access-8rhl2" (OuterVolumeSpecName: "kube-api-access-8rhl2") pod "7c5436ae-c6b0-4c8e-b45f-e580fef03690" (UID: "7c5436ae-c6b0-4c8e-b45f-e580fef03690"). InnerVolumeSpecName "kube-api-access-8rhl2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:50:34 crc kubenswrapper[4492]: I1126 06:50:34.723399 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 26 06:50:34 crc kubenswrapper[4492]: I1126 06:50:34.793641 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/261335b4-4d32-402e-8c7f-19d07db47a2a-kube-api-access\") pod \"261335b4-4d32-402e-8c7f-19d07db47a2a\" (UID: \"261335b4-4d32-402e-8c7f-19d07db47a2a\") " Nov 26 06:50:34 crc kubenswrapper[4492]: I1126 06:50:34.793706 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/261335b4-4d32-402e-8c7f-19d07db47a2a-kubelet-dir\") pod \"261335b4-4d32-402e-8c7f-19d07db47a2a\" (UID: \"261335b4-4d32-402e-8c7f-19d07db47a2a\") " Nov 26 06:50:34 crc kubenswrapper[4492]: I1126 06:50:34.794261 4492 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/7c5436ae-c6b0-4c8e-b45f-e580fef03690-config-volume\") on node \"crc\" DevicePath \"\"" Nov 26 06:50:34 crc kubenswrapper[4492]: I1126 06:50:34.794282 4492 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/7c5436ae-c6b0-4c8e-b45f-e580fef03690-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 26 06:50:34 crc kubenswrapper[4492]: I1126 06:50:34.794292 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8rhl2\" (UniqueName: \"kubernetes.io/projected/7c5436ae-c6b0-4c8e-b45f-e580fef03690-kube-api-access-8rhl2\") on node \"crc\" DevicePath \"\"" Nov 26 06:50:34 crc kubenswrapper[4492]: I1126 06:50:34.795682 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/261335b4-4d32-402e-8c7f-19d07db47a2a-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "261335b4-4d32-402e-8c7f-19d07db47a2a" (UID: "261335b4-4d32-402e-8c7f-19d07db47a2a"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 06:50:34 crc kubenswrapper[4492]: I1126 06:50:34.808125 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/261335b4-4d32-402e-8c7f-19d07db47a2a-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "261335b4-4d32-402e-8c7f-19d07db47a2a" (UID: "261335b4-4d32-402e-8c7f-19d07db47a2a"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:50:34 crc kubenswrapper[4492]: I1126 06:50:34.897824 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/261335b4-4d32-402e-8c7f-19d07db47a2a-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 26 06:50:34 crc kubenswrapper[4492]: I1126 06:50:34.897853 4492 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/261335b4-4d32-402e-8c7f-19d07db47a2a-kubelet-dir\") on node \"crc\" DevicePath \"\"" Nov 26 06:50:35 crc kubenswrapper[4492]: I1126 06:50:35.011721 4492 generic.go:334] "Generic (PLEG): container finished" podID="72328da8-8a37-4bd5-b1f5-c26ce6aefd5a" containerID="2e4a6c0023bb2f1646081d397dec6df189d1f6911645a090ff830f92f3375357" exitCode=0 Nov 26 06:50:35 crc kubenswrapper[4492]: I1126 06:50:35.011776 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-m6grh" event={"ID":"72328da8-8a37-4bd5-b1f5-c26ce6aefd5a","Type":"ContainerDied","Data":"2e4a6c0023bb2f1646081d397dec6df189d1f6911645a090ff830f92f3375357"} Nov 26 06:50:35 crc kubenswrapper[4492]: I1126 06:50:35.018976 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 26 06:50:35 crc kubenswrapper[4492]: I1126 06:50:35.019115 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"261335b4-4d32-402e-8c7f-19d07db47a2a","Type":"ContainerDied","Data":"cf660d5b53887806d4e1d0713eeab5bfa8c9f060a86a674120b4315e38319b11"} Nov 26 06:50:35 crc kubenswrapper[4492]: I1126 06:50:35.019213 4492 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="cf660d5b53887806d4e1d0713eeab5bfa8c9f060a86a674120b4315e38319b11" Nov 26 06:50:35 crc kubenswrapper[4492]: I1126 06:50:35.026121 4492 generic.go:334] "Generic (PLEG): container finished" podID="21a88fce-0f39-4a48-a0cd-61014b0a6ea1" containerID="f4f299aefb335564210b77897ff3b65ef222adffb3b58253f3d28edee951d3fb" exitCode=0 Nov 26 06:50:35 crc kubenswrapper[4492]: I1126 06:50:35.026194 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qkl8m" event={"ID":"21a88fce-0f39-4a48-a0cd-61014b0a6ea1","Type":"ContainerDied","Data":"f4f299aefb335564210b77897ff3b65ef222adffb3b58253f3d28edee951d3fb"} Nov 26 06:50:35 crc kubenswrapper[4492]: I1126 06:50:35.037399 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402325-9jvn6" Nov 26 06:50:35 crc kubenswrapper[4492]: I1126 06:50:35.039120 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402325-9jvn6" event={"ID":"7c5436ae-c6b0-4c8e-b45f-e580fef03690","Type":"ContainerDied","Data":"0ffa890cb3f00f0f6f2dd924099dc61dd2c6643071f1b58b08a63e79dfb2e897"} Nov 26 06:50:35 crc kubenswrapper[4492]: I1126 06:50:35.039448 4492 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0ffa890cb3f00f0f6f2dd924099dc61dd2c6643071f1b58b08a63e79dfb2e897" Nov 26 06:50:35 crc kubenswrapper[4492]: I1126 06:50:35.046116 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-c2wr2" Nov 26 06:50:35 crc kubenswrapper[4492]: I1126 06:50:35.194482 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Nov 26 06:50:35 crc kubenswrapper[4492]: I1126 06:50:35.584225 4492 patch_prober.go:28] interesting pod/router-default-5444994796-r6lm8 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 26 06:50:35 crc kubenswrapper[4492]: [-]has-synced failed: reason withheld Nov 26 06:50:35 crc kubenswrapper[4492]: [+]process-running ok Nov 26 06:50:35 crc kubenswrapper[4492]: healthz check failed Nov 26 06:50:35 crc kubenswrapper[4492]: I1126 06:50:35.584286 4492 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-r6lm8" podUID="198fd913-6670-4880-874e-cce2c186c203" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 26 06:50:36 crc kubenswrapper[4492]: I1126 06:50:36.081567 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"bef0c3ab-34e2-4c7c-972f-cec18fb94b57","Type":"ContainerStarted","Data":"33084ddd1cdb12568f4f8e17c33e130f6bb57868512b83a2781677dcf365093a"} Nov 26 06:50:36 crc kubenswrapper[4492]: I1126 06:50:36.081862 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"bef0c3ab-34e2-4c7c-972f-cec18fb94b57","Type":"ContainerStarted","Data":"247d508b99c83773105b1c4a0e11745e473484b1a0ef8cfa0b876ec62b525bef"} Nov 26 06:50:36 crc kubenswrapper[4492]: I1126 06:50:36.101054 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/revision-pruner-8-crc" podStartSLOduration=2.101021104 podStartE2EDuration="2.101021104s" podCreationTimestamp="2025-11-26 06:50:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:50:36.098606986 +0000 UTC m=+131.982495284" watchObservedRunningTime="2025-11-26 06:50:36.101021104 +0000 UTC m=+131.984909403" Nov 26 06:50:36 crc kubenswrapper[4492]: I1126 06:50:36.583216 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-ingress/router-default-5444994796-r6lm8" Nov 26 06:50:36 crc kubenswrapper[4492]: I1126 06:50:36.587109 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ingress/router-default-5444994796-r6lm8" Nov 26 06:50:37 crc kubenswrapper[4492]: I1126 06:50:37.108907 4492 generic.go:334] 
"Generic (PLEG): container finished" podID="bef0c3ab-34e2-4c7c-972f-cec18fb94b57" containerID="33084ddd1cdb12568f4f8e17c33e130f6bb57868512b83a2781677dcf365093a" exitCode=0 Nov 26 06:50:37 crc kubenswrapper[4492]: I1126 06:50:37.109083 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"bef0c3ab-34e2-4c7c-972f-cec18fb94b57","Type":"ContainerDied","Data":"33084ddd1cdb12568f4f8e17c33e130f6bb57868512b83a2781677dcf365093a"} Nov 26 06:50:38 crc kubenswrapper[4492]: I1126 06:50:38.455252 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 26 06:50:38 crc kubenswrapper[4492]: I1126 06:50:38.491007 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/bef0c3ab-34e2-4c7c-972f-cec18fb94b57-kubelet-dir\") pod \"bef0c3ab-34e2-4c7c-972f-cec18fb94b57\" (UID: \"bef0c3ab-34e2-4c7c-972f-cec18fb94b57\") " Nov 26 06:50:38 crc kubenswrapper[4492]: I1126 06:50:38.491064 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/bef0c3ab-34e2-4c7c-972f-cec18fb94b57-kube-api-access\") pod \"bef0c3ab-34e2-4c7c-972f-cec18fb94b57\" (UID: \"bef0c3ab-34e2-4c7c-972f-cec18fb94b57\") " Nov 26 06:50:38 crc kubenswrapper[4492]: I1126 06:50:38.491856 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/bef0c3ab-34e2-4c7c-972f-cec18fb94b57-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "bef0c3ab-34e2-4c7c-972f-cec18fb94b57" (UID: "bef0c3ab-34e2-4c7c-972f-cec18fb94b57"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 06:50:38 crc kubenswrapper[4492]: I1126 06:50:38.506769 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bef0c3ab-34e2-4c7c-972f-cec18fb94b57-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "bef0c3ab-34e2-4c7c-972f-cec18fb94b57" (UID: "bef0c3ab-34e2-4c7c-972f-cec18fb94b57"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:50:38 crc kubenswrapper[4492]: I1126 06:50:38.593679 4492 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/bef0c3ab-34e2-4c7c-972f-cec18fb94b57-kubelet-dir\") on node \"crc\" DevicePath \"\"" Nov 26 06:50:38 crc kubenswrapper[4492]: I1126 06:50:38.593710 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/bef0c3ab-34e2-4c7c-972f-cec18fb94b57-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 26 06:50:39 crc kubenswrapper[4492]: I1126 06:50:39.143336 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"bef0c3ab-34e2-4c7c-972f-cec18fb94b57","Type":"ContainerDied","Data":"247d508b99c83773105b1c4a0e11745e473484b1a0ef8cfa0b876ec62b525bef"} Nov 26 06:50:39 crc kubenswrapper[4492]: I1126 06:50:39.143372 4492 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="247d508b99c83773105b1c4a0e11745e473484b1a0ef8cfa0b876ec62b525bef" Nov 26 06:50:39 crc kubenswrapper[4492]: I1126 06:50:39.143420 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 26 06:50:40 crc kubenswrapper[4492]: I1126 06:50:40.598191 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-dns/dns-default-s4zp9" Nov 26 06:50:41 crc kubenswrapper[4492]: I1126 06:50:41.079806 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" Nov 26 06:50:43 crc kubenswrapper[4492]: I1126 06:50:43.244576 4492 patch_prober.go:28] interesting pod/console-f9d7485db-v92zj container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.8:8443/health\": dial tcp 10.217.0.8:8443: connect: connection refused" start-of-body= Nov 26 06:50:43 crc kubenswrapper[4492]: I1126 06:50:43.244940 4492 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-v92zj" podUID="ab14021b-87d7-43d0-9357-e8739e2d7dd1" containerName="console" probeResult="failure" output="Get \"https://10.217.0.8:8443/health\": dial tcp 10.217.0.8:8443: connect: connection refused" Nov 26 06:50:43 crc kubenswrapper[4492]: I1126 06:50:43.611157 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/downloads-7954f5f757-zvw72" Nov 26 06:50:49 crc kubenswrapper[4492]: I1126 06:50:49.442072 4492 patch_prober.go:28] interesting pod/machine-config-daemon-6blv7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 06:50:49 crc kubenswrapper[4492]: I1126 06:50:49.442628 4492 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 06:50:50 crc kubenswrapper[4492]: I1126 06:50:50.220715 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 06:50:50 crc kubenswrapper[4492]: I1126 06:50:50.221305 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 06:50:50 crc kubenswrapper[4492]: I1126 06:50:50.221344 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 06:50:50 crc kubenswrapper[4492]: I1126 06:50:50.221383 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 06:50:50 crc kubenswrapper[4492]: I1126 06:50:50.222692 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Nov 26 06:50:50 crc kubenswrapper[4492]: I1126 06:50:50.222701 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 06:50:50 crc kubenswrapper[4492]: I1126 06:50:50.228657 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 06:50:50 crc kubenswrapper[4492]: I1126 06:50:50.236348 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Nov 26 06:50:50 crc kubenswrapper[4492]: I1126 06:50:50.245119 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 06:50:50 crc kubenswrapper[4492]: I1126 06:50:50.247194 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 06:50:50 crc kubenswrapper[4492]: I1126 06:50:50.350734 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 06:50:50 crc kubenswrapper[4492]: I1126 06:50:50.356859 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 06:50:50 crc kubenswrapper[4492]: I1126 06:50:50.486491 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 06:50:52 crc kubenswrapper[4492]: I1126 06:50:52.900542 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-697d97f7c8-54mhl" Nov 26 06:50:53 crc kubenswrapper[4492]: I1126 06:50:53.248299 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-f9d7485db-v92zj" Nov 26 06:50:53 crc kubenswrapper[4492]: I1126 06:50:53.252255 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-f9d7485db-v92zj" Nov 26 06:50:58 crc kubenswrapper[4492]: E1126 06:50:58.534626 4492 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Nov 26 06:50:58 crc kubenswrapper[4492]: E1126 06:50:58.535156 4492 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-9g6nm,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-7gl7t_openshift-marketplace(bff9a13a-5b4a-4789-ab6d-bff2ad038798): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 26 06:50:58 crc kubenswrapper[4492]: E1126 06:50:58.536424 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-7gl7t" podUID="bff9a13a-5b4a-4789-ab6d-bff2ad038798" Nov 26 06:51:00 crc kubenswrapper[4492]: E1126 06:51:00.313075 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image 
\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-7gl7t" podUID="bff9a13a-5b4a-4789-ab6d-bff2ad038798" Nov 26 06:51:00 crc kubenswrapper[4492]: E1126 06:51:00.411406 4492 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Nov 26 06:51:00 crc kubenswrapper[4492]: E1126 06:51:00.411580 4492 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-jrwlm,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-kt8n4_openshift-marketplace(817798d6-9eae-4ee2-9b2f-53c54772866c): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 26 06:51:00 crc kubenswrapper[4492]: E1126 06:51:00.413243 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-kt8n4" podUID="817798d6-9eae-4ee2-9b2f-53c54772866c" Nov 26 06:51:00 crc kubenswrapper[4492]: E1126 06:51:00.464674 4492 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Nov 26 06:51:00 crc kubenswrapper[4492]: E1126 06:51:00.465084 4492 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-cthkd,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-l62r5_openshift-marketplace(ec884242-662f-4387-b710-0d6c601a41c4): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 26 06:51:00 crc kubenswrapper[4492]: E1126 06:51:00.466472 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-l62r5" podUID="ec884242-662f-4387-b710-0d6c601a41c4" Nov 26 06:51:00 crc kubenswrapper[4492]: W1126 06:51:00.854548 4492 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3b6479f0_333b_4a96_9adf_2099afdc2447.slice/crio-2c97c371ee21a5f52961888b16c5183c36b792a39e0d36cb002f90466049b63a WatchSource:0}: Error finding container 2c97c371ee21a5f52961888b16c5183c36b792a39e0d36cb002f90466049b63a: Status 404 returned error can't find the container with id 2c97c371ee21a5f52961888b16c5183c36b792a39e0d36cb002f90466049b63a Nov 26 06:51:01 crc kubenswrapper[4492]: I1126 06:51:01.349860 4492 generic.go:334] "Generic (PLEG): container finished" podID="abb41535-15fd-41da-9e54-d68ec23a99be" containerID="04fc4fcd4df3fc430e1e0e4f6c2265d760e496d2a8cbe91c5e9f76641b28f0f2" exitCode=0 Nov 26 06:51:01 crc kubenswrapper[4492]: I1126 06:51:01.349941 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7rl4j" event={"ID":"abb41535-15fd-41da-9e54-d68ec23a99be","Type":"ContainerDied","Data":"04fc4fcd4df3fc430e1e0e4f6c2265d760e496d2a8cbe91c5e9f76641b28f0f2"} Nov 26 06:51:01 crc kubenswrapper[4492]: I1126 06:51:01.356561 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"856856a053ed6e26e3aac9afb30c435548e47254c70ffa81a0d78ce672cc2dae"} Nov 26 06:51:01 crc kubenswrapper[4492]: I1126 06:51:01.356825 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"497a9aeccd7e9df62915563ce8eba334da5c64d8ccc03b6ee1ba5530331b84bb"} Nov 26 06:51:01 crc kubenswrapper[4492]: I1126 06:51:01.360628 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qkl8m" event={"ID":"21a88fce-0f39-4a48-a0cd-61014b0a6ea1","Type":"ContainerStarted","Data":"145a12f09d0cbed32fa49a846eb41ed7e2104fb5b2dccb6b1bed3e676aab1abf"} Nov 26 06:51:01 crc kubenswrapper[4492]: I1126 06:51:01.362226 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"bf0262f9001e1a70cb6ee86f84f309978fc283399da149fe0f7fb6556139aeb8"} Nov 26 06:51:01 crc kubenswrapper[4492]: I1126 06:51:01.362256 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"c3c20f4632efccaa0bf6057b1cf2006b153242331d2969d0583af8b30ccf5eab"} Nov 26 06:51:01 crc kubenswrapper[4492]: I1126 06:51:01.363628 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"a5b4394e1c3d8b43d25d957e6ddcfa3c9a115dfa41d47acb72eae5f4317f7c0a"} Nov 26 06:51:01 crc kubenswrapper[4492]: I1126 06:51:01.363682 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"2c97c371ee21a5f52961888b16c5183c36b792a39e0d36cb002f90466049b63a"} Nov 26 06:51:01 crc kubenswrapper[4492]: I1126 06:51:01.363863 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 06:51:01 crc kubenswrapper[4492]: I1126 06:51:01.365415 4492 generic.go:334] "Generic (PLEG): container finished" podID="a236508b-a76f-4029-b748-7bfdbe412825" containerID="5c7b27de1b38f213c5e43f406854becde66739f48740c31d201497ecfad6d0c6" exitCode=0 Nov 26 06:51:01 crc kubenswrapper[4492]: I1126 06:51:01.365484 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-d925t" event={"ID":"a236508b-a76f-4029-b748-7bfdbe412825","Type":"ContainerDied","Data":"5c7b27de1b38f213c5e43f406854becde66739f48740c31d201497ecfad6d0c6"} Nov 26 06:51:01 crc kubenswrapper[4492]: I1126 06:51:01.371792 4492 generic.go:334] "Generic (PLEG): container finished" podID="0abbeecb-4a32-445b-af18-2a6135721e74" containerID="6335aea9ff700fb9ecfdd8c6004f238f2c26c6346b39e629e511ff642b67674c" exitCode=0 Nov 26 06:51:01 crc kubenswrapper[4492]: I1126 06:51:01.372078 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-99qpv" event={"ID":"0abbeecb-4a32-445b-af18-2a6135721e74","Type":"ContainerDied","Data":"6335aea9ff700fb9ecfdd8c6004f238f2c26c6346b39e629e511ff642b67674c"} Nov 26 06:51:01 crc kubenswrapper[4492]: I1126 06:51:01.373816 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-m6grh" 
event={"ID":"72328da8-8a37-4bd5-b1f5-c26ce6aefd5a","Type":"ContainerStarted","Data":"60985278a6cbc7779a803eccfebdb966727f19f31cd7a677244c294a051e8208"} Nov 26 06:51:01 crc kubenswrapper[4492]: E1126 06:51:01.375250 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-kt8n4" podUID="817798d6-9eae-4ee2-9b2f-53c54772866c" Nov 26 06:51:01 crc kubenswrapper[4492]: E1126 06:51:01.375894 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-l62r5" podUID="ec884242-662f-4387-b710-0d6c601a41c4" Nov 26 06:51:02 crc kubenswrapper[4492]: I1126 06:51:02.383474 4492 generic.go:334] "Generic (PLEG): container finished" podID="21a88fce-0f39-4a48-a0cd-61014b0a6ea1" containerID="145a12f09d0cbed32fa49a846eb41ed7e2104fb5b2dccb6b1bed3e676aab1abf" exitCode=0 Nov 26 06:51:02 crc kubenswrapper[4492]: I1126 06:51:02.383927 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qkl8m" event={"ID":"21a88fce-0f39-4a48-a0cd-61014b0a6ea1","Type":"ContainerDied","Data":"145a12f09d0cbed32fa49a846eb41ed7e2104fb5b2dccb6b1bed3e676aab1abf"} Nov 26 06:51:02 crc kubenswrapper[4492]: I1126 06:51:02.388349 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-d925t" event={"ID":"a236508b-a76f-4029-b748-7bfdbe412825","Type":"ContainerStarted","Data":"7e40925eca9fd7ba3c54ecbded7436204da38f23be02845b51219354c719353f"} Nov 26 06:51:02 crc kubenswrapper[4492]: I1126 06:51:02.392749 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-99qpv" event={"ID":"0abbeecb-4a32-445b-af18-2a6135721e74","Type":"ContainerStarted","Data":"7911877b43a8ad7233548f5b337ce55f4982497d827c774655e19ae5e818894c"} Nov 26 06:51:02 crc kubenswrapper[4492]: I1126 06:51:02.395752 4492 generic.go:334] "Generic (PLEG): container finished" podID="72328da8-8a37-4bd5-b1f5-c26ce6aefd5a" containerID="60985278a6cbc7779a803eccfebdb966727f19f31cd7a677244c294a051e8208" exitCode=0 Nov 26 06:51:02 crc kubenswrapper[4492]: I1126 06:51:02.395812 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-m6grh" event={"ID":"72328da8-8a37-4bd5-b1f5-c26ce6aefd5a","Type":"ContainerDied","Data":"60985278a6cbc7779a803eccfebdb966727f19f31cd7a677244c294a051e8208"} Nov 26 06:51:02 crc kubenswrapper[4492]: I1126 06:51:02.395832 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-m6grh" event={"ID":"72328da8-8a37-4bd5-b1f5-c26ce6aefd5a","Type":"ContainerStarted","Data":"0a78d28cc29e4520348fd572074b9bf3878f969cc9b8046f95bac20a59e379b9"} Nov 26 06:51:02 crc kubenswrapper[4492]: I1126 06:51:02.398266 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7rl4j" event={"ID":"abb41535-15fd-41da-9e54-d68ec23a99be","Type":"ContainerStarted","Data":"07cf7fede72449a880e175a0d1e54a6a7cab0c00ea8dc5d72c1213e82e2dd5a5"} Nov 26 06:51:02 crc kubenswrapper[4492]: I1126 06:51:02.458270 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openshift-marketplace/redhat-marketplace-7rl4j" podStartSLOduration=2.53456759 podStartE2EDuration="31.458251848s" podCreationTimestamp="2025-11-26 06:50:31 +0000 UTC" firstStartedPulling="2025-11-26 06:50:32.937115844 +0000 UTC m=+128.821004143" lastFinishedPulling="2025-11-26 06:51:01.860800103 +0000 UTC m=+157.744688401" observedRunningTime="2025-11-26 06:51:02.441383469 +0000 UTC m=+158.325271758" watchObservedRunningTime="2025-11-26 06:51:02.458251848 +0000 UTC m=+158.342140146" Nov 26 06:51:02 crc kubenswrapper[4492]: I1126 06:51:02.488167 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-d925t" podStartSLOduration=2.4424364 podStartE2EDuration="33.488150332s" podCreationTimestamp="2025-11-26 06:50:29 +0000 UTC" firstStartedPulling="2025-11-26 06:50:30.827402575 +0000 UTC m=+126.711290872" lastFinishedPulling="2025-11-26 06:51:01.873116506 +0000 UTC m=+157.757004804" observedRunningTime="2025-11-26 06:51:02.463631741 +0000 UTC m=+158.347520029" watchObservedRunningTime="2025-11-26 06:51:02.488150332 +0000 UTC m=+158.372038630" Nov 26 06:51:02 crc kubenswrapper[4492]: I1126 06:51:02.488471 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-99qpv" podStartSLOduration=3.473597727 podStartE2EDuration="33.488466535s" podCreationTimestamp="2025-11-26 06:50:29 +0000 UTC" firstStartedPulling="2025-11-26 06:50:31.848692947 +0000 UTC m=+127.732581245" lastFinishedPulling="2025-11-26 06:51:01.863561755 +0000 UTC m=+157.747450053" observedRunningTime="2025-11-26 06:51:02.486279775 +0000 UTC m=+158.370168093" watchObservedRunningTime="2025-11-26 06:51:02.488466535 +0000 UTC m=+158.372354834" Nov 26 06:51:02 crc kubenswrapper[4492]: I1126 06:51:02.505515 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-m6grh" podStartSLOduration=3.663747453 podStartE2EDuration="30.505501949s" podCreationTimestamp="2025-11-26 06:50:32 +0000 UTC" firstStartedPulling="2025-11-26 06:50:35.046250547 +0000 UTC m=+130.930138845" lastFinishedPulling="2025-11-26 06:51:01.888005043 +0000 UTC m=+157.771893341" observedRunningTime="2025-11-26 06:51:02.502722183 +0000 UTC m=+158.386610481" watchObservedRunningTime="2025-11-26 06:51:02.505501949 +0000 UTC m=+158.389390247" Nov 26 06:51:03 crc kubenswrapper[4492]: I1126 06:51:03.082030 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-m6grh" Nov 26 06:51:03 crc kubenswrapper[4492]: I1126 06:51:03.082074 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-m6grh" Nov 26 06:51:03 crc kubenswrapper[4492]: I1126 06:51:03.405476 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qkl8m" event={"ID":"21a88fce-0f39-4a48-a0cd-61014b0a6ea1","Type":"ContainerStarted","Data":"bd0cc87b1a33f4a003e8f9958d8cd3e1a10d3ec01d378c2b6517dc1a5e5a1397"} Nov 26 06:51:04 crc kubenswrapper[4492]: I1126 06:51:04.209639 4492 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-m6grh" podUID="72328da8-8a37-4bd5-b1f5-c26ce6aefd5a" containerName="registry-server" probeResult="failure" output=< Nov 26 06:51:04 crc kubenswrapper[4492]: timeout: failed to connect service ":50051" within 1s Nov 26 06:51:04 crc kubenswrapper[4492]: > Nov 26 06:51:05 crc kubenswrapper[4492]: I1126 
06:51:05.401127 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-ftkfp" Nov 26 06:51:05 crc kubenswrapper[4492]: I1126 06:51:05.419898 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-qkl8m" podStartSLOduration=5.589004082 podStartE2EDuration="33.419871555s" podCreationTimestamp="2025-11-26 06:50:32 +0000 UTC" firstStartedPulling="2025-11-26 06:50:35.046571099 +0000 UTC m=+130.930459427" lastFinishedPulling="2025-11-26 06:51:02.877438602 +0000 UTC m=+158.761326900" observedRunningTime="2025-11-26 06:51:03.423149454 +0000 UTC m=+159.307037742" watchObservedRunningTime="2025-11-26 06:51:05.419871555 +0000 UTC m=+161.303759842" Nov 26 06:51:09 crc kubenswrapper[4492]: I1126 06:51:09.538891 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-d925t" Nov 26 06:51:09 crc kubenswrapper[4492]: I1126 06:51:09.539258 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-d925t" Nov 26 06:51:09 crc kubenswrapper[4492]: I1126 06:51:09.573321 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-d925t" Nov 26 06:51:09 crc kubenswrapper[4492]: I1126 06:51:09.894654 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-99qpv" Nov 26 06:51:09 crc kubenswrapper[4492]: I1126 06:51:09.895441 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-99qpv" Nov 26 06:51:09 crc kubenswrapper[4492]: I1126 06:51:09.924265 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-99qpv" Nov 26 06:51:10 crc kubenswrapper[4492]: I1126 06:51:10.212435 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/1cc59fbe-82e1-406b-95b1-a26b6b8ef467-metrics-certs\") pod \"network-metrics-daemon-s4gtb\" (UID: \"1cc59fbe-82e1-406b-95b1-a26b6b8ef467\") " pod="openshift-multus/network-metrics-daemon-s4gtb" Nov 26 06:51:10 crc kubenswrapper[4492]: I1126 06:51:10.227831 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/1cc59fbe-82e1-406b-95b1-a26b6b8ef467-metrics-certs\") pod \"network-metrics-daemon-s4gtb\" (UID: \"1cc59fbe-82e1-406b-95b1-a26b6b8ef467\") " pod="openshift-multus/network-metrics-daemon-s4gtb" Nov 26 06:51:10 crc kubenswrapper[4492]: I1126 06:51:10.275154 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-s4gtb" Nov 26 06:51:10 crc kubenswrapper[4492]: I1126 06:51:10.479006 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-d925t" Nov 26 06:51:10 crc kubenswrapper[4492]: I1126 06:51:10.480205 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-99qpv" Nov 26 06:51:10 crc kubenswrapper[4492]: I1126 06:51:10.641326 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-s4gtb"] Nov 26 06:51:10 crc kubenswrapper[4492]: I1126 06:51:10.804615 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-99qpv"] Nov 26 06:51:11 crc kubenswrapper[4492]: I1126 06:51:11.455936 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-s4gtb" event={"ID":"1cc59fbe-82e1-406b-95b1-a26b6b8ef467","Type":"ContainerStarted","Data":"5344941df477fe3d927f1b6127fc0cec3ded2e99c5e29e5cf1b880b162e5b9e5"} Nov 26 06:51:11 crc kubenswrapper[4492]: I1126 06:51:11.456280 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-s4gtb" event={"ID":"1cc59fbe-82e1-406b-95b1-a26b6b8ef467","Type":"ContainerStarted","Data":"8bcf384e954e55180727d606f8e863278224bd5313ca1da2338fba62b5d2137f"} Nov 26 06:51:11 crc kubenswrapper[4492]: I1126 06:51:11.456295 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-s4gtb" event={"ID":"1cc59fbe-82e1-406b-95b1-a26b6b8ef467","Type":"ContainerStarted","Data":"6a8f448c87c38f8411917c4cd02e8b5af30e64fa5e21d96d8cbe2142240ac4fb"} Nov 26 06:51:11 crc kubenswrapper[4492]: I1126 06:51:11.471053 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/network-metrics-daemon-s4gtb" podStartSLOduration=143.471027833 podStartE2EDuration="2m23.471027833s" podCreationTimestamp="2025-11-26 06:48:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:51:11.4675845 +0000 UTC m=+167.351472798" watchObservedRunningTime="2025-11-26 06:51:11.471027833 +0000 UTC m=+167.354916122" Nov 26 06:51:12 crc kubenswrapper[4492]: I1126 06:51:12.277462 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-7rl4j" Nov 26 06:51:12 crc kubenswrapper[4492]: I1126 06:51:12.278226 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-7rl4j" Nov 26 06:51:12 crc kubenswrapper[4492]: I1126 06:51:12.318898 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-7rl4j" Nov 26 06:51:12 crc kubenswrapper[4492]: I1126 06:51:12.461088 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-99qpv" podUID="0abbeecb-4a32-445b-af18-2a6135721e74" containerName="registry-server" containerID="cri-o://7911877b43a8ad7233548f5b337ce55f4982497d827c774655e19ae5e818894c" gracePeriod=2 Nov 26 06:51:12 crc kubenswrapper[4492]: I1126 06:51:12.491968 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-7rl4j" Nov 26 06:51:12 crc kubenswrapper[4492]: I1126 06:51:12.848647 4492 util.go:48] 
"No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-99qpv" Nov 26 06:51:12 crc kubenswrapper[4492]: I1126 06:51:12.948192 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xsdmv\" (UniqueName: \"kubernetes.io/projected/0abbeecb-4a32-445b-af18-2a6135721e74-kube-api-access-xsdmv\") pod \"0abbeecb-4a32-445b-af18-2a6135721e74\" (UID: \"0abbeecb-4a32-445b-af18-2a6135721e74\") " Nov 26 06:51:12 crc kubenswrapper[4492]: I1126 06:51:12.948550 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0abbeecb-4a32-445b-af18-2a6135721e74-catalog-content\") pod \"0abbeecb-4a32-445b-af18-2a6135721e74\" (UID: \"0abbeecb-4a32-445b-af18-2a6135721e74\") " Nov 26 06:51:12 crc kubenswrapper[4492]: I1126 06:51:12.948670 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0abbeecb-4a32-445b-af18-2a6135721e74-utilities\") pod \"0abbeecb-4a32-445b-af18-2a6135721e74\" (UID: \"0abbeecb-4a32-445b-af18-2a6135721e74\") " Nov 26 06:51:12 crc kubenswrapper[4492]: I1126 06:51:12.949440 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0abbeecb-4a32-445b-af18-2a6135721e74-utilities" (OuterVolumeSpecName: "utilities") pod "0abbeecb-4a32-445b-af18-2a6135721e74" (UID: "0abbeecb-4a32-445b-af18-2a6135721e74"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 06:51:12 crc kubenswrapper[4492]: I1126 06:51:12.955085 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0abbeecb-4a32-445b-af18-2a6135721e74-kube-api-access-xsdmv" (OuterVolumeSpecName: "kube-api-access-xsdmv") pod "0abbeecb-4a32-445b-af18-2a6135721e74" (UID: "0abbeecb-4a32-445b-af18-2a6135721e74"). InnerVolumeSpecName "kube-api-access-xsdmv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:51:12 crc kubenswrapper[4492]: I1126 06:51:12.989548 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0abbeecb-4a32-445b-af18-2a6135721e74-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0abbeecb-4a32-445b-af18-2a6135721e74" (UID: "0abbeecb-4a32-445b-af18-2a6135721e74"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 06:51:13 crc kubenswrapper[4492]: I1126 06:51:13.051085 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xsdmv\" (UniqueName: \"kubernetes.io/projected/0abbeecb-4a32-445b-af18-2a6135721e74-kube-api-access-xsdmv\") on node \"crc\" DevicePath \"\"" Nov 26 06:51:13 crc kubenswrapper[4492]: I1126 06:51:13.051114 4492 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0abbeecb-4a32-445b-af18-2a6135721e74-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 06:51:13 crc kubenswrapper[4492]: I1126 06:51:13.051123 4492 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0abbeecb-4a32-445b-af18-2a6135721e74-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 06:51:13 crc kubenswrapper[4492]: I1126 06:51:13.111790 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-m6grh" Nov 26 06:51:13 crc kubenswrapper[4492]: I1126 06:51:13.140451 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-m6grh" Nov 26 06:51:13 crc kubenswrapper[4492]: I1126 06:51:13.320167 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-qkl8m" Nov 26 06:51:13 crc kubenswrapper[4492]: I1126 06:51:13.320242 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-qkl8m" Nov 26 06:51:13 crc kubenswrapper[4492]: I1126 06:51:13.352013 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-qkl8m" Nov 26 06:51:13 crc kubenswrapper[4492]: I1126 06:51:13.470777 4492 generic.go:334] "Generic (PLEG): container finished" podID="0abbeecb-4a32-445b-af18-2a6135721e74" containerID="7911877b43a8ad7233548f5b337ce55f4982497d827c774655e19ae5e818894c" exitCode=0 Nov 26 06:51:13 crc kubenswrapper[4492]: I1126 06:51:13.470828 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-99qpv" event={"ID":"0abbeecb-4a32-445b-af18-2a6135721e74","Type":"ContainerDied","Data":"7911877b43a8ad7233548f5b337ce55f4982497d827c774655e19ae5e818894c"} Nov 26 06:51:13 crc kubenswrapper[4492]: I1126 06:51:13.470847 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-99qpv" Nov 26 06:51:13 crc kubenswrapper[4492]: I1126 06:51:13.470873 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-99qpv" event={"ID":"0abbeecb-4a32-445b-af18-2a6135721e74","Type":"ContainerDied","Data":"9e4a811354ca8974df2de17bcafc50d113ba5e53c6924a8d3c3365df5b867eba"} Nov 26 06:51:13 crc kubenswrapper[4492]: I1126 06:51:13.470896 4492 scope.go:117] "RemoveContainer" containerID="7911877b43a8ad7233548f5b337ce55f4982497d827c774655e19ae5e818894c" Nov 26 06:51:13 crc kubenswrapper[4492]: I1126 06:51:13.490660 4492 scope.go:117] "RemoveContainer" containerID="6335aea9ff700fb9ecfdd8c6004f238f2c26c6346b39e629e511ff642b67674c" Nov 26 06:51:13 crc kubenswrapper[4492]: I1126 06:51:13.500519 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-99qpv"] Nov 26 06:51:13 crc kubenswrapper[4492]: I1126 06:51:13.502610 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-99qpv"] Nov 26 06:51:13 crc kubenswrapper[4492]: I1126 06:51:13.515856 4492 scope.go:117] "RemoveContainer" containerID="bfdd759c09088880615aef318f9fec25ef423cf8fc1acd20c74e0a3b5886a399" Nov 26 06:51:13 crc kubenswrapper[4492]: I1126 06:51:13.516224 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-qkl8m" Nov 26 06:51:13 crc kubenswrapper[4492]: I1126 06:51:13.533543 4492 scope.go:117] "RemoveContainer" containerID="7911877b43a8ad7233548f5b337ce55f4982497d827c774655e19ae5e818894c" Nov 26 06:51:13 crc kubenswrapper[4492]: E1126 06:51:13.533937 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7911877b43a8ad7233548f5b337ce55f4982497d827c774655e19ae5e818894c\": container with ID starting with 7911877b43a8ad7233548f5b337ce55f4982497d827c774655e19ae5e818894c not found: ID does not exist" containerID="7911877b43a8ad7233548f5b337ce55f4982497d827c774655e19ae5e818894c" Nov 26 06:51:13 crc kubenswrapper[4492]: I1126 06:51:13.533970 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7911877b43a8ad7233548f5b337ce55f4982497d827c774655e19ae5e818894c"} err="failed to get container status \"7911877b43a8ad7233548f5b337ce55f4982497d827c774655e19ae5e818894c\": rpc error: code = NotFound desc = could not find container \"7911877b43a8ad7233548f5b337ce55f4982497d827c774655e19ae5e818894c\": container with ID starting with 7911877b43a8ad7233548f5b337ce55f4982497d827c774655e19ae5e818894c not found: ID does not exist" Nov 26 06:51:13 crc kubenswrapper[4492]: I1126 06:51:13.534049 4492 scope.go:117] "RemoveContainer" containerID="6335aea9ff700fb9ecfdd8c6004f238f2c26c6346b39e629e511ff642b67674c" Nov 26 06:51:13 crc kubenswrapper[4492]: E1126 06:51:13.534367 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6335aea9ff700fb9ecfdd8c6004f238f2c26c6346b39e629e511ff642b67674c\": container with ID starting with 6335aea9ff700fb9ecfdd8c6004f238f2c26c6346b39e629e511ff642b67674c not found: ID does not exist" containerID="6335aea9ff700fb9ecfdd8c6004f238f2c26c6346b39e629e511ff642b67674c" Nov 26 06:51:13 crc kubenswrapper[4492]: I1126 06:51:13.534403 4492 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"6335aea9ff700fb9ecfdd8c6004f238f2c26c6346b39e629e511ff642b67674c"} err="failed to get container status \"6335aea9ff700fb9ecfdd8c6004f238f2c26c6346b39e629e511ff642b67674c\": rpc error: code = NotFound desc = could not find container \"6335aea9ff700fb9ecfdd8c6004f238f2c26c6346b39e629e511ff642b67674c\": container with ID starting with 6335aea9ff700fb9ecfdd8c6004f238f2c26c6346b39e629e511ff642b67674c not found: ID does not exist" Nov 26 06:51:13 crc kubenswrapper[4492]: I1126 06:51:13.534429 4492 scope.go:117] "RemoveContainer" containerID="bfdd759c09088880615aef318f9fec25ef423cf8fc1acd20c74e0a3b5886a399" Nov 26 06:51:13 crc kubenswrapper[4492]: E1126 06:51:13.534688 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bfdd759c09088880615aef318f9fec25ef423cf8fc1acd20c74e0a3b5886a399\": container with ID starting with bfdd759c09088880615aef318f9fec25ef423cf8fc1acd20c74e0a3b5886a399 not found: ID does not exist" containerID="bfdd759c09088880615aef318f9fec25ef423cf8fc1acd20c74e0a3b5886a399" Nov 26 06:51:13 crc kubenswrapper[4492]: I1126 06:51:13.534713 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bfdd759c09088880615aef318f9fec25ef423cf8fc1acd20c74e0a3b5886a399"} err="failed to get container status \"bfdd759c09088880615aef318f9fec25ef423cf8fc1acd20c74e0a3b5886a399\": rpc error: code = NotFound desc = could not find container \"bfdd759c09088880615aef318f9fec25ef423cf8fc1acd20c74e0a3b5886a399\": container with ID starting with bfdd759c09088880615aef318f9fec25ef423cf8fc1acd20c74e0a3b5886a399 not found: ID does not exist" Nov 26 06:51:14 crc kubenswrapper[4492]: I1126 06:51:14.445213 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0abbeecb-4a32-445b-af18-2a6135721e74" path="/var/lib/kubelet/pods/0abbeecb-4a32-445b-af18-2a6135721e74/volumes" Nov 26 06:51:14 crc kubenswrapper[4492]: I1126 06:51:14.480408 4492 generic.go:334] "Generic (PLEG): container finished" podID="bff9a13a-5b4a-4789-ab6d-bff2ad038798" containerID="86e235d9bdef803528a7730f7c1dd9a9bc11704abd9b62334b5f47fb9ead6033" exitCode=0 Nov 26 06:51:14 crc kubenswrapper[4492]: I1126 06:51:14.480903 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7gl7t" event={"ID":"bff9a13a-5b4a-4789-ab6d-bff2ad038798","Type":"ContainerDied","Data":"86e235d9bdef803528a7730f7c1dd9a9bc11704abd9b62334b5f47fb9ead6033"} Nov 26 06:51:15 crc kubenswrapper[4492]: I1126 06:51:15.488187 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7gl7t" event={"ID":"bff9a13a-5b4a-4789-ab6d-bff2ad038798","Type":"ContainerStarted","Data":"45e588b84aa6d7ad96de906e04e64b19320aad155a33c432ddd0701eb5328ce2"} Nov 26 06:51:15 crc kubenswrapper[4492]: I1126 06:51:15.490973 4492 generic.go:334] "Generic (PLEG): container finished" podID="ec884242-662f-4387-b710-0d6c601a41c4" containerID="7fc58af892d47d89b19c72281be1c4125850ffa555a4ab31ca6e4a33a551dd1f" exitCode=0 Nov 26 06:51:15 crc kubenswrapper[4492]: I1126 06:51:15.491028 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-l62r5" event={"ID":"ec884242-662f-4387-b710-0d6c601a41c4","Type":"ContainerDied","Data":"7fc58af892d47d89b19c72281be1c4125850ffa555a4ab31ca6e4a33a551dd1f"} Nov 26 06:51:15 crc kubenswrapper[4492]: I1126 06:51:15.505805 4492 pod_startup_latency_tracker.go:104] "Observed pod startup 
duration" pod="openshift-marketplace/redhat-marketplace-7gl7t" podStartSLOduration=2.487711099 podStartE2EDuration="43.505782168s" podCreationTimestamp="2025-11-26 06:50:32 +0000 UTC" firstStartedPulling="2025-11-26 06:50:33.974932332 +0000 UTC m=+129.858820631" lastFinishedPulling="2025-11-26 06:51:14.993003401 +0000 UTC m=+170.876891700" observedRunningTime="2025-11-26 06:51:15.504472997 +0000 UTC m=+171.388361295" watchObservedRunningTime="2025-11-26 06:51:15.505782168 +0000 UTC m=+171.389670466" Nov 26 06:51:16 crc kubenswrapper[4492]: I1126 06:51:16.002743 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-qkl8m"] Nov 26 06:51:16 crc kubenswrapper[4492]: I1126 06:51:16.003048 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-qkl8m" podUID="21a88fce-0f39-4a48-a0cd-61014b0a6ea1" containerName="registry-server" containerID="cri-o://bd0cc87b1a33f4a003e8f9958d8cd3e1a10d3ec01d378c2b6517dc1a5e5a1397" gracePeriod=2 Nov 26 06:51:16 crc kubenswrapper[4492]: I1126 06:51:16.371544 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-qkl8m" Nov 26 06:51:16 crc kubenswrapper[4492]: I1126 06:51:16.498130 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/21a88fce-0f39-4a48-a0cd-61014b0a6ea1-catalog-content\") pod \"21a88fce-0f39-4a48-a0cd-61014b0a6ea1\" (UID: \"21a88fce-0f39-4a48-a0cd-61014b0a6ea1\") " Nov 26 06:51:16 crc kubenswrapper[4492]: I1126 06:51:16.498235 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/21a88fce-0f39-4a48-a0cd-61014b0a6ea1-utilities\") pod \"21a88fce-0f39-4a48-a0cd-61014b0a6ea1\" (UID: \"21a88fce-0f39-4a48-a0cd-61014b0a6ea1\") " Nov 26 06:51:16 crc kubenswrapper[4492]: I1126 06:51:16.498308 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fhb29\" (UniqueName: \"kubernetes.io/projected/21a88fce-0f39-4a48-a0cd-61014b0a6ea1-kube-api-access-fhb29\") pod \"21a88fce-0f39-4a48-a0cd-61014b0a6ea1\" (UID: \"21a88fce-0f39-4a48-a0cd-61014b0a6ea1\") " Nov 26 06:51:16 crc kubenswrapper[4492]: I1126 06:51:16.499068 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/21a88fce-0f39-4a48-a0cd-61014b0a6ea1-utilities" (OuterVolumeSpecName: "utilities") pod "21a88fce-0f39-4a48-a0cd-61014b0a6ea1" (UID: "21a88fce-0f39-4a48-a0cd-61014b0a6ea1"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 06:51:16 crc kubenswrapper[4492]: I1126 06:51:16.499123 4492 generic.go:334] "Generic (PLEG): container finished" podID="21a88fce-0f39-4a48-a0cd-61014b0a6ea1" containerID="bd0cc87b1a33f4a003e8f9958d8cd3e1a10d3ec01d378c2b6517dc1a5e5a1397" exitCode=0 Nov 26 06:51:16 crc kubenswrapper[4492]: I1126 06:51:16.499231 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-qkl8m" Nov 26 06:51:16 crc kubenswrapper[4492]: I1126 06:51:16.499238 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qkl8m" event={"ID":"21a88fce-0f39-4a48-a0cd-61014b0a6ea1","Type":"ContainerDied","Data":"bd0cc87b1a33f4a003e8f9958d8cd3e1a10d3ec01d378c2b6517dc1a5e5a1397"} Nov 26 06:51:16 crc kubenswrapper[4492]: I1126 06:51:16.499277 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qkl8m" event={"ID":"21a88fce-0f39-4a48-a0cd-61014b0a6ea1","Type":"ContainerDied","Data":"a73794f730de375c8b01d984b30d7c1d964a54b79ebc7515b8e653405bcd8f45"} Nov 26 06:51:16 crc kubenswrapper[4492]: I1126 06:51:16.499303 4492 scope.go:117] "RemoveContainer" containerID="bd0cc87b1a33f4a003e8f9958d8cd3e1a10d3ec01d378c2b6517dc1a5e5a1397" Nov 26 06:51:16 crc kubenswrapper[4492]: I1126 06:51:16.505255 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/21a88fce-0f39-4a48-a0cd-61014b0a6ea1-kube-api-access-fhb29" (OuterVolumeSpecName: "kube-api-access-fhb29") pod "21a88fce-0f39-4a48-a0cd-61014b0a6ea1" (UID: "21a88fce-0f39-4a48-a0cd-61014b0a6ea1"). InnerVolumeSpecName "kube-api-access-fhb29". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:51:16 crc kubenswrapper[4492]: I1126 06:51:16.505934 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-l62r5" event={"ID":"ec884242-662f-4387-b710-0d6c601a41c4","Type":"ContainerStarted","Data":"668796da89f22e2041c4813ef1d5fdc136d5d7e30a86e72f0e666460b45b5ebb"} Nov 26 06:51:16 crc kubenswrapper[4492]: I1126 06:51:16.528662 4492 scope.go:117] "RemoveContainer" containerID="145a12f09d0cbed32fa49a846eb41ed7e2104fb5b2dccb6b1bed3e676aab1abf" Nov 26 06:51:16 crc kubenswrapper[4492]: I1126 06:51:16.539742 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-l62r5" podStartSLOduration=2.38196144 podStartE2EDuration="46.539731666s" podCreationTimestamp="2025-11-26 06:50:30 +0000 UTC" firstStartedPulling="2025-11-26 06:50:31.859792071 +0000 UTC m=+127.743680369" lastFinishedPulling="2025-11-26 06:51:16.017562297 +0000 UTC m=+171.901450595" observedRunningTime="2025-11-26 06:51:16.536559243 +0000 UTC m=+172.420447561" watchObservedRunningTime="2025-11-26 06:51:16.539731666 +0000 UTC m=+172.423619965" Nov 26 06:51:16 crc kubenswrapper[4492]: I1126 06:51:16.549266 4492 scope.go:117] "RemoveContainer" containerID="f4f299aefb335564210b77897ff3b65ef222adffb3b58253f3d28edee951d3fb" Nov 26 06:51:16 crc kubenswrapper[4492]: I1126 06:51:16.564065 4492 scope.go:117] "RemoveContainer" containerID="bd0cc87b1a33f4a003e8f9958d8cd3e1a10d3ec01d378c2b6517dc1a5e5a1397" Nov 26 06:51:16 crc kubenswrapper[4492]: E1126 06:51:16.564396 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bd0cc87b1a33f4a003e8f9958d8cd3e1a10d3ec01d378c2b6517dc1a5e5a1397\": container with ID starting with bd0cc87b1a33f4a003e8f9958d8cd3e1a10d3ec01d378c2b6517dc1a5e5a1397 not found: ID does not exist" containerID="bd0cc87b1a33f4a003e8f9958d8cd3e1a10d3ec01d378c2b6517dc1a5e5a1397" Nov 26 06:51:16 crc kubenswrapper[4492]: I1126 06:51:16.564434 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bd0cc87b1a33f4a003e8f9958d8cd3e1a10d3ec01d378c2b6517dc1a5e5a1397"} err="failed 
to get container status \"bd0cc87b1a33f4a003e8f9958d8cd3e1a10d3ec01d378c2b6517dc1a5e5a1397\": rpc error: code = NotFound desc = could not find container \"bd0cc87b1a33f4a003e8f9958d8cd3e1a10d3ec01d378c2b6517dc1a5e5a1397\": container with ID starting with bd0cc87b1a33f4a003e8f9958d8cd3e1a10d3ec01d378c2b6517dc1a5e5a1397 not found: ID does not exist" Nov 26 06:51:16 crc kubenswrapper[4492]: I1126 06:51:16.564457 4492 scope.go:117] "RemoveContainer" containerID="145a12f09d0cbed32fa49a846eb41ed7e2104fb5b2dccb6b1bed3e676aab1abf" Nov 26 06:51:16 crc kubenswrapper[4492]: E1126 06:51:16.564774 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"145a12f09d0cbed32fa49a846eb41ed7e2104fb5b2dccb6b1bed3e676aab1abf\": container with ID starting with 145a12f09d0cbed32fa49a846eb41ed7e2104fb5b2dccb6b1bed3e676aab1abf not found: ID does not exist" containerID="145a12f09d0cbed32fa49a846eb41ed7e2104fb5b2dccb6b1bed3e676aab1abf" Nov 26 06:51:16 crc kubenswrapper[4492]: I1126 06:51:16.564798 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"145a12f09d0cbed32fa49a846eb41ed7e2104fb5b2dccb6b1bed3e676aab1abf"} err="failed to get container status \"145a12f09d0cbed32fa49a846eb41ed7e2104fb5b2dccb6b1bed3e676aab1abf\": rpc error: code = NotFound desc = could not find container \"145a12f09d0cbed32fa49a846eb41ed7e2104fb5b2dccb6b1bed3e676aab1abf\": container with ID starting with 145a12f09d0cbed32fa49a846eb41ed7e2104fb5b2dccb6b1bed3e676aab1abf not found: ID does not exist" Nov 26 06:51:16 crc kubenswrapper[4492]: I1126 06:51:16.564814 4492 scope.go:117] "RemoveContainer" containerID="f4f299aefb335564210b77897ff3b65ef222adffb3b58253f3d28edee951d3fb" Nov 26 06:51:16 crc kubenswrapper[4492]: E1126 06:51:16.565060 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f4f299aefb335564210b77897ff3b65ef222adffb3b58253f3d28edee951d3fb\": container with ID starting with f4f299aefb335564210b77897ff3b65ef222adffb3b58253f3d28edee951d3fb not found: ID does not exist" containerID="f4f299aefb335564210b77897ff3b65ef222adffb3b58253f3d28edee951d3fb" Nov 26 06:51:16 crc kubenswrapper[4492]: I1126 06:51:16.565081 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f4f299aefb335564210b77897ff3b65ef222adffb3b58253f3d28edee951d3fb"} err="failed to get container status \"f4f299aefb335564210b77897ff3b65ef222adffb3b58253f3d28edee951d3fb\": rpc error: code = NotFound desc = could not find container \"f4f299aefb335564210b77897ff3b65ef222adffb3b58253f3d28edee951d3fb\": container with ID starting with f4f299aefb335564210b77897ff3b65ef222adffb3b58253f3d28edee951d3fb not found: ID does not exist" Nov 26 06:51:16 crc kubenswrapper[4492]: I1126 06:51:16.582360 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/21a88fce-0f39-4a48-a0cd-61014b0a6ea1-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "21a88fce-0f39-4a48-a0cd-61014b0a6ea1" (UID: "21a88fce-0f39-4a48-a0cd-61014b0a6ea1"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 06:51:16 crc kubenswrapper[4492]: I1126 06:51:16.599756 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fhb29\" (UniqueName: \"kubernetes.io/projected/21a88fce-0f39-4a48-a0cd-61014b0a6ea1-kube-api-access-fhb29\") on node \"crc\" DevicePath \"\"" Nov 26 06:51:16 crc kubenswrapper[4492]: I1126 06:51:16.599779 4492 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/21a88fce-0f39-4a48-a0cd-61014b0a6ea1-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 06:51:16 crc kubenswrapper[4492]: I1126 06:51:16.599789 4492 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/21a88fce-0f39-4a48-a0cd-61014b0a6ea1-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 06:51:16 crc kubenswrapper[4492]: I1126 06:51:16.823362 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-qkl8m"] Nov 26 06:51:16 crc kubenswrapper[4492]: I1126 06:51:16.825344 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-qkl8m"] Nov 26 06:51:17 crc kubenswrapper[4492]: I1126 06:51:17.516417 4492 generic.go:334] "Generic (PLEG): container finished" podID="817798d6-9eae-4ee2-9b2f-53c54772866c" containerID="2be9a383b021e7ff801a04f0c6a1da1a44311ad26c1a052124944fa12960f09d" exitCode=0 Nov 26 06:51:17 crc kubenswrapper[4492]: I1126 06:51:17.516498 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kt8n4" event={"ID":"817798d6-9eae-4ee2-9b2f-53c54772866c","Type":"ContainerDied","Data":"2be9a383b021e7ff801a04f0c6a1da1a44311ad26c1a052124944fa12960f09d"} Nov 26 06:51:18 crc kubenswrapper[4492]: I1126 06:51:18.444815 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="21a88fce-0f39-4a48-a0cd-61014b0a6ea1" path="/var/lib/kubelet/pods/21a88fce-0f39-4a48-a0cd-61014b0a6ea1/volumes" Nov 26 06:51:18 crc kubenswrapper[4492]: I1126 06:51:18.527091 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kt8n4" event={"ID":"817798d6-9eae-4ee2-9b2f-53c54772866c","Type":"ContainerStarted","Data":"6bd791c0dc1ff550646965141ea1787aecdf318cf53792607495b7bc2054ce53"} Nov 26 06:51:18 crc kubenswrapper[4492]: I1126 06:51:18.549697 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-kt8n4" podStartSLOduration=3.27937578 podStartE2EDuration="49.549674239s" podCreationTimestamp="2025-11-26 06:50:29 +0000 UTC" firstStartedPulling="2025-11-26 06:50:31.854622893 +0000 UTC m=+127.738511181" lastFinishedPulling="2025-11-26 06:51:18.124921343 +0000 UTC m=+174.008809640" observedRunningTime="2025-11-26 06:51:18.546315024 +0000 UTC m=+174.430203322" watchObservedRunningTime="2025-11-26 06:51:18.549674239 +0000 UTC m=+174.433562537" Nov 26 06:51:19 crc kubenswrapper[4492]: I1126 06:51:19.441281 4492 patch_prober.go:28] interesting pod/machine-config-daemon-6blv7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 06:51:19 crc kubenswrapper[4492]: I1126 06:51:19.441355 4492 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" 
podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 06:51:20 crc kubenswrapper[4492]: I1126 06:51:20.319127 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-kt8n4" Nov 26 06:51:20 crc kubenswrapper[4492]: I1126 06:51:20.319436 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-kt8n4" Nov 26 06:51:20 crc kubenswrapper[4492]: I1126 06:51:20.346114 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-kt8n4" Nov 26 06:51:20 crc kubenswrapper[4492]: I1126 06:51:20.495202 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-l62r5" Nov 26 06:51:20 crc kubenswrapper[4492]: I1126 06:51:20.496036 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-l62r5" Nov 26 06:51:20 crc kubenswrapper[4492]: I1126 06:51:20.530724 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-l62r5" Nov 26 06:51:22 crc kubenswrapper[4492]: I1126 06:51:22.685388 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-7gl7t" Nov 26 06:51:22 crc kubenswrapper[4492]: I1126 06:51:22.685772 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-7gl7t" Nov 26 06:51:22 crc kubenswrapper[4492]: I1126 06:51:22.719443 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-7gl7t" Nov 26 06:51:23 crc kubenswrapper[4492]: I1126 06:51:23.597563 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-7gl7t" Nov 26 06:51:26 crc kubenswrapper[4492]: I1126 06:51:26.599876 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-7gl7t"] Nov 26 06:51:26 crc kubenswrapper[4492]: I1126 06:51:26.600467 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-7gl7t" podUID="bff9a13a-5b4a-4789-ab6d-bff2ad038798" containerName="registry-server" containerID="cri-o://45e588b84aa6d7ad96de906e04e64b19320aad155a33c432ddd0701eb5328ce2" gracePeriod=2 Nov 26 06:51:26 crc kubenswrapper[4492]: I1126 06:51:26.725904 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Nov 26 06:51:26 crc kubenswrapper[4492]: E1126 06:51:26.726354 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="21a88fce-0f39-4a48-a0cd-61014b0a6ea1" containerName="registry-server" Nov 26 06:51:26 crc kubenswrapper[4492]: I1126 06:51:26.726379 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="21a88fce-0f39-4a48-a0cd-61014b0a6ea1" containerName="registry-server" Nov 26 06:51:26 crc kubenswrapper[4492]: E1126 06:51:26.726399 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0abbeecb-4a32-445b-af18-2a6135721e74" containerName="extract-content" Nov 26 06:51:26 crc kubenswrapper[4492]: I1126 06:51:26.726406 4492 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="0abbeecb-4a32-445b-af18-2a6135721e74" containerName="extract-content" Nov 26 06:51:26 crc kubenswrapper[4492]: E1126 06:51:26.726422 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="21a88fce-0f39-4a48-a0cd-61014b0a6ea1" containerName="extract-content" Nov 26 06:51:26 crc kubenswrapper[4492]: I1126 06:51:26.726427 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="21a88fce-0f39-4a48-a0cd-61014b0a6ea1" containerName="extract-content" Nov 26 06:51:26 crc kubenswrapper[4492]: E1126 06:51:26.726435 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bef0c3ab-34e2-4c7c-972f-cec18fb94b57" containerName="pruner" Nov 26 06:51:26 crc kubenswrapper[4492]: I1126 06:51:26.726440 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="bef0c3ab-34e2-4c7c-972f-cec18fb94b57" containerName="pruner" Nov 26 06:51:26 crc kubenswrapper[4492]: E1126 06:51:26.726446 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="261335b4-4d32-402e-8c7f-19d07db47a2a" containerName="pruner" Nov 26 06:51:26 crc kubenswrapper[4492]: I1126 06:51:26.726451 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="261335b4-4d32-402e-8c7f-19d07db47a2a" containerName="pruner" Nov 26 06:51:26 crc kubenswrapper[4492]: E1126 06:51:26.726458 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="21a88fce-0f39-4a48-a0cd-61014b0a6ea1" containerName="extract-utilities" Nov 26 06:51:26 crc kubenswrapper[4492]: I1126 06:51:26.726465 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="21a88fce-0f39-4a48-a0cd-61014b0a6ea1" containerName="extract-utilities" Nov 26 06:51:26 crc kubenswrapper[4492]: E1126 06:51:26.726473 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0abbeecb-4a32-445b-af18-2a6135721e74" containerName="registry-server" Nov 26 06:51:26 crc kubenswrapper[4492]: I1126 06:51:26.726479 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="0abbeecb-4a32-445b-af18-2a6135721e74" containerName="registry-server" Nov 26 06:51:26 crc kubenswrapper[4492]: E1126 06:51:26.726495 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0abbeecb-4a32-445b-af18-2a6135721e74" containerName="extract-utilities" Nov 26 06:51:26 crc kubenswrapper[4492]: I1126 06:51:26.726501 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="0abbeecb-4a32-445b-af18-2a6135721e74" containerName="extract-utilities" Nov 26 06:51:26 crc kubenswrapper[4492]: E1126 06:51:26.726512 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7c5436ae-c6b0-4c8e-b45f-e580fef03690" containerName="collect-profiles" Nov 26 06:51:26 crc kubenswrapper[4492]: I1126 06:51:26.726518 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="7c5436ae-c6b0-4c8e-b45f-e580fef03690" containerName="collect-profiles" Nov 26 06:51:26 crc kubenswrapper[4492]: I1126 06:51:26.726640 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="7c5436ae-c6b0-4c8e-b45f-e580fef03690" containerName="collect-profiles" Nov 26 06:51:26 crc kubenswrapper[4492]: I1126 06:51:26.726653 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="21a88fce-0f39-4a48-a0cd-61014b0a6ea1" containerName="registry-server" Nov 26 06:51:26 crc kubenswrapper[4492]: I1126 06:51:26.726661 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="bef0c3ab-34e2-4c7c-972f-cec18fb94b57" containerName="pruner" Nov 26 06:51:26 crc kubenswrapper[4492]: I1126 06:51:26.726669 4492 memory_manager.go:354] "RemoveStaleState removing 
state" podUID="261335b4-4d32-402e-8c7f-19d07db47a2a" containerName="pruner" Nov 26 06:51:26 crc kubenswrapper[4492]: I1126 06:51:26.726679 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="0abbeecb-4a32-445b-af18-2a6135721e74" containerName="registry-server" Nov 26 06:51:26 crc kubenswrapper[4492]: I1126 06:51:26.727187 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 26 06:51:26 crc kubenswrapper[4492]: I1126 06:51:26.734127 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Nov 26 06:51:26 crc kubenswrapper[4492]: I1126 06:51:26.734673 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Nov 26 06:51:26 crc kubenswrapper[4492]: I1126 06:51:26.743803 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/d1f1e087-75f3-4e15-b556-ccd3642cdd5e-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"d1f1e087-75f3-4e15-b556-ccd3642cdd5e\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 26 06:51:26 crc kubenswrapper[4492]: I1126 06:51:26.743860 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d1f1e087-75f3-4e15-b556-ccd3642cdd5e-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"d1f1e087-75f3-4e15-b556-ccd3642cdd5e\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 26 06:51:26 crc kubenswrapper[4492]: I1126 06:51:26.743937 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Nov 26 06:51:26 crc kubenswrapper[4492]: I1126 06:51:26.845673 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/d1f1e087-75f3-4e15-b556-ccd3642cdd5e-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"d1f1e087-75f3-4e15-b556-ccd3642cdd5e\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 26 06:51:26 crc kubenswrapper[4492]: I1126 06:51:26.846043 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d1f1e087-75f3-4e15-b556-ccd3642cdd5e-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"d1f1e087-75f3-4e15-b556-ccd3642cdd5e\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 26 06:51:26 crc kubenswrapper[4492]: I1126 06:51:26.845932 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/d1f1e087-75f3-4e15-b556-ccd3642cdd5e-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"d1f1e087-75f3-4e15-b556-ccd3642cdd5e\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 26 06:51:26 crc kubenswrapper[4492]: I1126 06:51:26.880590 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d1f1e087-75f3-4e15-b556-ccd3642cdd5e-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"d1f1e087-75f3-4e15-b556-ccd3642cdd5e\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 26 06:51:26 crc kubenswrapper[4492]: I1126 06:51:26.947385 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-7gl7t" Nov 26 06:51:27 crc kubenswrapper[4492]: I1126 06:51:27.048257 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9g6nm\" (UniqueName: \"kubernetes.io/projected/bff9a13a-5b4a-4789-ab6d-bff2ad038798-kube-api-access-9g6nm\") pod \"bff9a13a-5b4a-4789-ab6d-bff2ad038798\" (UID: \"bff9a13a-5b4a-4789-ab6d-bff2ad038798\") " Nov 26 06:51:27 crc kubenswrapper[4492]: I1126 06:51:27.048349 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bff9a13a-5b4a-4789-ab6d-bff2ad038798-utilities\") pod \"bff9a13a-5b4a-4789-ab6d-bff2ad038798\" (UID: \"bff9a13a-5b4a-4789-ab6d-bff2ad038798\") " Nov 26 06:51:27 crc kubenswrapper[4492]: I1126 06:51:27.048412 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bff9a13a-5b4a-4789-ab6d-bff2ad038798-catalog-content\") pod \"bff9a13a-5b4a-4789-ab6d-bff2ad038798\" (UID: \"bff9a13a-5b4a-4789-ab6d-bff2ad038798\") " Nov 26 06:51:27 crc kubenswrapper[4492]: I1126 06:51:27.048804 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 26 06:51:27 crc kubenswrapper[4492]: I1126 06:51:27.048937 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bff9a13a-5b4a-4789-ab6d-bff2ad038798-utilities" (OuterVolumeSpecName: "utilities") pod "bff9a13a-5b4a-4789-ab6d-bff2ad038798" (UID: "bff9a13a-5b4a-4789-ab6d-bff2ad038798"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 06:51:27 crc kubenswrapper[4492]: I1126 06:51:27.050613 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bff9a13a-5b4a-4789-ab6d-bff2ad038798-kube-api-access-9g6nm" (OuterVolumeSpecName: "kube-api-access-9g6nm") pod "bff9a13a-5b4a-4789-ab6d-bff2ad038798" (UID: "bff9a13a-5b4a-4789-ab6d-bff2ad038798"). InnerVolumeSpecName "kube-api-access-9g6nm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:51:27 crc kubenswrapper[4492]: I1126 06:51:27.063141 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bff9a13a-5b4a-4789-ab6d-bff2ad038798-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "bff9a13a-5b4a-4789-ab6d-bff2ad038798" (UID: "bff9a13a-5b4a-4789-ab6d-bff2ad038798"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 06:51:27 crc kubenswrapper[4492]: I1126 06:51:27.149664 4492 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bff9a13a-5b4a-4789-ab6d-bff2ad038798-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 06:51:27 crc kubenswrapper[4492]: I1126 06:51:27.149694 4492 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bff9a13a-5b4a-4789-ab6d-bff2ad038798-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 06:51:27 crc kubenswrapper[4492]: I1126 06:51:27.149709 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9g6nm\" (UniqueName: \"kubernetes.io/projected/bff9a13a-5b4a-4789-ab6d-bff2ad038798-kube-api-access-9g6nm\") on node \"crc\" DevicePath \"\"" Nov 26 06:51:27 crc kubenswrapper[4492]: I1126 06:51:27.418206 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Nov 26 06:51:27 crc kubenswrapper[4492]: W1126 06:51:27.420864 4492 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-podd1f1e087_75f3_4e15_b556_ccd3642cdd5e.slice/crio-b8cd03f507ac28cfe932ce889126ee831a74be83c409efead8ac088f5e544a4b WatchSource:0}: Error finding container b8cd03f507ac28cfe932ce889126ee831a74be83c409efead8ac088f5e544a4b: Status 404 returned error can't find the container with id b8cd03f507ac28cfe932ce889126ee831a74be83c409efead8ac088f5e544a4b Nov 26 06:51:27 crc kubenswrapper[4492]: I1126 06:51:27.592516 4492 generic.go:334] "Generic (PLEG): container finished" podID="bff9a13a-5b4a-4789-ab6d-bff2ad038798" containerID="45e588b84aa6d7ad96de906e04e64b19320aad155a33c432ddd0701eb5328ce2" exitCode=0 Nov 26 06:51:27 crc kubenswrapper[4492]: I1126 06:51:27.592624 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-7gl7t" Nov 26 06:51:27 crc kubenswrapper[4492]: I1126 06:51:27.592638 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7gl7t" event={"ID":"bff9a13a-5b4a-4789-ab6d-bff2ad038798","Type":"ContainerDied","Data":"45e588b84aa6d7ad96de906e04e64b19320aad155a33c432ddd0701eb5328ce2"} Nov 26 06:51:27 crc kubenswrapper[4492]: I1126 06:51:27.592965 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7gl7t" event={"ID":"bff9a13a-5b4a-4789-ab6d-bff2ad038798","Type":"ContainerDied","Data":"c66af40222a1ffbc2650500d0edb317cf2d1a52177e290461780745ae64e519e"} Nov 26 06:51:27 crc kubenswrapper[4492]: I1126 06:51:27.593006 4492 scope.go:117] "RemoveContainer" containerID="45e588b84aa6d7ad96de906e04e64b19320aad155a33c432ddd0701eb5328ce2" Nov 26 06:51:27 crc kubenswrapper[4492]: I1126 06:51:27.596911 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"d1f1e087-75f3-4e15-b556-ccd3642cdd5e","Type":"ContainerStarted","Data":"b8cd03f507ac28cfe932ce889126ee831a74be83c409efead8ac088f5e544a4b"} Nov 26 06:51:27 crc kubenswrapper[4492]: I1126 06:51:27.615430 4492 scope.go:117] "RemoveContainer" containerID="86e235d9bdef803528a7730f7c1dd9a9bc11704abd9b62334b5f47fb9ead6033" Nov 26 06:51:27 crc kubenswrapper[4492]: I1126 06:51:27.628051 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-7gl7t"] Nov 26 06:51:27 crc kubenswrapper[4492]: I1126 06:51:27.630182 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-7gl7t"] Nov 26 06:51:27 crc kubenswrapper[4492]: I1126 06:51:27.636734 4492 scope.go:117] "RemoveContainer" containerID="404895fa5a739b7cbe56ddfd34dab03c089ff198b5fcd82f9d0d7142e1426c3a" Nov 26 06:51:27 crc kubenswrapper[4492]: I1126 06:51:27.646733 4492 scope.go:117] "RemoveContainer" containerID="45e588b84aa6d7ad96de906e04e64b19320aad155a33c432ddd0701eb5328ce2" Nov 26 06:51:27 crc kubenswrapper[4492]: E1126 06:51:27.647116 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"45e588b84aa6d7ad96de906e04e64b19320aad155a33c432ddd0701eb5328ce2\": container with ID starting with 45e588b84aa6d7ad96de906e04e64b19320aad155a33c432ddd0701eb5328ce2 not found: ID does not exist" containerID="45e588b84aa6d7ad96de906e04e64b19320aad155a33c432ddd0701eb5328ce2" Nov 26 06:51:27 crc kubenswrapper[4492]: I1126 06:51:27.647189 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"45e588b84aa6d7ad96de906e04e64b19320aad155a33c432ddd0701eb5328ce2"} err="failed to get container status \"45e588b84aa6d7ad96de906e04e64b19320aad155a33c432ddd0701eb5328ce2\": rpc error: code = NotFound desc = could not find container \"45e588b84aa6d7ad96de906e04e64b19320aad155a33c432ddd0701eb5328ce2\": container with ID starting with 45e588b84aa6d7ad96de906e04e64b19320aad155a33c432ddd0701eb5328ce2 not found: ID does not exist" Nov 26 06:51:27 crc kubenswrapper[4492]: I1126 06:51:27.647213 4492 scope.go:117] "RemoveContainer" containerID="86e235d9bdef803528a7730f7c1dd9a9bc11704abd9b62334b5f47fb9ead6033" Nov 26 06:51:27 crc kubenswrapper[4492]: E1126 06:51:27.647559 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"86e235d9bdef803528a7730f7c1dd9a9bc11704abd9b62334b5f47fb9ead6033\": container with ID starting with 86e235d9bdef803528a7730f7c1dd9a9bc11704abd9b62334b5f47fb9ead6033 not found: ID does not exist" containerID="86e235d9bdef803528a7730f7c1dd9a9bc11704abd9b62334b5f47fb9ead6033" Nov 26 06:51:27 crc kubenswrapper[4492]: I1126 06:51:27.647592 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"86e235d9bdef803528a7730f7c1dd9a9bc11704abd9b62334b5f47fb9ead6033"} err="failed to get container status \"86e235d9bdef803528a7730f7c1dd9a9bc11704abd9b62334b5f47fb9ead6033\": rpc error: code = NotFound desc = could not find container \"86e235d9bdef803528a7730f7c1dd9a9bc11704abd9b62334b5f47fb9ead6033\": container with ID starting with 86e235d9bdef803528a7730f7c1dd9a9bc11704abd9b62334b5f47fb9ead6033 not found: ID does not exist" Nov 26 06:51:27 crc kubenswrapper[4492]: I1126 06:51:27.647619 4492 scope.go:117] "RemoveContainer" containerID="404895fa5a739b7cbe56ddfd34dab03c089ff198b5fcd82f9d0d7142e1426c3a" Nov 26 06:51:27 crc kubenswrapper[4492]: E1126 06:51:27.648018 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"404895fa5a739b7cbe56ddfd34dab03c089ff198b5fcd82f9d0d7142e1426c3a\": container with ID starting with 404895fa5a739b7cbe56ddfd34dab03c089ff198b5fcd82f9d0d7142e1426c3a not found: ID does not exist" containerID="404895fa5a739b7cbe56ddfd34dab03c089ff198b5fcd82f9d0d7142e1426c3a" Nov 26 06:51:27 crc kubenswrapper[4492]: I1126 06:51:27.648053 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"404895fa5a739b7cbe56ddfd34dab03c089ff198b5fcd82f9d0d7142e1426c3a"} err="failed to get container status \"404895fa5a739b7cbe56ddfd34dab03c089ff198b5fcd82f9d0d7142e1426c3a\": rpc error: code = NotFound desc = could not find container \"404895fa5a739b7cbe56ddfd34dab03c089ff198b5fcd82f9d0d7142e1426c3a\": container with ID starting with 404895fa5a739b7cbe56ddfd34dab03c089ff198b5fcd82f9d0d7142e1426c3a not found: ID does not exist" Nov 26 06:51:28 crc kubenswrapper[4492]: I1126 06:51:28.448159 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bff9a13a-5b4a-4789-ab6d-bff2ad038798" path="/var/lib/kubelet/pods/bff9a13a-5b4a-4789-ab6d-bff2ad038798/volumes" Nov 26 06:51:28 crc kubenswrapper[4492]: I1126 06:51:28.609192 4492 generic.go:334] "Generic (PLEG): container finished" podID="d1f1e087-75f3-4e15-b556-ccd3642cdd5e" containerID="045063eb06c22f175dae1d26c644f80de72936608c357eb4f849f8b0f9d2b31e" exitCode=0 Nov 26 06:51:28 crc kubenswrapper[4492]: I1126 06:51:28.609335 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"d1f1e087-75f3-4e15-b556-ccd3642cdd5e","Type":"ContainerDied","Data":"045063eb06c22f175dae1d26c644f80de72936608c357eb4f849f8b0f9d2b31e"} Nov 26 06:51:29 crc kubenswrapper[4492]: I1126 06:51:29.814701 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 26 06:51:29 crc kubenswrapper[4492]: I1126 06:51:29.990337 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/d1f1e087-75f3-4e15-b556-ccd3642cdd5e-kubelet-dir\") pod \"d1f1e087-75f3-4e15-b556-ccd3642cdd5e\" (UID: \"d1f1e087-75f3-4e15-b556-ccd3642cdd5e\") " Nov 26 06:51:29 crc kubenswrapper[4492]: I1126 06:51:29.990440 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d1f1e087-75f3-4e15-b556-ccd3642cdd5e-kube-api-access\") pod \"d1f1e087-75f3-4e15-b556-ccd3642cdd5e\" (UID: \"d1f1e087-75f3-4e15-b556-ccd3642cdd5e\") " Nov 26 06:51:29 crc kubenswrapper[4492]: I1126 06:51:29.990486 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d1f1e087-75f3-4e15-b556-ccd3642cdd5e-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "d1f1e087-75f3-4e15-b556-ccd3642cdd5e" (UID: "d1f1e087-75f3-4e15-b556-ccd3642cdd5e"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 06:51:29 crc kubenswrapper[4492]: I1126 06:51:29.991038 4492 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/d1f1e087-75f3-4e15-b556-ccd3642cdd5e-kubelet-dir\") on node \"crc\" DevicePath \"\"" Nov 26 06:51:29 crc kubenswrapper[4492]: I1126 06:51:29.996374 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d1f1e087-75f3-4e15-b556-ccd3642cdd5e-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "d1f1e087-75f3-4e15-b556-ccd3642cdd5e" (UID: "d1f1e087-75f3-4e15-b556-ccd3642cdd5e"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:51:30 crc kubenswrapper[4492]: I1126 06:51:30.092024 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d1f1e087-75f3-4e15-b556-ccd3642cdd5e-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 26 06:51:30 crc kubenswrapper[4492]: I1126 06:51:30.349028 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-kt8n4" Nov 26 06:51:30 crc kubenswrapper[4492]: I1126 06:51:30.354864 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 06:51:30 crc kubenswrapper[4492]: I1126 06:51:30.528656 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-l62r5" Nov 26 06:51:30 crc kubenswrapper[4492]: I1126 06:51:30.622340 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"d1f1e087-75f3-4e15-b556-ccd3642cdd5e","Type":"ContainerDied","Data":"b8cd03f507ac28cfe932ce889126ee831a74be83c409efead8ac088f5e544a4b"} Nov 26 06:51:30 crc kubenswrapper[4492]: I1126 06:51:30.622373 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 26 06:51:30 crc kubenswrapper[4492]: I1126 06:51:30.622381 4492 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b8cd03f507ac28cfe932ce889126ee831a74be83c409efead8ac088f5e544a4b" Nov 26 06:51:32 crc kubenswrapper[4492]: I1126 06:51:32.401918 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-l62r5"] Nov 26 06:51:32 crc kubenswrapper[4492]: I1126 06:51:32.402240 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-l62r5" podUID="ec884242-662f-4387-b710-0d6c601a41c4" containerName="registry-server" containerID="cri-o://668796da89f22e2041c4813ef1d5fdc136d5d7e30a86e72f0e666460b45b5ebb" gracePeriod=2 Nov 26 06:51:32 crc kubenswrapper[4492]: I1126 06:51:32.639966 4492 generic.go:334] "Generic (PLEG): container finished" podID="ec884242-662f-4387-b710-0d6c601a41c4" containerID="668796da89f22e2041c4813ef1d5fdc136d5d7e30a86e72f0e666460b45b5ebb" exitCode=0 Nov 26 06:51:32 crc kubenswrapper[4492]: I1126 06:51:32.640026 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-l62r5" event={"ID":"ec884242-662f-4387-b710-0d6c601a41c4","Type":"ContainerDied","Data":"668796da89f22e2041c4813ef1d5fdc136d5d7e30a86e72f0e666460b45b5ebb"} Nov 26 06:51:32 crc kubenswrapper[4492]: I1126 06:51:32.739364 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-l62r5" Nov 26 06:51:32 crc kubenswrapper[4492]: I1126 06:51:32.827639 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cthkd\" (UniqueName: \"kubernetes.io/projected/ec884242-662f-4387-b710-0d6c601a41c4-kube-api-access-cthkd\") pod \"ec884242-662f-4387-b710-0d6c601a41c4\" (UID: \"ec884242-662f-4387-b710-0d6c601a41c4\") " Nov 26 06:51:32 crc kubenswrapper[4492]: I1126 06:51:32.827745 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ec884242-662f-4387-b710-0d6c601a41c4-catalog-content\") pod \"ec884242-662f-4387-b710-0d6c601a41c4\" (UID: \"ec884242-662f-4387-b710-0d6c601a41c4\") " Nov 26 06:51:32 crc kubenswrapper[4492]: I1126 06:51:32.827808 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ec884242-662f-4387-b710-0d6c601a41c4-utilities\") pod \"ec884242-662f-4387-b710-0d6c601a41c4\" (UID: \"ec884242-662f-4387-b710-0d6c601a41c4\") " Nov 26 06:51:32 crc kubenswrapper[4492]: I1126 06:51:32.829004 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ec884242-662f-4387-b710-0d6c601a41c4-utilities" (OuterVolumeSpecName: "utilities") pod "ec884242-662f-4387-b710-0d6c601a41c4" (UID: "ec884242-662f-4387-b710-0d6c601a41c4"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 06:51:32 crc kubenswrapper[4492]: I1126 06:51:32.846943 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ec884242-662f-4387-b710-0d6c601a41c4-kube-api-access-cthkd" (OuterVolumeSpecName: "kube-api-access-cthkd") pod "ec884242-662f-4387-b710-0d6c601a41c4" (UID: "ec884242-662f-4387-b710-0d6c601a41c4"). InnerVolumeSpecName "kube-api-access-cthkd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:51:32 crc kubenswrapper[4492]: I1126 06:51:32.871540 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ec884242-662f-4387-b710-0d6c601a41c4-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ec884242-662f-4387-b710-0d6c601a41c4" (UID: "ec884242-662f-4387-b710-0d6c601a41c4"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 06:51:32 crc kubenswrapper[4492]: I1126 06:51:32.919141 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Nov 26 06:51:32 crc kubenswrapper[4492]: E1126 06:51:32.919392 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bff9a13a-5b4a-4789-ab6d-bff2ad038798" containerName="extract-utilities" Nov 26 06:51:32 crc kubenswrapper[4492]: I1126 06:51:32.919412 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="bff9a13a-5b4a-4789-ab6d-bff2ad038798" containerName="extract-utilities" Nov 26 06:51:32 crc kubenswrapper[4492]: E1126 06:51:32.919425 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ec884242-662f-4387-b710-0d6c601a41c4" containerName="extract-content" Nov 26 06:51:32 crc kubenswrapper[4492]: I1126 06:51:32.919431 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="ec884242-662f-4387-b710-0d6c601a41c4" containerName="extract-content" Nov 26 06:51:32 crc kubenswrapper[4492]: E1126 06:51:32.919448 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bff9a13a-5b4a-4789-ab6d-bff2ad038798" containerName="extract-content" Nov 26 06:51:32 crc kubenswrapper[4492]: I1126 06:51:32.919455 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="bff9a13a-5b4a-4789-ab6d-bff2ad038798" containerName="extract-content" Nov 26 06:51:32 crc kubenswrapper[4492]: E1126 06:51:32.919464 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ec884242-662f-4387-b710-0d6c601a41c4" containerName="registry-server" Nov 26 06:51:32 crc kubenswrapper[4492]: I1126 06:51:32.919469 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="ec884242-662f-4387-b710-0d6c601a41c4" containerName="registry-server" Nov 26 06:51:32 crc kubenswrapper[4492]: E1126 06:51:32.919477 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d1f1e087-75f3-4e15-b556-ccd3642cdd5e" containerName="pruner" Nov 26 06:51:32 crc kubenswrapper[4492]: I1126 06:51:32.919483 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="d1f1e087-75f3-4e15-b556-ccd3642cdd5e" containerName="pruner" Nov 26 06:51:32 crc kubenswrapper[4492]: E1126 06:51:32.919490 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ec884242-662f-4387-b710-0d6c601a41c4" containerName="extract-utilities" Nov 26 06:51:32 crc kubenswrapper[4492]: I1126 06:51:32.919496 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="ec884242-662f-4387-b710-0d6c601a41c4" containerName="extract-utilities" Nov 26 06:51:32 crc kubenswrapper[4492]: E1126 06:51:32.919504 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bff9a13a-5b4a-4789-ab6d-bff2ad038798" containerName="registry-server" Nov 26 06:51:32 crc kubenswrapper[4492]: I1126 06:51:32.919509 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="bff9a13a-5b4a-4789-ab6d-bff2ad038798" containerName="registry-server" Nov 26 06:51:32 crc kubenswrapper[4492]: I1126 06:51:32.919597 4492 memory_manager.go:354] "RemoveStaleState removing 
state" podUID="d1f1e087-75f3-4e15-b556-ccd3642cdd5e" containerName="pruner" Nov 26 06:51:32 crc kubenswrapper[4492]: I1126 06:51:32.919607 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="bff9a13a-5b4a-4789-ab6d-bff2ad038798" containerName="registry-server" Nov 26 06:51:32 crc kubenswrapper[4492]: I1126 06:51:32.919615 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="ec884242-662f-4387-b710-0d6c601a41c4" containerName="registry-server" Nov 26 06:51:32 crc kubenswrapper[4492]: I1126 06:51:32.919971 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Nov 26 06:51:32 crc kubenswrapper[4492]: I1126 06:51:32.923575 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Nov 26 06:51:32 crc kubenswrapper[4492]: I1126 06:51:32.923882 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Nov 26 06:51:32 crc kubenswrapper[4492]: I1126 06:51:32.929338 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/028bd749-7124-44a5-b43a-61c4378df60d-var-lock\") pod \"installer-9-crc\" (UID: \"028bd749-7124-44a5-b43a-61c4378df60d\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 26 06:51:32 crc kubenswrapper[4492]: I1126 06:51:32.929364 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/028bd749-7124-44a5-b43a-61c4378df60d-kubelet-dir\") pod \"installer-9-crc\" (UID: \"028bd749-7124-44a5-b43a-61c4378df60d\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 26 06:51:32 crc kubenswrapper[4492]: I1126 06:51:32.929392 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/028bd749-7124-44a5-b43a-61c4378df60d-kube-api-access\") pod \"installer-9-crc\" (UID: \"028bd749-7124-44a5-b43a-61c4378df60d\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 26 06:51:32 crc kubenswrapper[4492]: I1126 06:51:32.929460 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cthkd\" (UniqueName: \"kubernetes.io/projected/ec884242-662f-4387-b710-0d6c601a41c4-kube-api-access-cthkd\") on node \"crc\" DevicePath \"\"" Nov 26 06:51:32 crc kubenswrapper[4492]: I1126 06:51:32.929471 4492 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ec884242-662f-4387-b710-0d6c601a41c4-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 06:51:32 crc kubenswrapper[4492]: I1126 06:51:32.929481 4492 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ec884242-662f-4387-b710-0d6c601a41c4-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 06:51:32 crc kubenswrapper[4492]: I1126 06:51:32.934297 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Nov 26 06:51:33 crc kubenswrapper[4492]: I1126 06:51:33.030658 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/028bd749-7124-44a5-b43a-61c4378df60d-kube-api-access\") pod \"installer-9-crc\" (UID: \"028bd749-7124-44a5-b43a-61c4378df60d\") " 
pod="openshift-kube-apiserver/installer-9-crc" Nov 26 06:51:33 crc kubenswrapper[4492]: I1126 06:51:33.030747 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/028bd749-7124-44a5-b43a-61c4378df60d-var-lock\") pod \"installer-9-crc\" (UID: \"028bd749-7124-44a5-b43a-61c4378df60d\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 26 06:51:33 crc kubenswrapper[4492]: I1126 06:51:33.030774 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/028bd749-7124-44a5-b43a-61c4378df60d-kubelet-dir\") pod \"installer-9-crc\" (UID: \"028bd749-7124-44a5-b43a-61c4378df60d\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 26 06:51:33 crc kubenswrapper[4492]: I1126 06:51:33.030837 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/028bd749-7124-44a5-b43a-61c4378df60d-kubelet-dir\") pod \"installer-9-crc\" (UID: \"028bd749-7124-44a5-b43a-61c4378df60d\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 26 06:51:33 crc kubenswrapper[4492]: I1126 06:51:33.030881 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/028bd749-7124-44a5-b43a-61c4378df60d-var-lock\") pod \"installer-9-crc\" (UID: \"028bd749-7124-44a5-b43a-61c4378df60d\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 26 06:51:33 crc kubenswrapper[4492]: I1126 06:51:33.047589 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/028bd749-7124-44a5-b43a-61c4378df60d-kube-api-access\") pod \"installer-9-crc\" (UID: \"028bd749-7124-44a5-b43a-61c4378df60d\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 26 06:51:33 crc kubenswrapper[4492]: I1126 06:51:33.234933 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Nov 26 06:51:33 crc kubenswrapper[4492]: I1126 06:51:33.588780 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Nov 26 06:51:33 crc kubenswrapper[4492]: I1126 06:51:33.647349 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"028bd749-7124-44a5-b43a-61c4378df60d","Type":"ContainerStarted","Data":"67654424075af15c9d0edec927cff8a458552356648e2a32014ea4ca8234044b"} Nov 26 06:51:33 crc kubenswrapper[4492]: I1126 06:51:33.650219 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-l62r5" event={"ID":"ec884242-662f-4387-b710-0d6c601a41c4","Type":"ContainerDied","Data":"f80bb35757fe5321e47e84f8daad7e8eccf1ec0eec1cbd56feb284094a540202"} Nov 26 06:51:33 crc kubenswrapper[4492]: I1126 06:51:33.650284 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-l62r5" Nov 26 06:51:33 crc kubenswrapper[4492]: I1126 06:51:33.650308 4492 scope.go:117] "RemoveContainer" containerID="668796da89f22e2041c4813ef1d5fdc136d5d7e30a86e72f0e666460b45b5ebb" Nov 26 06:51:33 crc kubenswrapper[4492]: I1126 06:51:33.667665 4492 scope.go:117] "RemoveContainer" containerID="7fc58af892d47d89b19c72281be1c4125850ffa555a4ab31ca6e4a33a551dd1f" Nov 26 06:51:33 crc kubenswrapper[4492]: I1126 06:51:33.680334 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-l62r5"] Nov 26 06:51:33 crc kubenswrapper[4492]: I1126 06:51:33.693668 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-l62r5"] Nov 26 06:51:33 crc kubenswrapper[4492]: I1126 06:51:33.702706 4492 scope.go:117] "RemoveContainer" containerID="8480ae27267dfea4dc40407b2e90b8d2987df8738a7cbcc4895caf48fda222e7" Nov 26 06:51:34 crc kubenswrapper[4492]: I1126 06:51:34.458187 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ec884242-662f-4387-b710-0d6c601a41c4" path="/var/lib/kubelet/pods/ec884242-662f-4387-b710-0d6c601a41c4/volumes" Nov 26 06:51:34 crc kubenswrapper[4492]: I1126 06:51:34.662122 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"028bd749-7124-44a5-b43a-61c4378df60d","Type":"ContainerStarted","Data":"d301b7e148fd7682dbff1658b87971413b52736d7e0d1e12c1825a2f5a062cc3"} Nov 26 06:51:34 crc kubenswrapper[4492]: I1126 06:51:34.682672 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/installer-9-crc" podStartSLOduration=2.682654911 podStartE2EDuration="2.682654911s" podCreationTimestamp="2025-11-26 06:51:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:51:34.676417457 +0000 UTC m=+190.560305754" watchObservedRunningTime="2025-11-26 06:51:34.682654911 +0000 UTC m=+190.566543199" Nov 26 06:51:39 crc kubenswrapper[4492]: I1126 06:51:39.556780 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-2d74v"] Nov 26 06:51:49 crc kubenswrapper[4492]: I1126 06:51:49.441136 4492 patch_prober.go:28] interesting pod/machine-config-daemon-6blv7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 06:51:49 crc kubenswrapper[4492]: I1126 06:51:49.442336 4492 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 06:51:49 crc kubenswrapper[4492]: I1126 06:51:49.442405 4492 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" Nov 26 06:51:49 crc kubenswrapper[4492]: I1126 06:51:49.443128 4492 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"695ce8a08afa726c47c6aa48ddd828cbc420a9740de6cf165351e5bd68174a89"} 
pod="openshift-machine-config-operator/machine-config-daemon-6blv7" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 06:51:49 crc kubenswrapper[4492]: I1126 06:51:49.443220 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" containerID="cri-o://695ce8a08afa726c47c6aa48ddd828cbc420a9740de6cf165351e5bd68174a89" gracePeriod=600 Nov 26 06:51:49 crc kubenswrapper[4492]: I1126 06:51:49.742588 4492 generic.go:334] "Generic (PLEG): container finished" podID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerID="695ce8a08afa726c47c6aa48ddd828cbc420a9740de6cf165351e5bd68174a89" exitCode=0 Nov 26 06:51:49 crc kubenswrapper[4492]: I1126 06:51:49.742689 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" event={"ID":"04bf18ad-d2a1-4b30-a3fa-2b6247363c82","Type":"ContainerDied","Data":"695ce8a08afa726c47c6aa48ddd828cbc420a9740de6cf165351e5bd68174a89"} Nov 26 06:51:49 crc kubenswrapper[4492]: I1126 06:51:49.742973 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" event={"ID":"04bf18ad-d2a1-4b30-a3fa-2b6247363c82","Type":"ContainerStarted","Data":"ebb7c14f054bf1a3e0275de2bfc80f082c3d5f1d3a6a0ca4e02d8ce5ee7ee01b"} Nov 26 06:52:04 crc kubenswrapper[4492]: I1126 06:52:04.585342 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-authentication/oauth-openshift-558db77b4-2d74v" podUID="a0421328-5a0e-4e84-ba97-1926057962e6" containerName="oauth-openshift" containerID="cri-o://91af68b25ab19bc9e22765eae1ec84d3b920c76e29ea5162a316a57e870907ef" gracePeriod=15 Nov 26 06:52:04 crc kubenswrapper[4492]: I1126 06:52:04.830052 4492 generic.go:334] "Generic (PLEG): container finished" podID="a0421328-5a0e-4e84-ba97-1926057962e6" containerID="91af68b25ab19bc9e22765eae1ec84d3b920c76e29ea5162a316a57e870907ef" exitCode=0 Nov 26 06:52:04 crc kubenswrapper[4492]: I1126 06:52:04.830103 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-2d74v" event={"ID":"a0421328-5a0e-4e84-ba97-1926057962e6","Type":"ContainerDied","Data":"91af68b25ab19bc9e22765eae1ec84d3b920c76e29ea5162a316a57e870907ef"} Nov 26 06:52:04 crc kubenswrapper[4492]: I1126 06:52:04.915811 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-2d74v" Nov 26 06:52:04 crc kubenswrapper[4492]: I1126 06:52:04.950815 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-547dbbf774-q5fdz"] Nov 26 06:52:04 crc kubenswrapper[4492]: E1126 06:52:04.951214 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a0421328-5a0e-4e84-ba97-1926057962e6" containerName="oauth-openshift" Nov 26 06:52:04 crc kubenswrapper[4492]: I1126 06:52:04.951248 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="a0421328-5a0e-4e84-ba97-1926057962e6" containerName="oauth-openshift" Nov 26 06:52:04 crc kubenswrapper[4492]: I1126 06:52:04.951410 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="a0421328-5a0e-4e84-ba97-1926057962e6" containerName="oauth-openshift" Nov 26 06:52:04 crc kubenswrapper[4492]: I1126 06:52:04.952944 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-547dbbf774-q5fdz" Nov 26 06:52:04 crc kubenswrapper[4492]: I1126 06:52:04.959679 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-547dbbf774-q5fdz"] Nov 26 06:52:05 crc kubenswrapper[4492]: I1126 06:52:05.016621 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/a0421328-5a0e-4e84-ba97-1926057962e6-v4-0-config-user-template-provider-selection\") pod \"a0421328-5a0e-4e84-ba97-1926057962e6\" (UID: \"a0421328-5a0e-4e84-ba97-1926057962e6\") " Nov 26 06:52:05 crc kubenswrapper[4492]: I1126 06:52:05.017154 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a0421328-5a0e-4e84-ba97-1926057962e6-v4-0-config-system-trusted-ca-bundle\") pod \"a0421328-5a0e-4e84-ba97-1926057962e6\" (UID: \"a0421328-5a0e-4e84-ba97-1926057962e6\") " Nov 26 06:52:05 crc kubenswrapper[4492]: I1126 06:52:05.017375 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/a0421328-5a0e-4e84-ba97-1926057962e6-v4-0-config-system-ocp-branding-template\") pod \"a0421328-5a0e-4e84-ba97-1926057962e6\" (UID: \"a0421328-5a0e-4e84-ba97-1926057962e6\") " Nov 26 06:52:05 crc kubenswrapper[4492]: I1126 06:52:05.017477 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/a0421328-5a0e-4e84-ba97-1926057962e6-v4-0-config-system-session\") pod \"a0421328-5a0e-4e84-ba97-1926057962e6\" (UID: \"a0421328-5a0e-4e84-ba97-1926057962e6\") " Nov 26 06:52:05 crc kubenswrapper[4492]: I1126 06:52:05.017582 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/a0421328-5a0e-4e84-ba97-1926057962e6-v4-0-config-system-serving-cert\") pod \"a0421328-5a0e-4e84-ba97-1926057962e6\" (UID: \"a0421328-5a0e-4e84-ba97-1926057962e6\") " Nov 26 06:52:05 crc kubenswrapper[4492]: I1126 06:52:05.017690 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/a0421328-5a0e-4e84-ba97-1926057962e6-v4-0-config-user-idp-0-file-data\") pod \"a0421328-5a0e-4e84-ba97-1926057962e6\" (UID: \"a0421328-5a0e-4e84-ba97-1926057962e6\") " Nov 26 06:52:05 crc kubenswrapper[4492]: I1126 06:52:05.017828 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/a0421328-5a0e-4e84-ba97-1926057962e6-audit-dir\") pod \"a0421328-5a0e-4e84-ba97-1926057962e6\" (UID: \"a0421328-5a0e-4e84-ba97-1926057962e6\") " Nov 26 06:52:05 crc kubenswrapper[4492]: I1126 06:52:05.017932 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/a0421328-5a0e-4e84-ba97-1926057962e6-v4-0-config-system-cliconfig\") pod \"a0421328-5a0e-4e84-ba97-1926057962e6\" (UID: \"a0421328-5a0e-4e84-ba97-1926057962e6\") " Nov 26 06:52:05 crc kubenswrapper[4492]: I1126 06:52:05.018038 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/configmap/a0421328-5a0e-4e84-ba97-1926057962e6-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "a0421328-5a0e-4e84-ba97-1926057962e6" (UID: "a0421328-5a0e-4e84-ba97-1926057962e6"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:52:05 crc kubenswrapper[4492]: I1126 06:52:05.018057 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/a0421328-5a0e-4e84-ba97-1926057962e6-v4-0-config-system-router-certs\") pod \"a0421328-5a0e-4e84-ba97-1926057962e6\" (UID: \"a0421328-5a0e-4e84-ba97-1926057962e6\") " Nov 26 06:52:05 crc kubenswrapper[4492]: I1126 06:52:05.018249 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/a0421328-5a0e-4e84-ba97-1926057962e6-v4-0-config-user-template-login\") pod \"a0421328-5a0e-4e84-ba97-1926057962e6\" (UID: \"a0421328-5a0e-4e84-ba97-1926057962e6\") " Nov 26 06:52:05 crc kubenswrapper[4492]: I1126 06:52:05.018321 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/a0421328-5a0e-4e84-ba97-1926057962e6-audit-policies\") pod \"a0421328-5a0e-4e84-ba97-1926057962e6\" (UID: \"a0421328-5a0e-4e84-ba97-1926057962e6\") " Nov 26 06:52:05 crc kubenswrapper[4492]: I1126 06:52:05.018347 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/a0421328-5a0e-4e84-ba97-1926057962e6-v4-0-config-system-service-ca\") pod \"a0421328-5a0e-4e84-ba97-1926057962e6\" (UID: \"a0421328-5a0e-4e84-ba97-1926057962e6\") " Nov 26 06:52:05 crc kubenswrapper[4492]: I1126 06:52:05.018403 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/a0421328-5a0e-4e84-ba97-1926057962e6-v4-0-config-user-template-error\") pod \"a0421328-5a0e-4e84-ba97-1926057962e6\" (UID: \"a0421328-5a0e-4e84-ba97-1926057962e6\") " Nov 26 06:52:05 crc kubenswrapper[4492]: I1126 06:52:05.018435 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kfckt\" (UniqueName: \"kubernetes.io/projected/a0421328-5a0e-4e84-ba97-1926057962e6-kube-api-access-kfckt\") pod \"a0421328-5a0e-4e84-ba97-1926057962e6\" (UID: \"a0421328-5a0e-4e84-ba97-1926057962e6\") " Nov 26 06:52:05 crc kubenswrapper[4492]: I1126 06:52:05.018782 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a0421328-5a0e-4e84-ba97-1926057962e6-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "a0421328-5a0e-4e84-ba97-1926057962e6" (UID: "a0421328-5a0e-4e84-ba97-1926057962e6"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:52:05 crc kubenswrapper[4492]: I1126 06:52:05.018907 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a0421328-5a0e-4e84-ba97-1926057962e6-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "a0421328-5a0e-4e84-ba97-1926057962e6" (UID: "a0421328-5a0e-4e84-ba97-1926057962e6"). InnerVolumeSpecName "v4-0-config-system-service-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:52:05 crc kubenswrapper[4492]: I1126 06:52:05.018948 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a0421328-5a0e-4e84-ba97-1926057962e6-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "a0421328-5a0e-4e84-ba97-1926057962e6" (UID: "a0421328-5a0e-4e84-ba97-1926057962e6"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 06:52:05 crc kubenswrapper[4492]: I1126 06:52:05.020138 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a0421328-5a0e-4e84-ba97-1926057962e6-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "a0421328-5a0e-4e84-ba97-1926057962e6" (UID: "a0421328-5a0e-4e84-ba97-1926057962e6"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:52:05 crc kubenswrapper[4492]: I1126 06:52:05.020218 4492 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/a0421328-5a0e-4e84-ba97-1926057962e6-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 26 06:52:05 crc kubenswrapper[4492]: I1126 06:52:05.020240 4492 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/a0421328-5a0e-4e84-ba97-1926057962e6-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Nov 26 06:52:05 crc kubenswrapper[4492]: I1126 06:52:05.020254 4492 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a0421328-5a0e-4e84-ba97-1926057962e6-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 06:52:05 crc kubenswrapper[4492]: I1126 06:52:05.020265 4492 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/a0421328-5a0e-4e84-ba97-1926057962e6-audit-dir\") on node \"crc\" DevicePath \"\"" Nov 26 06:52:05 crc kubenswrapper[4492]: I1126 06:52:05.028549 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0421328-5a0e-4e84-ba97-1926057962e6-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "a0421328-5a0e-4e84-ba97-1926057962e6" (UID: "a0421328-5a0e-4e84-ba97-1926057962e6"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:52:05 crc kubenswrapper[4492]: I1126 06:52:05.028969 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0421328-5a0e-4e84-ba97-1926057962e6-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "a0421328-5a0e-4e84-ba97-1926057962e6" (UID: "a0421328-5a0e-4e84-ba97-1926057962e6"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:52:05 crc kubenswrapper[4492]: I1126 06:52:05.029695 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0421328-5a0e-4e84-ba97-1926057962e6-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "a0421328-5a0e-4e84-ba97-1926057962e6" (UID: "a0421328-5a0e-4e84-ba97-1926057962e6"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:52:05 crc kubenswrapper[4492]: I1126 06:52:05.029961 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0421328-5a0e-4e84-ba97-1926057962e6-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "a0421328-5a0e-4e84-ba97-1926057962e6" (UID: "a0421328-5a0e-4e84-ba97-1926057962e6"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:52:05 crc kubenswrapper[4492]: I1126 06:52:05.030137 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0421328-5a0e-4e84-ba97-1926057962e6-kube-api-access-kfckt" (OuterVolumeSpecName: "kube-api-access-kfckt") pod "a0421328-5a0e-4e84-ba97-1926057962e6" (UID: "a0421328-5a0e-4e84-ba97-1926057962e6"). InnerVolumeSpecName "kube-api-access-kfckt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:52:05 crc kubenswrapper[4492]: I1126 06:52:05.030398 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0421328-5a0e-4e84-ba97-1926057962e6-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "a0421328-5a0e-4e84-ba97-1926057962e6" (UID: "a0421328-5a0e-4e84-ba97-1926057962e6"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:52:05 crc kubenswrapper[4492]: I1126 06:52:05.030558 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0421328-5a0e-4e84-ba97-1926057962e6-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "a0421328-5a0e-4e84-ba97-1926057962e6" (UID: "a0421328-5a0e-4e84-ba97-1926057962e6"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:52:05 crc kubenswrapper[4492]: I1126 06:52:05.031323 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0421328-5a0e-4e84-ba97-1926057962e6-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "a0421328-5a0e-4e84-ba97-1926057962e6" (UID: "a0421328-5a0e-4e84-ba97-1926057962e6"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:52:05 crc kubenswrapper[4492]: I1126 06:52:05.047625 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0421328-5a0e-4e84-ba97-1926057962e6-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "a0421328-5a0e-4e84-ba97-1926057962e6" (UID: "a0421328-5a0e-4e84-ba97-1926057962e6"). InnerVolumeSpecName "v4-0-config-system-serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:52:05 crc kubenswrapper[4492]: I1126 06:52:05.121549 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/dd3a747a-92f4-40a9-86fe-86cb43a64179-v4-0-config-user-template-login\") pod \"oauth-openshift-547dbbf774-q5fdz\" (UID: \"dd3a747a-92f4-40a9-86fe-86cb43a64179\") " pod="openshift-authentication/oauth-openshift-547dbbf774-q5fdz" Nov 26 06:52:05 crc kubenswrapper[4492]: I1126 06:52:05.121605 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d8w8z\" (UniqueName: \"kubernetes.io/projected/dd3a747a-92f4-40a9-86fe-86cb43a64179-kube-api-access-d8w8z\") pod \"oauth-openshift-547dbbf774-q5fdz\" (UID: \"dd3a747a-92f4-40a9-86fe-86cb43a64179\") " pod="openshift-authentication/oauth-openshift-547dbbf774-q5fdz" Nov 26 06:52:05 crc kubenswrapper[4492]: I1126 06:52:05.121628 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/dd3a747a-92f4-40a9-86fe-86cb43a64179-audit-dir\") pod \"oauth-openshift-547dbbf774-q5fdz\" (UID: \"dd3a747a-92f4-40a9-86fe-86cb43a64179\") " pod="openshift-authentication/oauth-openshift-547dbbf774-q5fdz" Nov 26 06:52:05 crc kubenswrapper[4492]: I1126 06:52:05.121647 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/dd3a747a-92f4-40a9-86fe-86cb43a64179-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-547dbbf774-q5fdz\" (UID: \"dd3a747a-92f4-40a9-86fe-86cb43a64179\") " pod="openshift-authentication/oauth-openshift-547dbbf774-q5fdz" Nov 26 06:52:05 crc kubenswrapper[4492]: I1126 06:52:05.121668 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/dd3a747a-92f4-40a9-86fe-86cb43a64179-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-547dbbf774-q5fdz\" (UID: \"dd3a747a-92f4-40a9-86fe-86cb43a64179\") " pod="openshift-authentication/oauth-openshift-547dbbf774-q5fdz" Nov 26 06:52:05 crc kubenswrapper[4492]: I1126 06:52:05.121717 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/dd3a747a-92f4-40a9-86fe-86cb43a64179-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-547dbbf774-q5fdz\" (UID: \"dd3a747a-92f4-40a9-86fe-86cb43a64179\") " pod="openshift-authentication/oauth-openshift-547dbbf774-q5fdz" Nov 26 06:52:05 crc kubenswrapper[4492]: I1126 06:52:05.121739 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/dd3a747a-92f4-40a9-86fe-86cb43a64179-v4-0-config-user-template-error\") pod \"oauth-openshift-547dbbf774-q5fdz\" (UID: \"dd3a747a-92f4-40a9-86fe-86cb43a64179\") " pod="openshift-authentication/oauth-openshift-547dbbf774-q5fdz" Nov 26 06:52:05 crc kubenswrapper[4492]: I1126 06:52:05.121756 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: 
\"kubernetes.io/configmap/dd3a747a-92f4-40a9-86fe-86cb43a64179-v4-0-config-system-service-ca\") pod \"oauth-openshift-547dbbf774-q5fdz\" (UID: \"dd3a747a-92f4-40a9-86fe-86cb43a64179\") " pod="openshift-authentication/oauth-openshift-547dbbf774-q5fdz" Nov 26 06:52:05 crc kubenswrapper[4492]: I1126 06:52:05.121782 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/dd3a747a-92f4-40a9-86fe-86cb43a64179-v4-0-config-system-session\") pod \"oauth-openshift-547dbbf774-q5fdz\" (UID: \"dd3a747a-92f4-40a9-86fe-86cb43a64179\") " pod="openshift-authentication/oauth-openshift-547dbbf774-q5fdz" Nov 26 06:52:05 crc kubenswrapper[4492]: I1126 06:52:05.121871 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/dd3a747a-92f4-40a9-86fe-86cb43a64179-v4-0-config-system-cliconfig\") pod \"oauth-openshift-547dbbf774-q5fdz\" (UID: \"dd3a747a-92f4-40a9-86fe-86cb43a64179\") " pod="openshift-authentication/oauth-openshift-547dbbf774-q5fdz" Nov 26 06:52:05 crc kubenswrapper[4492]: I1126 06:52:05.121971 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/dd3a747a-92f4-40a9-86fe-86cb43a64179-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-547dbbf774-q5fdz\" (UID: \"dd3a747a-92f4-40a9-86fe-86cb43a64179\") " pod="openshift-authentication/oauth-openshift-547dbbf774-q5fdz" Nov 26 06:52:05 crc kubenswrapper[4492]: I1126 06:52:05.122019 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/dd3a747a-92f4-40a9-86fe-86cb43a64179-audit-policies\") pod \"oauth-openshift-547dbbf774-q5fdz\" (UID: \"dd3a747a-92f4-40a9-86fe-86cb43a64179\") " pod="openshift-authentication/oauth-openshift-547dbbf774-q5fdz" Nov 26 06:52:05 crc kubenswrapper[4492]: I1126 06:52:05.122049 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/dd3a747a-92f4-40a9-86fe-86cb43a64179-v4-0-config-system-router-certs\") pod \"oauth-openshift-547dbbf774-q5fdz\" (UID: \"dd3a747a-92f4-40a9-86fe-86cb43a64179\") " pod="openshift-authentication/oauth-openshift-547dbbf774-q5fdz" Nov 26 06:52:05 crc kubenswrapper[4492]: I1126 06:52:05.122084 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/dd3a747a-92f4-40a9-86fe-86cb43a64179-v4-0-config-system-serving-cert\") pod \"oauth-openshift-547dbbf774-q5fdz\" (UID: \"dd3a747a-92f4-40a9-86fe-86cb43a64179\") " pod="openshift-authentication/oauth-openshift-547dbbf774-q5fdz" Nov 26 06:52:05 crc kubenswrapper[4492]: I1126 06:52:05.122218 4492 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/a0421328-5a0e-4e84-ba97-1926057962e6-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Nov 26 06:52:05 crc kubenswrapper[4492]: I1126 06:52:05.122250 4492 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/a0421328-5a0e-4e84-ba97-1926057962e6-v4-0-config-user-template-error\") on node 
\"crc\" DevicePath \"\"" Nov 26 06:52:05 crc kubenswrapper[4492]: I1126 06:52:05.122260 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kfckt\" (UniqueName: \"kubernetes.io/projected/a0421328-5a0e-4e84-ba97-1926057962e6-kube-api-access-kfckt\") on node \"crc\" DevicePath \"\"" Nov 26 06:52:05 crc kubenswrapper[4492]: I1126 06:52:05.122275 4492 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/a0421328-5a0e-4e84-ba97-1926057962e6-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Nov 26 06:52:05 crc kubenswrapper[4492]: I1126 06:52:05.122287 4492 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/a0421328-5a0e-4e84-ba97-1926057962e6-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Nov 26 06:52:05 crc kubenswrapper[4492]: I1126 06:52:05.122297 4492 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/a0421328-5a0e-4e84-ba97-1926057962e6-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Nov 26 06:52:05 crc kubenswrapper[4492]: I1126 06:52:05.122308 4492 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/a0421328-5a0e-4e84-ba97-1926057962e6-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 06:52:05 crc kubenswrapper[4492]: I1126 06:52:05.122318 4492 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/a0421328-5a0e-4e84-ba97-1926057962e6-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Nov 26 06:52:05 crc kubenswrapper[4492]: I1126 06:52:05.122330 4492 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/a0421328-5a0e-4e84-ba97-1926057962e6-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Nov 26 06:52:05 crc kubenswrapper[4492]: I1126 06:52:05.122340 4492 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/a0421328-5a0e-4e84-ba97-1926057962e6-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Nov 26 06:52:05 crc kubenswrapper[4492]: I1126 06:52:05.223029 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/dd3a747a-92f4-40a9-86fe-86cb43a64179-audit-policies\") pod \"oauth-openshift-547dbbf774-q5fdz\" (UID: \"dd3a747a-92f4-40a9-86fe-86cb43a64179\") " pod="openshift-authentication/oauth-openshift-547dbbf774-q5fdz" Nov 26 06:52:05 crc kubenswrapper[4492]: I1126 06:52:05.223068 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/dd3a747a-92f4-40a9-86fe-86cb43a64179-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-547dbbf774-q5fdz\" (UID: \"dd3a747a-92f4-40a9-86fe-86cb43a64179\") " pod="openshift-authentication/oauth-openshift-547dbbf774-q5fdz" Nov 26 06:52:05 crc kubenswrapper[4492]: I1126 06:52:05.223089 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: 
\"kubernetes.io/secret/dd3a747a-92f4-40a9-86fe-86cb43a64179-v4-0-config-system-router-certs\") pod \"oauth-openshift-547dbbf774-q5fdz\" (UID: \"dd3a747a-92f4-40a9-86fe-86cb43a64179\") " pod="openshift-authentication/oauth-openshift-547dbbf774-q5fdz" Nov 26 06:52:05 crc kubenswrapper[4492]: I1126 06:52:05.223111 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/dd3a747a-92f4-40a9-86fe-86cb43a64179-v4-0-config-system-serving-cert\") pod \"oauth-openshift-547dbbf774-q5fdz\" (UID: \"dd3a747a-92f4-40a9-86fe-86cb43a64179\") " pod="openshift-authentication/oauth-openshift-547dbbf774-q5fdz" Nov 26 06:52:05 crc kubenswrapper[4492]: I1126 06:52:05.223151 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/dd3a747a-92f4-40a9-86fe-86cb43a64179-v4-0-config-user-template-login\") pod \"oauth-openshift-547dbbf774-q5fdz\" (UID: \"dd3a747a-92f4-40a9-86fe-86cb43a64179\") " pod="openshift-authentication/oauth-openshift-547dbbf774-q5fdz" Nov 26 06:52:05 crc kubenswrapper[4492]: I1126 06:52:05.223201 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d8w8z\" (UniqueName: \"kubernetes.io/projected/dd3a747a-92f4-40a9-86fe-86cb43a64179-kube-api-access-d8w8z\") pod \"oauth-openshift-547dbbf774-q5fdz\" (UID: \"dd3a747a-92f4-40a9-86fe-86cb43a64179\") " pod="openshift-authentication/oauth-openshift-547dbbf774-q5fdz" Nov 26 06:52:05 crc kubenswrapper[4492]: I1126 06:52:05.223229 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/dd3a747a-92f4-40a9-86fe-86cb43a64179-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-547dbbf774-q5fdz\" (UID: \"dd3a747a-92f4-40a9-86fe-86cb43a64179\") " pod="openshift-authentication/oauth-openshift-547dbbf774-q5fdz" Nov 26 06:52:05 crc kubenswrapper[4492]: I1126 06:52:05.223248 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/dd3a747a-92f4-40a9-86fe-86cb43a64179-audit-dir\") pod \"oauth-openshift-547dbbf774-q5fdz\" (UID: \"dd3a747a-92f4-40a9-86fe-86cb43a64179\") " pod="openshift-authentication/oauth-openshift-547dbbf774-q5fdz" Nov 26 06:52:05 crc kubenswrapper[4492]: I1126 06:52:05.223272 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/dd3a747a-92f4-40a9-86fe-86cb43a64179-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-547dbbf774-q5fdz\" (UID: \"dd3a747a-92f4-40a9-86fe-86cb43a64179\") " pod="openshift-authentication/oauth-openshift-547dbbf774-q5fdz" Nov 26 06:52:05 crc kubenswrapper[4492]: I1126 06:52:05.223292 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/dd3a747a-92f4-40a9-86fe-86cb43a64179-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-547dbbf774-q5fdz\" (UID: \"dd3a747a-92f4-40a9-86fe-86cb43a64179\") " pod="openshift-authentication/oauth-openshift-547dbbf774-q5fdz" Nov 26 06:52:05 crc kubenswrapper[4492]: I1126 06:52:05.223312 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: 
\"kubernetes.io/secret/dd3a747a-92f4-40a9-86fe-86cb43a64179-v4-0-config-user-template-error\") pod \"oauth-openshift-547dbbf774-q5fdz\" (UID: \"dd3a747a-92f4-40a9-86fe-86cb43a64179\") " pod="openshift-authentication/oauth-openshift-547dbbf774-q5fdz" Nov 26 06:52:05 crc kubenswrapper[4492]: I1126 06:52:05.223333 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/dd3a747a-92f4-40a9-86fe-86cb43a64179-v4-0-config-system-service-ca\") pod \"oauth-openshift-547dbbf774-q5fdz\" (UID: \"dd3a747a-92f4-40a9-86fe-86cb43a64179\") " pod="openshift-authentication/oauth-openshift-547dbbf774-q5fdz" Nov 26 06:52:05 crc kubenswrapper[4492]: I1126 06:52:05.223353 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/dd3a747a-92f4-40a9-86fe-86cb43a64179-v4-0-config-system-session\") pod \"oauth-openshift-547dbbf774-q5fdz\" (UID: \"dd3a747a-92f4-40a9-86fe-86cb43a64179\") " pod="openshift-authentication/oauth-openshift-547dbbf774-q5fdz" Nov 26 06:52:05 crc kubenswrapper[4492]: I1126 06:52:05.223371 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/dd3a747a-92f4-40a9-86fe-86cb43a64179-v4-0-config-system-cliconfig\") pod \"oauth-openshift-547dbbf774-q5fdz\" (UID: \"dd3a747a-92f4-40a9-86fe-86cb43a64179\") " pod="openshift-authentication/oauth-openshift-547dbbf774-q5fdz" Nov 26 06:52:05 crc kubenswrapper[4492]: I1126 06:52:05.224829 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/dd3a747a-92f4-40a9-86fe-86cb43a64179-v4-0-config-system-service-ca\") pod \"oauth-openshift-547dbbf774-q5fdz\" (UID: \"dd3a747a-92f4-40a9-86fe-86cb43a64179\") " pod="openshift-authentication/oauth-openshift-547dbbf774-q5fdz" Nov 26 06:52:05 crc kubenswrapper[4492]: I1126 06:52:05.224865 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/dd3a747a-92f4-40a9-86fe-86cb43a64179-audit-policies\") pod \"oauth-openshift-547dbbf774-q5fdz\" (UID: \"dd3a747a-92f4-40a9-86fe-86cb43a64179\") " pod="openshift-authentication/oauth-openshift-547dbbf774-q5fdz" Nov 26 06:52:05 crc kubenswrapper[4492]: I1126 06:52:05.225018 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/dd3a747a-92f4-40a9-86fe-86cb43a64179-audit-dir\") pod \"oauth-openshift-547dbbf774-q5fdz\" (UID: \"dd3a747a-92f4-40a9-86fe-86cb43a64179\") " pod="openshift-authentication/oauth-openshift-547dbbf774-q5fdz" Nov 26 06:52:05 crc kubenswrapper[4492]: I1126 06:52:05.225244 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/dd3a747a-92f4-40a9-86fe-86cb43a64179-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-547dbbf774-q5fdz\" (UID: \"dd3a747a-92f4-40a9-86fe-86cb43a64179\") " pod="openshift-authentication/oauth-openshift-547dbbf774-q5fdz" Nov 26 06:52:05 crc kubenswrapper[4492]: I1126 06:52:05.225485 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/dd3a747a-92f4-40a9-86fe-86cb43a64179-v4-0-config-system-cliconfig\") pod 
\"oauth-openshift-547dbbf774-q5fdz\" (UID: \"dd3a747a-92f4-40a9-86fe-86cb43a64179\") " pod="openshift-authentication/oauth-openshift-547dbbf774-q5fdz" Nov 26 06:52:05 crc kubenswrapper[4492]: I1126 06:52:05.227315 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/dd3a747a-92f4-40a9-86fe-86cb43a64179-v4-0-config-system-serving-cert\") pod \"oauth-openshift-547dbbf774-q5fdz\" (UID: \"dd3a747a-92f4-40a9-86fe-86cb43a64179\") " pod="openshift-authentication/oauth-openshift-547dbbf774-q5fdz" Nov 26 06:52:05 crc kubenswrapper[4492]: I1126 06:52:05.227805 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/dd3a747a-92f4-40a9-86fe-86cb43a64179-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-547dbbf774-q5fdz\" (UID: \"dd3a747a-92f4-40a9-86fe-86cb43a64179\") " pod="openshift-authentication/oauth-openshift-547dbbf774-q5fdz" Nov 26 06:52:05 crc kubenswrapper[4492]: I1126 06:52:05.228213 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/dd3a747a-92f4-40a9-86fe-86cb43a64179-v4-0-config-system-router-certs\") pod \"oauth-openshift-547dbbf774-q5fdz\" (UID: \"dd3a747a-92f4-40a9-86fe-86cb43a64179\") " pod="openshift-authentication/oauth-openshift-547dbbf774-q5fdz" Nov 26 06:52:05 crc kubenswrapper[4492]: I1126 06:52:05.228905 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/dd3a747a-92f4-40a9-86fe-86cb43a64179-v4-0-config-user-template-error\") pod \"oauth-openshift-547dbbf774-q5fdz\" (UID: \"dd3a747a-92f4-40a9-86fe-86cb43a64179\") " pod="openshift-authentication/oauth-openshift-547dbbf774-q5fdz" Nov 26 06:52:05 crc kubenswrapper[4492]: I1126 06:52:05.229144 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/dd3a747a-92f4-40a9-86fe-86cb43a64179-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-547dbbf774-q5fdz\" (UID: \"dd3a747a-92f4-40a9-86fe-86cb43a64179\") " pod="openshift-authentication/oauth-openshift-547dbbf774-q5fdz" Nov 26 06:52:05 crc kubenswrapper[4492]: I1126 06:52:05.230131 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/dd3a747a-92f4-40a9-86fe-86cb43a64179-v4-0-config-system-session\") pod \"oauth-openshift-547dbbf774-q5fdz\" (UID: \"dd3a747a-92f4-40a9-86fe-86cb43a64179\") " pod="openshift-authentication/oauth-openshift-547dbbf774-q5fdz" Nov 26 06:52:05 crc kubenswrapper[4492]: I1126 06:52:05.230272 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/dd3a747a-92f4-40a9-86fe-86cb43a64179-v4-0-config-user-template-login\") pod \"oauth-openshift-547dbbf774-q5fdz\" (UID: \"dd3a747a-92f4-40a9-86fe-86cb43a64179\") " pod="openshift-authentication/oauth-openshift-547dbbf774-q5fdz" Nov 26 06:52:05 crc kubenswrapper[4492]: I1126 06:52:05.230913 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/dd3a747a-92f4-40a9-86fe-86cb43a64179-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-547dbbf774-q5fdz\" 
(UID: \"dd3a747a-92f4-40a9-86fe-86cb43a64179\") " pod="openshift-authentication/oauth-openshift-547dbbf774-q5fdz" Nov 26 06:52:05 crc kubenswrapper[4492]: I1126 06:52:05.239572 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d8w8z\" (UniqueName: \"kubernetes.io/projected/dd3a747a-92f4-40a9-86fe-86cb43a64179-kube-api-access-d8w8z\") pod \"oauth-openshift-547dbbf774-q5fdz\" (UID: \"dd3a747a-92f4-40a9-86fe-86cb43a64179\") " pod="openshift-authentication/oauth-openshift-547dbbf774-q5fdz" Nov 26 06:52:05 crc kubenswrapper[4492]: I1126 06:52:05.271440 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-547dbbf774-q5fdz" Nov 26 06:52:05 crc kubenswrapper[4492]: I1126 06:52:05.651120 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-547dbbf774-q5fdz"] Nov 26 06:52:05 crc kubenswrapper[4492]: I1126 06:52:05.838709 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-547dbbf774-q5fdz" event={"ID":"dd3a747a-92f4-40a9-86fe-86cb43a64179","Type":"ContainerStarted","Data":"17bec4e63910dbcddb5f0c8f1e5fbf4bfda817b7bb2f217ef868c8cc8ea467da"} Nov 26 06:52:05 crc kubenswrapper[4492]: I1126 06:52:05.840773 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-2d74v" event={"ID":"a0421328-5a0e-4e84-ba97-1926057962e6","Type":"ContainerDied","Data":"deb95b8b8ef44ecc4fdc442e78af73b544990fcbc79f386e5125605a1d277cb9"} Nov 26 06:52:05 crc kubenswrapper[4492]: I1126 06:52:05.840867 4492 scope.go:117] "RemoveContainer" containerID="91af68b25ab19bc9e22765eae1ec84d3b920c76e29ea5162a316a57e870907ef" Nov 26 06:52:05 crc kubenswrapper[4492]: I1126 06:52:05.840867 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-2d74v" Nov 26 06:52:05 crc kubenswrapper[4492]: I1126 06:52:05.873287 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-2d74v"] Nov 26 06:52:05 crc kubenswrapper[4492]: I1126 06:52:05.878481 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-2d74v"] Nov 26 06:52:06 crc kubenswrapper[4492]: I1126 06:52:06.446789 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0421328-5a0e-4e84-ba97-1926057962e6" path="/var/lib/kubelet/pods/a0421328-5a0e-4e84-ba97-1926057962e6/volumes" Nov 26 06:52:06 crc kubenswrapper[4492]: I1126 06:52:06.850409 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-547dbbf774-q5fdz" event={"ID":"dd3a747a-92f4-40a9-86fe-86cb43a64179","Type":"ContainerStarted","Data":"bf4470ef5d90abada17ba8128010c2798559a040200f0668ada4f91b999a0e35"} Nov 26 06:52:06 crc kubenswrapper[4492]: I1126 06:52:06.850657 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-547dbbf774-q5fdz" Nov 26 06:52:06 crc kubenswrapper[4492]: I1126 06:52:06.856372 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-547dbbf774-q5fdz" Nov 26 06:52:06 crc kubenswrapper[4492]: I1126 06:52:06.873615 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-547dbbf774-q5fdz" podStartSLOduration=27.873599441 podStartE2EDuration="27.873599441s" podCreationTimestamp="2025-11-26 06:51:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:52:06.867864832 +0000 UTC m=+222.751753120" watchObservedRunningTime="2025-11-26 06:52:06.873599441 +0000 UTC m=+222.757487739" Nov 26 06:52:11 crc kubenswrapper[4492]: I1126 06:52:11.605661 4492 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Nov 26 06:52:11 crc kubenswrapper[4492]: I1126 06:52:11.606601 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" containerID="cri-o://70a3d5141782b7c4dcb2ef96a026e1fb4f4ef101c967f64a20ece097bbdf007f" gracePeriod=15 Nov 26 06:52:11 crc kubenswrapper[4492]: I1126 06:52:11.606816 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" containerID="cri-o://ef8e05b7e0643e9159acdd474c0f1ed97db182b0a7ed4f21b475ce6e4c051f52" gracePeriod=15 Nov 26 06:52:11 crc kubenswrapper[4492]: I1126 06:52:11.606878 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" containerID="cri-o://7ca15652692a4dd6b01f7ec02b8ff37ac41ed740c4c47d451e0742aa80d0c028" gracePeriod=15 Nov 26 06:52:11 crc kubenswrapper[4492]: I1126 06:52:11.606933 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" 
containerName="kube-apiserver-cert-regeneration-controller" containerID="cri-o://1684e95c263df92c92efdd2240417c071f977b58d825abd2155277d7cce1fd57" gracePeriod=15 Nov 26 06:52:11 crc kubenswrapper[4492]: I1126 06:52:11.606971 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" containerID="cri-o://15f4e01e3336948991062e895549de16371af485e1cc1f32591b5dc4fc62472b" gracePeriod=15 Nov 26 06:52:11 crc kubenswrapper[4492]: I1126 06:52:11.609115 4492 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Nov 26 06:52:11 crc kubenswrapper[4492]: E1126 06:52:11.609351 4492 file.go:109] "Unable to process watch event" err="can't process config file \"/etc/kubernetes/manifests/kube-apiserver-startup-monitor-pod.yaml\": /etc/kubernetes/manifests/kube-apiserver-startup-monitor-pod.yaml: couldn't parse as pod(Object 'Kind' is missing in 'null'), please check config file" Nov 26 06:52:11 crc kubenswrapper[4492]: E1126 06:52:11.609484 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 26 06:52:11 crc kubenswrapper[4492]: I1126 06:52:11.609502 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 26 06:52:11 crc kubenswrapper[4492]: E1126 06:52:11.609515 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Nov 26 06:52:11 crc kubenswrapper[4492]: I1126 06:52:11.609523 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Nov 26 06:52:11 crc kubenswrapper[4492]: E1126 06:52:11.609539 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Nov 26 06:52:11 crc kubenswrapper[4492]: I1126 06:52:11.609545 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Nov 26 06:52:11 crc kubenswrapper[4492]: E1126 06:52:11.609554 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Nov 26 06:52:11 crc kubenswrapper[4492]: I1126 06:52:11.609561 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Nov 26 06:52:11 crc kubenswrapper[4492]: E1126 06:52:11.609574 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Nov 26 06:52:11 crc kubenswrapper[4492]: I1126 06:52:11.609580 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Nov 26 06:52:11 crc kubenswrapper[4492]: E1126 06:52:11.609596 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup" Nov 26 06:52:11 crc kubenswrapper[4492]: I1126 06:52:11.609601 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup" Nov 26 06:52:11 crc 
kubenswrapper[4492]: I1126 06:52:11.609700 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Nov 26 06:52:11 crc kubenswrapper[4492]: I1126 06:52:11.609713 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 26 06:52:11 crc kubenswrapper[4492]: I1126 06:52:11.609720 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Nov 26 06:52:11 crc kubenswrapper[4492]: I1126 06:52:11.609728 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Nov 26 06:52:11 crc kubenswrapper[4492]: I1126 06:52:11.609737 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Nov 26 06:52:11 crc kubenswrapper[4492]: I1126 06:52:11.609746 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 26 06:52:11 crc kubenswrapper[4492]: E1126 06:52:11.609860 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 26 06:52:11 crc kubenswrapper[4492]: I1126 06:52:11.609869 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 26 06:52:11 crc kubenswrapper[4492]: I1126 06:52:11.690203 4492 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Nov 26 06:52:11 crc kubenswrapper[4492]: I1126 06:52:11.690831 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 06:52:11 crc kubenswrapper[4492]: I1126 06:52:11.691751 4492 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 192.168.25.180:6443: connect: connection refused" Nov 26 06:52:11 crc kubenswrapper[4492]: I1126 06:52:11.702426 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 06:52:11 crc kubenswrapper[4492]: E1126 06:52:11.702443 4492 kubelet.go:1929] "Failed creating a mirror pod for" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods\": dial tcp 192.168.25.180:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 06:52:11 crc kubenswrapper[4492]: I1126 06:52:11.702465 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 06:52:11 crc kubenswrapper[4492]: I1126 06:52:11.702490 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 06:52:11 crc kubenswrapper[4492]: I1126 06:52:11.804364 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 06:52:11 crc kubenswrapper[4492]: I1126 06:52:11.804424 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 06:52:11 crc kubenswrapper[4492]: I1126 06:52:11.804495 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 06:52:11 crc kubenswrapper[4492]: I1126 06:52:11.804534 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 06:52:11 crc 
kubenswrapper[4492]: I1126 06:52:11.804639 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 06:52:11 crc kubenswrapper[4492]: I1126 06:52:11.804689 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 06:52:11 crc kubenswrapper[4492]: I1126 06:52:11.804714 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 06:52:11 crc kubenswrapper[4492]: I1126 06:52:11.804739 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 06:52:11 crc kubenswrapper[4492]: I1126 06:52:11.804759 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 06:52:11 crc kubenswrapper[4492]: I1126 06:52:11.804800 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 06:52:11 crc kubenswrapper[4492]: I1126 06:52:11.804770 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 06:52:11 crc kubenswrapper[4492]: I1126 06:52:11.882549 4492 generic.go:334] "Generic (PLEG): container finished" podID="028bd749-7124-44a5-b43a-61c4378df60d" containerID="d301b7e148fd7682dbff1658b87971413b52736d7e0d1e12c1825a2f5a062cc3" exitCode=0 Nov 26 06:52:11 crc kubenswrapper[4492]: I1126 06:52:11.882629 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"028bd749-7124-44a5-b43a-61c4378df60d","Type":"ContainerDied","Data":"d301b7e148fd7682dbff1658b87971413b52736d7e0d1e12c1825a2f5a062cc3"} Nov 26 06:52:11 crc kubenswrapper[4492]: I1126 06:52:11.883341 4492 status_manager.go:851] "Failed to get status for pod" podUID="028bd749-7124-44a5-b43a-61c4378df60d" pod="openshift-kube-apiserver/installer-9-crc" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 192.168.25.180:6443: connect: connection refused" Nov 26 06:52:11 crc kubenswrapper[4492]: I1126 06:52:11.883693 4492 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 192.168.25.180:6443: connect: connection refused" Nov 26 06:52:11 crc kubenswrapper[4492]: I1126 06:52:11.885517 4492 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Nov 26 06:52:11 crc kubenswrapper[4492]: I1126 06:52:11.886698 4492 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Nov 26 06:52:11 crc kubenswrapper[4492]: I1126 06:52:11.887255 4492 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="ef8e05b7e0643e9159acdd474c0f1ed97db182b0a7ed4f21b475ce6e4c051f52" exitCode=0 Nov 26 06:52:11 crc kubenswrapper[4492]: I1126 06:52:11.887280 4492 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="7ca15652692a4dd6b01f7ec02b8ff37ac41ed740c4c47d451e0742aa80d0c028" exitCode=0 Nov 26 06:52:11 crc kubenswrapper[4492]: I1126 06:52:11.887290 4492 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="1684e95c263df92c92efdd2240417c071f977b58d825abd2155277d7cce1fd57" exitCode=0 Nov 26 06:52:11 crc kubenswrapper[4492]: I1126 06:52:11.887298 4492 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="15f4e01e3336948991062e895549de16371af485e1cc1f32591b5dc4fc62472b" exitCode=2 Nov 26 06:52:11 crc kubenswrapper[4492]: I1126 06:52:11.887331 4492 scope.go:117] "RemoveContainer" containerID="6a6dd3695118a8c09585a7cfceb42ac5ae5898562c5f6442da6936f849a4e9f8" Nov 26 06:52:11 crc kubenswrapper[4492]: I1126 06:52:11.906058 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 06:52:11 crc kubenswrapper[4492]: I1126 06:52:11.906119 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 06:52:11 crc kubenswrapper[4492]: I1126 06:52:11.906149 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 06:52:11 crc kubenswrapper[4492]: I1126 06:52:11.906186 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 06:52:11 crc kubenswrapper[4492]: I1126 06:52:11.906207 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 06:52:11 crc kubenswrapper[4492]: I1126 06:52:11.906304 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 06:52:11 crc kubenswrapper[4492]: I1126 06:52:11.906343 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 06:52:11 crc kubenswrapper[4492]: I1126 06:52:11.906386 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 06:52:11 crc kubenswrapper[4492]: I1126 06:52:11.906390 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 06:52:11 crc kubenswrapper[4492]: I1126 06:52:11.906425 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 06:52:12 crc kubenswrapper[4492]: I1126 06:52:12.003524 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 06:52:12 crc kubenswrapper[4492]: W1126 06:52:12.021891 4492 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf85e55b1a89d02b0cb034b1ea31ed45a.slice/crio-ce85e061298081a29f68bf6d824a89eb18949c3f78850da1adee2fa7c37b7d75 WatchSource:0}: Error finding container ce85e061298081a29f68bf6d824a89eb18949c3f78850da1adee2fa7c37b7d75: Status 404 returned error can't find the container with id ce85e061298081a29f68bf6d824a89eb18949c3f78850da1adee2fa7c37b7d75 Nov 26 06:52:12 crc kubenswrapper[4492]: E1126 06:52:12.024972 4492 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 192.168.25.180:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.187b7bebde6036b3 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-26 06:52:12.024059571 +0000 UTC m=+227.907947870,LastTimestamp:2025-11-26 06:52:12.024059571 +0000 UTC m=+227.907947870,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Nov 26 06:52:12 crc kubenswrapper[4492]: I1126 06:52:12.901938 4492 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Nov 26 06:52:12 crc kubenswrapper[4492]: I1126 06:52:12.905829 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"59908fb92c27e15a770101bd454504536eea7a5e5f614c90e4790995fef2e4bb"} Nov 26 06:52:12 crc kubenswrapper[4492]: I1126 06:52:12.905870 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"ce85e061298081a29f68bf6d824a89eb18949c3f78850da1adee2fa7c37b7d75"} Nov 26 06:52:12 crc kubenswrapper[4492]: E1126 06:52:12.906363 4492 kubelet.go:1929] "Failed creating a mirror pod for" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods\": dial tcp 192.168.25.180:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 06:52:12 crc kubenswrapper[4492]: I1126 06:52:12.906529 4492 status_manager.go:851] "Failed to get status for pod" podUID="028bd749-7124-44a5-b43a-61c4378df60d" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 192.168.25.180:6443: connect: connection refused" Nov 26 06:52:13 crc kubenswrapper[4492]: I1126 06:52:13.090596 4492 util.go:48] "No 
ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Nov 26 06:52:13 crc kubenswrapper[4492]: I1126 06:52:13.091458 4492 status_manager.go:851] "Failed to get status for pod" podUID="028bd749-7124-44a5-b43a-61c4378df60d" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 192.168.25.180:6443: connect: connection refused" Nov 26 06:52:13 crc kubenswrapper[4492]: I1126 06:52:13.220523 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/028bd749-7124-44a5-b43a-61c4378df60d-kube-api-access\") pod \"028bd749-7124-44a5-b43a-61c4378df60d\" (UID: \"028bd749-7124-44a5-b43a-61c4378df60d\") " Nov 26 06:52:13 crc kubenswrapper[4492]: I1126 06:52:13.220573 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/028bd749-7124-44a5-b43a-61c4378df60d-kubelet-dir\") pod \"028bd749-7124-44a5-b43a-61c4378df60d\" (UID: \"028bd749-7124-44a5-b43a-61c4378df60d\") " Nov 26 06:52:13 crc kubenswrapper[4492]: I1126 06:52:13.220613 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/028bd749-7124-44a5-b43a-61c4378df60d-var-lock\") pod \"028bd749-7124-44a5-b43a-61c4378df60d\" (UID: \"028bd749-7124-44a5-b43a-61c4378df60d\") " Nov 26 06:52:13 crc kubenswrapper[4492]: I1126 06:52:13.220835 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/028bd749-7124-44a5-b43a-61c4378df60d-var-lock" (OuterVolumeSpecName: "var-lock") pod "028bd749-7124-44a5-b43a-61c4378df60d" (UID: "028bd749-7124-44a5-b43a-61c4378df60d"). InnerVolumeSpecName "var-lock". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 06:52:13 crc kubenswrapper[4492]: I1126 06:52:13.220959 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/028bd749-7124-44a5-b43a-61c4378df60d-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "028bd749-7124-44a5-b43a-61c4378df60d" (UID: "028bd749-7124-44a5-b43a-61c4378df60d"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 06:52:13 crc kubenswrapper[4492]: I1126 06:52:13.226767 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/028bd749-7124-44a5-b43a-61c4378df60d-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "028bd749-7124-44a5-b43a-61c4378df60d" (UID: "028bd749-7124-44a5-b43a-61c4378df60d"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:52:13 crc kubenswrapper[4492]: I1126 06:52:13.321546 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/028bd749-7124-44a5-b43a-61c4378df60d-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 26 06:52:13 crc kubenswrapper[4492]: I1126 06:52:13.321577 4492 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/028bd749-7124-44a5-b43a-61c4378df60d-kubelet-dir\") on node \"crc\" DevicePath \"\"" Nov 26 06:52:13 crc kubenswrapper[4492]: I1126 06:52:13.321586 4492 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/028bd749-7124-44a5-b43a-61c4378df60d-var-lock\") on node \"crc\" DevicePath \"\"" Nov 26 06:52:13 crc kubenswrapper[4492]: I1126 06:52:13.912462 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"028bd749-7124-44a5-b43a-61c4378df60d","Type":"ContainerDied","Data":"67654424075af15c9d0edec927cff8a458552356648e2a32014ea4ca8234044b"} Nov 26 06:52:13 crc kubenswrapper[4492]: I1126 06:52:13.912693 4492 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="67654424075af15c9d0edec927cff8a458552356648e2a32014ea4ca8234044b" Nov 26 06:52:13 crc kubenswrapper[4492]: I1126 06:52:13.912584 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Nov 26 06:52:13 crc kubenswrapper[4492]: I1126 06:52:13.948925 4492 status_manager.go:851] "Failed to get status for pod" podUID="028bd749-7124-44a5-b43a-61c4378df60d" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 192.168.25.180:6443: connect: connection refused" Nov 26 06:52:13 crc kubenswrapper[4492]: I1126 06:52:13.952935 4492 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Nov 26 06:52:13 crc kubenswrapper[4492]: I1126 06:52:13.953725 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 06:52:13 crc kubenswrapper[4492]: I1126 06:52:13.954155 4492 status_manager.go:851] "Failed to get status for pod" podUID="028bd749-7124-44a5-b43a-61c4378df60d" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 192.168.25.180:6443: connect: connection refused" Nov 26 06:52:13 crc kubenswrapper[4492]: I1126 06:52:13.954588 4492 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 192.168.25.180:6443: connect: connection refused" Nov 26 06:52:14 crc kubenswrapper[4492]: I1126 06:52:14.029193 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Nov 26 06:52:14 crc kubenswrapper[4492]: I1126 06:52:14.029333 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Nov 26 06:52:14 crc kubenswrapper[4492]: I1126 06:52:14.029372 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Nov 26 06:52:14 crc kubenswrapper[4492]: I1126 06:52:14.029324 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 06:52:14 crc kubenswrapper[4492]: I1126 06:52:14.029389 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir" (OuterVolumeSpecName: "cert-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "cert-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 06:52:14 crc kubenswrapper[4492]: I1126 06:52:14.029414 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "audit-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 06:52:14 crc kubenswrapper[4492]: I1126 06:52:14.029725 4492 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") on node \"crc\" DevicePath \"\"" Nov 26 06:52:14 crc kubenswrapper[4492]: I1126 06:52:14.029749 4492 reconciler_common.go:293] "Volume detached for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") on node \"crc\" DevicePath \"\"" Nov 26 06:52:14 crc kubenswrapper[4492]: I1126 06:52:14.029758 4492 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") on node \"crc\" DevicePath \"\"" Nov 26 06:52:14 crc kubenswrapper[4492]: I1126 06:52:14.441444 4492 status_manager.go:851] "Failed to get status for pod" podUID="028bd749-7124-44a5-b43a-61c4378df60d" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 192.168.25.180:6443: connect: connection refused" Nov 26 06:52:14 crc kubenswrapper[4492]: I1126 06:52:14.441709 4492 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 192.168.25.180:6443: connect: connection refused" Nov 26 06:52:14 crc kubenswrapper[4492]: I1126 06:52:14.447032 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f4b27818a5e8e43d0dc095d08835c792" path="/var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/volumes" Nov 26 06:52:14 crc kubenswrapper[4492]: I1126 06:52:14.923636 4492 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Nov 26 06:52:14 crc kubenswrapper[4492]: I1126 06:52:14.925047 4492 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="70a3d5141782b7c4dcb2ef96a026e1fb4f4ef101c967f64a20ece097bbdf007f" exitCode=0 Nov 26 06:52:14 crc kubenswrapper[4492]: I1126 06:52:14.925116 4492 scope.go:117] "RemoveContainer" containerID="ef8e05b7e0643e9159acdd474c0f1ed97db182b0a7ed4f21b475ce6e4c051f52" Nov 26 06:52:14 crc kubenswrapper[4492]: I1126 06:52:14.925287 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 06:52:14 crc kubenswrapper[4492]: I1126 06:52:14.926425 4492 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 192.168.25.180:6443: connect: connection refused" Nov 26 06:52:14 crc kubenswrapper[4492]: I1126 06:52:14.926629 4492 status_manager.go:851] "Failed to get status for pod" podUID="028bd749-7124-44a5-b43a-61c4378df60d" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 192.168.25.180:6443: connect: connection refused" Nov 26 06:52:14 crc kubenswrapper[4492]: I1126 06:52:14.927863 4492 status_manager.go:851] "Failed to get status for pod" podUID="028bd749-7124-44a5-b43a-61c4378df60d" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 192.168.25.180:6443: connect: connection refused" Nov 26 06:52:14 crc kubenswrapper[4492]: I1126 06:52:14.928135 4492 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 192.168.25.180:6443: connect: connection refused" Nov 26 06:52:14 crc kubenswrapper[4492]: I1126 06:52:14.943484 4492 scope.go:117] "RemoveContainer" containerID="7ca15652692a4dd6b01f7ec02b8ff37ac41ed740c4c47d451e0742aa80d0c028" Nov 26 06:52:14 crc kubenswrapper[4492]: I1126 06:52:14.953921 4492 scope.go:117] "RemoveContainer" containerID="1684e95c263df92c92efdd2240417c071f977b58d825abd2155277d7cce1fd57" Nov 26 06:52:14 crc kubenswrapper[4492]: I1126 06:52:14.965238 4492 scope.go:117] "RemoveContainer" containerID="15f4e01e3336948991062e895549de16371af485e1cc1f32591b5dc4fc62472b" Nov 26 06:52:14 crc kubenswrapper[4492]: I1126 06:52:14.978163 4492 scope.go:117] "RemoveContainer" containerID="70a3d5141782b7c4dcb2ef96a026e1fb4f4ef101c967f64a20ece097bbdf007f" Nov 26 06:52:14 crc kubenswrapper[4492]: I1126 06:52:14.989298 4492 scope.go:117] "RemoveContainer" containerID="4421dd0243f7239dfa2ede847b9cb5230663e69a214dca3dfb1a4763ea76b233" Nov 26 06:52:15 crc kubenswrapper[4492]: I1126 06:52:15.008168 4492 scope.go:117] "RemoveContainer" containerID="ef8e05b7e0643e9159acdd474c0f1ed97db182b0a7ed4f21b475ce6e4c051f52" Nov 26 06:52:15 crc kubenswrapper[4492]: E1126 06:52:15.008494 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ef8e05b7e0643e9159acdd474c0f1ed97db182b0a7ed4f21b475ce6e4c051f52\": container with ID starting with ef8e05b7e0643e9159acdd474c0f1ed97db182b0a7ed4f21b475ce6e4c051f52 not found: ID does not exist" containerID="ef8e05b7e0643e9159acdd474c0f1ed97db182b0a7ed4f21b475ce6e4c051f52" Nov 26 06:52:15 crc kubenswrapper[4492]: I1126 06:52:15.008530 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ef8e05b7e0643e9159acdd474c0f1ed97db182b0a7ed4f21b475ce6e4c051f52"} err="failed to get container status \"ef8e05b7e0643e9159acdd474c0f1ed97db182b0a7ed4f21b475ce6e4c051f52\": rpc error: code = NotFound desc = could not find container 
\"ef8e05b7e0643e9159acdd474c0f1ed97db182b0a7ed4f21b475ce6e4c051f52\": container with ID starting with ef8e05b7e0643e9159acdd474c0f1ed97db182b0a7ed4f21b475ce6e4c051f52 not found: ID does not exist" Nov 26 06:52:15 crc kubenswrapper[4492]: I1126 06:52:15.008554 4492 scope.go:117] "RemoveContainer" containerID="7ca15652692a4dd6b01f7ec02b8ff37ac41ed740c4c47d451e0742aa80d0c028" Nov 26 06:52:15 crc kubenswrapper[4492]: E1126 06:52:15.008752 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7ca15652692a4dd6b01f7ec02b8ff37ac41ed740c4c47d451e0742aa80d0c028\": container with ID starting with 7ca15652692a4dd6b01f7ec02b8ff37ac41ed740c4c47d451e0742aa80d0c028 not found: ID does not exist" containerID="7ca15652692a4dd6b01f7ec02b8ff37ac41ed740c4c47d451e0742aa80d0c028" Nov 26 06:52:15 crc kubenswrapper[4492]: I1126 06:52:15.008775 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7ca15652692a4dd6b01f7ec02b8ff37ac41ed740c4c47d451e0742aa80d0c028"} err="failed to get container status \"7ca15652692a4dd6b01f7ec02b8ff37ac41ed740c4c47d451e0742aa80d0c028\": rpc error: code = NotFound desc = could not find container \"7ca15652692a4dd6b01f7ec02b8ff37ac41ed740c4c47d451e0742aa80d0c028\": container with ID starting with 7ca15652692a4dd6b01f7ec02b8ff37ac41ed740c4c47d451e0742aa80d0c028 not found: ID does not exist" Nov 26 06:52:15 crc kubenswrapper[4492]: I1126 06:52:15.008788 4492 scope.go:117] "RemoveContainer" containerID="1684e95c263df92c92efdd2240417c071f977b58d825abd2155277d7cce1fd57" Nov 26 06:52:15 crc kubenswrapper[4492]: E1126 06:52:15.008952 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1684e95c263df92c92efdd2240417c071f977b58d825abd2155277d7cce1fd57\": container with ID starting with 1684e95c263df92c92efdd2240417c071f977b58d825abd2155277d7cce1fd57 not found: ID does not exist" containerID="1684e95c263df92c92efdd2240417c071f977b58d825abd2155277d7cce1fd57" Nov 26 06:52:15 crc kubenswrapper[4492]: I1126 06:52:15.008972 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1684e95c263df92c92efdd2240417c071f977b58d825abd2155277d7cce1fd57"} err="failed to get container status \"1684e95c263df92c92efdd2240417c071f977b58d825abd2155277d7cce1fd57\": rpc error: code = NotFound desc = could not find container \"1684e95c263df92c92efdd2240417c071f977b58d825abd2155277d7cce1fd57\": container with ID starting with 1684e95c263df92c92efdd2240417c071f977b58d825abd2155277d7cce1fd57 not found: ID does not exist" Nov 26 06:52:15 crc kubenswrapper[4492]: I1126 06:52:15.008987 4492 scope.go:117] "RemoveContainer" containerID="15f4e01e3336948991062e895549de16371af485e1cc1f32591b5dc4fc62472b" Nov 26 06:52:15 crc kubenswrapper[4492]: E1126 06:52:15.009153 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"15f4e01e3336948991062e895549de16371af485e1cc1f32591b5dc4fc62472b\": container with ID starting with 15f4e01e3336948991062e895549de16371af485e1cc1f32591b5dc4fc62472b not found: ID does not exist" containerID="15f4e01e3336948991062e895549de16371af485e1cc1f32591b5dc4fc62472b" Nov 26 06:52:15 crc kubenswrapper[4492]: I1126 06:52:15.009205 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"15f4e01e3336948991062e895549de16371af485e1cc1f32591b5dc4fc62472b"} 
err="failed to get container status \"15f4e01e3336948991062e895549de16371af485e1cc1f32591b5dc4fc62472b\": rpc error: code = NotFound desc = could not find container \"15f4e01e3336948991062e895549de16371af485e1cc1f32591b5dc4fc62472b\": container with ID starting with 15f4e01e3336948991062e895549de16371af485e1cc1f32591b5dc4fc62472b not found: ID does not exist" Nov 26 06:52:15 crc kubenswrapper[4492]: I1126 06:52:15.009220 4492 scope.go:117] "RemoveContainer" containerID="70a3d5141782b7c4dcb2ef96a026e1fb4f4ef101c967f64a20ece097bbdf007f" Nov 26 06:52:15 crc kubenswrapper[4492]: E1126 06:52:15.009402 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"70a3d5141782b7c4dcb2ef96a026e1fb4f4ef101c967f64a20ece097bbdf007f\": container with ID starting with 70a3d5141782b7c4dcb2ef96a026e1fb4f4ef101c967f64a20ece097bbdf007f not found: ID does not exist" containerID="70a3d5141782b7c4dcb2ef96a026e1fb4f4ef101c967f64a20ece097bbdf007f" Nov 26 06:52:15 crc kubenswrapper[4492]: I1126 06:52:15.009420 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"70a3d5141782b7c4dcb2ef96a026e1fb4f4ef101c967f64a20ece097bbdf007f"} err="failed to get container status \"70a3d5141782b7c4dcb2ef96a026e1fb4f4ef101c967f64a20ece097bbdf007f\": rpc error: code = NotFound desc = could not find container \"70a3d5141782b7c4dcb2ef96a026e1fb4f4ef101c967f64a20ece097bbdf007f\": container with ID starting with 70a3d5141782b7c4dcb2ef96a026e1fb4f4ef101c967f64a20ece097bbdf007f not found: ID does not exist" Nov 26 06:52:15 crc kubenswrapper[4492]: I1126 06:52:15.009431 4492 scope.go:117] "RemoveContainer" containerID="4421dd0243f7239dfa2ede847b9cb5230663e69a214dca3dfb1a4763ea76b233" Nov 26 06:52:15 crc kubenswrapper[4492]: E1126 06:52:15.009592 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4421dd0243f7239dfa2ede847b9cb5230663e69a214dca3dfb1a4763ea76b233\": container with ID starting with 4421dd0243f7239dfa2ede847b9cb5230663e69a214dca3dfb1a4763ea76b233 not found: ID does not exist" containerID="4421dd0243f7239dfa2ede847b9cb5230663e69a214dca3dfb1a4763ea76b233" Nov 26 06:52:15 crc kubenswrapper[4492]: I1126 06:52:15.009610 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4421dd0243f7239dfa2ede847b9cb5230663e69a214dca3dfb1a4763ea76b233"} err="failed to get container status \"4421dd0243f7239dfa2ede847b9cb5230663e69a214dca3dfb1a4763ea76b233\": rpc error: code = NotFound desc = could not find container \"4421dd0243f7239dfa2ede847b9cb5230663e69a214dca3dfb1a4763ea76b233\": container with ID starting with 4421dd0243f7239dfa2ede847b9cb5230663e69a214dca3dfb1a4763ea76b233 not found: ID does not exist" Nov 26 06:52:19 crc kubenswrapper[4492]: E1126 06:52:19.411698 4492 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 192.168.25.180:6443: connect: connection refused" Nov 26 06:52:19 crc kubenswrapper[4492]: E1126 06:52:19.412899 4492 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 192.168.25.180:6443: connect: connection refused" Nov 26 06:52:19 crc kubenswrapper[4492]: E1126 06:52:19.413211 4492 controller.go:195] "Failed to update lease" err="Put 
\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 192.168.25.180:6443: connect: connection refused" Nov 26 06:52:19 crc kubenswrapper[4492]: E1126 06:52:19.413395 4492 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 192.168.25.180:6443: connect: connection refused" Nov 26 06:52:19 crc kubenswrapper[4492]: E1126 06:52:19.413597 4492 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 192.168.25.180:6443: connect: connection refused" Nov 26 06:52:19 crc kubenswrapper[4492]: I1126 06:52:19.413622 4492 controller.go:115] "failed to update lease using latest lease, fallback to ensure lease" err="failed 5 attempts to update lease" Nov 26 06:52:19 crc kubenswrapper[4492]: E1126 06:52:19.413793 4492 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 192.168.25.180:6443: connect: connection refused" interval="200ms" Nov 26 06:52:19 crc kubenswrapper[4492]: E1126 06:52:19.614528 4492 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 192.168.25.180:6443: connect: connection refused" interval="400ms" Nov 26 06:52:19 crc kubenswrapper[4492]: E1126 06:52:19.888355 4492 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 192.168.25.180:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.187b7bebde6036b3 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-26 06:52:12.024059571 +0000 UTC m=+227.907947870,LastTimestamp:2025-11-26 06:52:12.024059571 +0000 UTC m=+227.907947870,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Nov 26 06:52:20 crc kubenswrapper[4492]: E1126 06:52:20.015311 4492 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 192.168.25.180:6443: connect: connection refused" interval="800ms" Nov 26 06:52:20 crc kubenswrapper[4492]: E1126 06:52:20.816210 4492 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 192.168.25.180:6443: connect: connection refused" interval="1.6s" Nov 26 06:52:22 crc kubenswrapper[4492]: E1126 
06:52:22.416855 4492 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 192.168.25.180:6443: connect: connection refused" interval="3.2s"
Nov 26 06:52:24 crc kubenswrapper[4492]: I1126 06:52:24.441439 4492 status_manager.go:851] "Failed to get status for pod" podUID="028bd749-7124-44a5-b43a-61c4378df60d" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 192.168.25.180:6443: connect: connection refused"
Nov 26 06:52:24 crc kubenswrapper[4492]: I1126 06:52:24.988450 4492 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log"
Nov 26 06:52:24 crc kubenswrapper[4492]: I1126 06:52:24.988546 4492 generic.go:334] "Generic (PLEG): container finished" podID="f614b9022728cf315e60c057852e563e" containerID="a8187b933b520c7a9c1c7f798f841f3892c249f52eddd13c0c7585a8bc916f20" exitCode=1
Nov 26 06:52:24 crc kubenswrapper[4492]: I1126 06:52:24.988584 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerDied","Data":"a8187b933b520c7a9c1c7f798f841f3892c249f52eddd13c0c7585a8bc916f20"}
Nov 26 06:52:24 crc kubenswrapper[4492]: I1126 06:52:24.989321 4492 scope.go:117] "RemoveContainer" containerID="a8187b933b520c7a9c1c7f798f841f3892c249f52eddd13c0c7585a8bc916f20"
Nov 26 06:52:24 crc kubenswrapper[4492]: I1126 06:52:24.989757 4492 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 192.168.25.180:6443: connect: connection refused"
Nov 26 06:52:24 crc kubenswrapper[4492]: I1126 06:52:24.990211 4492 status_manager.go:851] "Failed to get status for pod" podUID="028bd749-7124-44a5-b43a-61c4378df60d" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 192.168.25.180:6443: connect: connection refused"
Nov 26 06:52:25 crc kubenswrapper[4492]: I1126 06:52:25.437813 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 26 06:52:25 crc kubenswrapper[4492]: I1126 06:52:25.439048 4492 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 192.168.25.180:6443: connect: connection refused"
Nov 26 06:52:25 crc kubenswrapper[4492]: I1126 06:52:25.439555 4492 status_manager.go:851] "Failed to get status for pod" podUID="028bd749-7124-44a5-b43a-61c4378df60d" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 192.168.25.180:6443: connect: connection refused"
Nov 26 06:52:25 crc kubenswrapper[4492]: I1126 06:52:25.450023 4492 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="995e57c0-8e79-4857-8451-c7f7b51a05d3"
Nov 26 06:52:25 crc kubenswrapper[4492]: I1126 06:52:25.450053 4492 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="995e57c0-8e79-4857-8451-c7f7b51a05d3"
Nov 26 06:52:25 crc kubenswrapper[4492]: E1126 06:52:25.450459 4492 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 192.168.25.180:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 26 06:52:25 crc kubenswrapper[4492]: I1126 06:52:25.450844 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 26 06:52:25 crc kubenswrapper[4492]: W1126 06:52:25.463978 4492 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod71bb4a3aecc4ba5b26c4b7318770ce13.slice/crio-1815df0c77ac43db8c14c87e0f70c317ac7de799df5935445b2b8498349e8a5b WatchSource:0}: Error finding container 1815df0c77ac43db8c14c87e0f70c317ac7de799df5935445b2b8498349e8a5b: Status 404 returned error can't find the container with id 1815df0c77ac43db8c14c87e0f70c317ac7de799df5935445b2b8498349e8a5b
Nov 26 06:52:25 crc kubenswrapper[4492]: E1126 06:52:25.618003 4492 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 192.168.25.180:6443: connect: connection refused" interval="6.4s"
Nov 26 06:52:25 crc kubenswrapper[4492]: I1126 06:52:25.995896 4492 generic.go:334] "Generic (PLEG): container finished" podID="71bb4a3aecc4ba5b26c4b7318770ce13" containerID="d106243158dc487e184436bdd84a4180300ceadc998c9028080ca41c9bccfd5d" exitCode=0
Nov 26 06:52:25 crc kubenswrapper[4492]: I1126 06:52:25.995997 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerDied","Data":"d106243158dc487e184436bdd84a4180300ceadc998c9028080ca41c9bccfd5d"}
Nov 26 06:52:25 crc kubenswrapper[4492]: I1126 06:52:25.996185 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"1815df0c77ac43db8c14c87e0f70c317ac7de799df5935445b2b8498349e8a5b"}
Nov 26 06:52:25 crc kubenswrapper[4492]: I1126 06:52:25.996433 4492 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="995e57c0-8e79-4857-8451-c7f7b51a05d3"
Nov 26 06:52:25 crc kubenswrapper[4492]: I1126 06:52:25.996452 4492 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="995e57c0-8e79-4857-8451-c7f7b51a05d3"
Nov 26 06:52:25 crc kubenswrapper[4492]: E1126 06:52:25.996806 4492 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 192.168.25.180:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 26 06:52:25 crc kubenswrapper[4492]: I1126 06:52:25.997068 4492 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 192.168.25.180:6443: connect: connection refused"
Nov 26 06:52:25 crc kubenswrapper[4492]: I1126 06:52:25.997470 4492 status_manager.go:851] "Failed to get status for pod" podUID="028bd749-7124-44a5-b43a-61c4378df60d" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 192.168.25.180:6443: connect: connection refused"
Nov 26 06:52:26 crc kubenswrapper[4492]: I1126 06:52:26.001365 4492 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log"
Nov 26 06:52:26 crc kubenswrapper[4492]: I1126 06:52:26.001420 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"11b442262a35e7c817d1d30e9324841a2bb991d539609061f3d5d0bb27446257"}
Nov 26 06:52:26 crc kubenswrapper[4492]: I1126 06:52:26.002206 4492 status_manager.go:851] "Failed to get status for pod" podUID="028bd749-7124-44a5-b43a-61c4378df60d" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 192.168.25.180:6443: connect: connection refused"
Nov 26 06:52:26 crc kubenswrapper[4492]: I1126 06:52:26.002501 4492 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 192.168.25.180:6443: connect: connection refused"
Nov 26 06:52:27 crc kubenswrapper[4492]: I1126 06:52:27.012077 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"26eaf45bd19d266fa1877ff09f9205861b258e639a3dc68fcf8fd8db46e8070e"}
Nov 26 06:52:27 crc kubenswrapper[4492]: I1126 06:52:27.012469 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"cbbca3f5db6e0b6a5bfdfdb22a5d291eae3cdb33605a8f8f6d617fafc9b83325"}
Nov 26 06:52:27 crc kubenswrapper[4492]: I1126 06:52:27.012483 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"0fa054512ebd7b34572a2363342326729a18fec2f8f8677e16904b2885fed497"}
Nov 26 06:52:27 crc kubenswrapper[4492]: I1126 06:52:27.012494 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"b390ed8b39e73833250f5d7d14354c69b71897d5b5bb0de103deaf73ce5add5a"}
Nov 26 06:52:27 crc kubenswrapper[4492]: I1126 06:52:27.012503 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"a31abafb812550ff0f240d7376a7c2bdb889786b2b97b7823bd5a9827457039d"}
Nov 26 06:52:27 crc kubenswrapper[4492]: I1126 06:52:27.012747 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 26 06:52:27 crc kubenswrapper[4492]: I1126 06:52:27.012818 4492 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="995e57c0-8e79-4857-8451-c7f7b51a05d3"
Nov 26 06:52:27 crc kubenswrapper[4492]: I1126 06:52:27.012837 4492 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="995e57c0-8e79-4857-8451-c7f7b51a05d3"
Nov 26 06:52:27 crc kubenswrapper[4492]: I1126 06:52:27.354110 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 26 06:52:30 crc kubenswrapper[4492]: I1126 06:52:30.454223 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 26 06:52:30 crc kubenswrapper[4492]: I1126 06:52:30.460359 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 26 06:52:30 crc kubenswrapper[4492]: I1126 06:52:30.465623 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 26 06:52:32 crc kubenswrapper[4492]: I1126 06:52:32.520420 4492 kubelet.go:1914] "Deleted mirror pod because it is outdated" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 26 06:52:33 crc kubenswrapper[4492]: I1126 06:52:33.052774 4492 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="995e57c0-8e79-4857-8451-c7f7b51a05d3"
Nov 26 06:52:33 crc kubenswrapper[4492]: I1126 06:52:33.052812 4492 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="995e57c0-8e79-4857-8451-c7f7b51a05d3"
Nov 26 06:52:33 crc kubenswrapper[4492]: I1126 06:52:33.056435 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 26 06:52:33 crc kubenswrapper[4492]: I1126 06:52:33.432778 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 06:52:34 crc kubenswrapper[4492]: I1126 06:52:34.057467 4492 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="995e57c0-8e79-4857-8451-c7f7b51a05d3" Nov 26 06:52:34 crc kubenswrapper[4492]: I1126 06:52:34.057834 4492 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="995e57c0-8e79-4857-8451-c7f7b51a05d3" Nov 26 06:52:34 crc kubenswrapper[4492]: I1126 06:52:34.451094 4492 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="aaa609f3-d8e7-4b8d-92bf-ddefad9cf6c4" Nov 26 06:52:37 crc kubenswrapper[4492]: I1126 06:52:37.356857 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 06:52:42 crc kubenswrapper[4492]: I1126 06:52:42.907592 4492 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Nov 26 06:52:43 crc kubenswrapper[4492]: I1126 06:52:43.056589 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Nov 26 06:52:43 crc kubenswrapper[4492]: I1126 06:52:43.250506 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Nov 26 06:52:43 crc kubenswrapper[4492]: I1126 06:52:43.266560 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Nov 26 06:52:43 crc kubenswrapper[4492]: I1126 06:52:43.307004 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Nov 26 06:52:43 crc kubenswrapper[4492]: I1126 06:52:43.440289 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Nov 26 06:52:43 crc kubenswrapper[4492]: I1126 06:52:43.567662 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Nov 26 06:52:43 crc kubenswrapper[4492]: I1126 06:52:43.669591 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Nov 26 06:52:44 crc kubenswrapper[4492]: I1126 06:52:44.193374 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Nov 26 06:52:44 crc kubenswrapper[4492]: I1126 06:52:44.325286 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Nov 26 06:52:44 crc kubenswrapper[4492]: I1126 06:52:44.327874 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Nov 26 06:52:44 crc kubenswrapper[4492]: I1126 06:52:44.441232 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Nov 26 06:52:44 crc kubenswrapper[4492]: I1126 06:52:44.632430 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Nov 26 06:52:44 crc kubenswrapper[4492]: I1126 06:52:44.813871 
4492 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160 Nov 26 06:52:45 crc kubenswrapper[4492]: I1126 06:52:45.067453 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Nov 26 06:52:45 crc kubenswrapper[4492]: I1126 06:52:45.083044 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Nov 26 06:52:45 crc kubenswrapper[4492]: I1126 06:52:45.128502 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Nov 26 06:52:45 crc kubenswrapper[4492]: I1126 06:52:45.137077 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Nov 26 06:52:45 crc kubenswrapper[4492]: I1126 06:52:45.251252 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Nov 26 06:52:45 crc kubenswrapper[4492]: I1126 06:52:45.271652 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Nov 26 06:52:45 crc kubenswrapper[4492]: I1126 06:52:45.437750 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Nov 26 06:52:45 crc kubenswrapper[4492]: I1126 06:52:45.438580 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Nov 26 06:52:45 crc kubenswrapper[4492]: I1126 06:52:45.618611 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Nov 26 06:52:45 crc kubenswrapper[4492]: I1126 06:52:45.636392 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Nov 26 06:52:45 crc kubenswrapper[4492]: I1126 06:52:45.742458 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Nov 26 06:52:45 crc kubenswrapper[4492]: I1126 06:52:45.813309 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Nov 26 06:52:46 crc kubenswrapper[4492]: I1126 06:52:46.038673 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Nov 26 06:52:46 crc kubenswrapper[4492]: I1126 06:52:46.059938 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Nov 26 06:52:46 crc kubenswrapper[4492]: I1126 06:52:46.114067 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Nov 26 06:52:46 crc kubenswrapper[4492]: I1126 06:52:46.141938 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Nov 26 06:52:46 crc kubenswrapper[4492]: I1126 06:52:46.150314 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Nov 26 06:52:46 crc kubenswrapper[4492]: I1126 06:52:46.157573 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Nov 26 06:52:46 crc 
kubenswrapper[4492]: I1126 06:52:46.172723 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Nov 26 06:52:46 crc kubenswrapper[4492]: I1126 06:52:46.277459 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Nov 26 06:52:46 crc kubenswrapper[4492]: I1126 06:52:46.456040 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Nov 26 06:52:46 crc kubenswrapper[4492]: I1126 06:52:46.596831 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Nov 26 06:52:46 crc kubenswrapper[4492]: I1126 06:52:46.635848 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Nov 26 06:52:46 crc kubenswrapper[4492]: I1126 06:52:46.948740 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Nov 26 06:52:47 crc kubenswrapper[4492]: I1126 06:52:47.013840 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Nov 26 06:52:47 crc kubenswrapper[4492]: I1126 06:52:47.023091 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Nov 26 06:52:47 crc kubenswrapper[4492]: I1126 06:52:47.085879 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Nov 26 06:52:47 crc kubenswrapper[4492]: I1126 06:52:47.243202 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Nov 26 06:52:47 crc kubenswrapper[4492]: I1126 06:52:47.293933 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Nov 26 06:52:47 crc kubenswrapper[4492]: I1126 06:52:47.343680 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Nov 26 06:52:47 crc kubenswrapper[4492]: I1126 06:52:47.352926 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Nov 26 06:52:47 crc kubenswrapper[4492]: I1126 06:52:47.404475 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Nov 26 06:52:47 crc kubenswrapper[4492]: I1126 06:52:47.432984 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Nov 26 06:52:47 crc kubenswrapper[4492]: I1126 06:52:47.437954 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Nov 26 06:52:47 crc kubenswrapper[4492]: I1126 06:52:47.460162 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Nov 26 06:52:47 crc kubenswrapper[4492]: I1126 06:52:47.617567 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Nov 26 06:52:47 crc kubenswrapper[4492]: I1126 06:52:47.809275 4492 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-marketplace"/"marketplace-trusted-ca" Nov 26 06:52:47 crc kubenswrapper[4492]: I1126 06:52:47.850158 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Nov 26 06:52:48 crc kubenswrapper[4492]: I1126 06:52:48.023195 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Nov 26 06:52:48 crc kubenswrapper[4492]: I1126 06:52:48.049542 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Nov 26 06:52:48 crc kubenswrapper[4492]: I1126 06:52:48.154719 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Nov 26 06:52:48 crc kubenswrapper[4492]: I1126 06:52:48.213851 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Nov 26 06:52:48 crc kubenswrapper[4492]: I1126 06:52:48.244676 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Nov 26 06:52:48 crc kubenswrapper[4492]: I1126 06:52:48.273455 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Nov 26 06:52:48 crc kubenswrapper[4492]: I1126 06:52:48.385892 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Nov 26 06:52:48 crc kubenswrapper[4492]: I1126 06:52:48.406441 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Nov 26 06:52:48 crc kubenswrapper[4492]: I1126 06:52:48.439668 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Nov 26 06:52:48 crc kubenswrapper[4492]: I1126 06:52:48.543273 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Nov 26 06:52:48 crc kubenswrapper[4492]: I1126 06:52:48.585622 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Nov 26 06:52:48 crc kubenswrapper[4492]: I1126 06:52:48.633751 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Nov 26 06:52:48 crc kubenswrapper[4492]: I1126 06:52:48.756935 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Nov 26 06:52:48 crc kubenswrapper[4492]: I1126 06:52:48.764320 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Nov 26 06:52:48 crc kubenswrapper[4492]: I1126 06:52:48.770669 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Nov 26 06:52:48 crc kubenswrapper[4492]: I1126 06:52:48.821285 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Nov 26 06:52:48 crc kubenswrapper[4492]: I1126 06:52:48.835141 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Nov 26 06:52:48 crc kubenswrapper[4492]: 
Nov 26 06:52:48 crc kubenswrapper[4492]: I1126 06:52:48.870564 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert"
Nov 26 06:52:48 crc kubenswrapper[4492]: I1126 06:52:48.877916 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1"
Nov 26 06:52:48 crc kubenswrapper[4492]: I1126 06:52:48.888139 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt"
Nov 26 06:52:48 crc kubenswrapper[4492]: I1126 06:52:48.941160 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt"
Nov 26 06:52:48 crc kubenswrapper[4492]: I1126 06:52:48.942239 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt"
Nov 26 06:52:48 crc kubenswrapper[4492]: I1126 06:52:48.948036 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls"
Nov 26 06:52:48 crc kubenswrapper[4492]: I1126 06:52:48.969956 4492 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160
Nov 26 06:52:49 crc kubenswrapper[4492]: I1126 06:52:49.200823 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets"
Nov 26 06:52:49 crc kubenswrapper[4492]: I1126 06:52:49.354667 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca"
Nov 26 06:52:49 crc kubenswrapper[4492]: I1126 06:52:49.365344 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config"
Nov 26 06:52:49 crc kubenswrapper[4492]: I1126 06:52:49.406850 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt"
Nov 26 06:52:49 crc kubenswrapper[4492]: I1126 06:52:49.543092 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls"
Nov 26 06:52:49 crc kubenswrapper[4492]: I1126 06:52:49.549408 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle"
Nov 26 06:52:49 crc kubenswrapper[4492]: I1126 06:52:49.563541 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics"
Nov 26 06:52:49 crc kubenswrapper[4492]: I1126 06:52:49.599449 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx"
Nov 26 06:52:49 crc kubenswrapper[4492]: I1126 06:52:49.726027 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle"
Nov 26 06:52:49 crc kubenswrapper[4492]: I1126 06:52:49.799813 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr"
Nov 26 06:52:49 crc kubenswrapper[4492]: I1126 06:52:49.980503 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides"
Nov 26 06:52:50 crc kubenswrapper[4492]: I1126 06:52:50.235068 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert"
Nov 26 06:52:50 crc kubenswrapper[4492]: I1126 06:52:50.339352 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh"
Nov 26 06:52:50 crc kubenswrapper[4492]: I1126 06:52:50.365826 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config"
Nov 26 06:52:50 crc kubenswrapper[4492]: I1126 06:52:50.387316 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token"
Nov 26 06:52:50 crc kubenswrapper[4492]: I1126 06:52:50.387464 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4"
Nov 26 06:52:50 crc kubenswrapper[4492]: I1126 06:52:50.420892 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt"
Nov 26 06:52:50 crc kubenswrapper[4492]: I1126 06:52:50.556236 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt"
Nov 26 06:52:50 crc kubenswrapper[4492]: I1126 06:52:50.569901 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert"
Nov 26 06:52:50 crc kubenswrapper[4492]: I1126 06:52:50.642681 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl"
Nov 26 06:52:50 crc kubenswrapper[4492]: I1126 06:52:50.649301 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt"
Nov 26 06:52:50 crc kubenswrapper[4492]: I1126 06:52:50.653312 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit"
Nov 26 06:52:50 crc kubenswrapper[4492]: I1126 06:52:50.696377 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt"
Nov 26 06:52:50 crc kubenswrapper[4492]: I1126 06:52:50.846097 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt"
Nov 26 06:52:50 crc kubenswrapper[4492]: I1126 06:52:50.943544 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt"
Nov 26 06:52:50 crc kubenswrapper[4492]: I1126 06:52:50.963144 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt"
Nov 26 06:52:50 crc kubenswrapper[4492]: I1126 06:52:50.985606 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca"
Nov 26 06:52:51 crc kubenswrapper[4492]: I1126 06:52:51.100628 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt"
Nov 26 06:52:51 crc kubenswrapper[4492]: I1126 06:52:51.103790 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd"
Nov 26 06:52:51 crc kubenswrapper[4492]: I1126 06:52:51.164235 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert"
Nov 26 06:52:51 crc kubenswrapper[4492]: I1126 06:52:51.215775 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg"
Nov 26 06:52:51 crc kubenswrapper[4492]: I1126 06:52:51.221865 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config"
Nov 26 06:52:51 crc kubenswrapper[4492]: I1126 06:52:51.267097 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs"
Nov 26 06:52:51 crc kubenswrapper[4492]: I1126 06:52:51.300959 4492 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160
Nov 26 06:52:51 crc kubenswrapper[4492]: I1126 06:52:51.306047 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls"
Nov 26 06:52:51 crc kubenswrapper[4492]: I1126 06:52:51.355073 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert"
Nov 26 06:52:51 crc kubenswrapper[4492]: I1126 06:52:51.422046 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert"
Nov 26 06:52:51 crc kubenswrapper[4492]: I1126 06:52:51.450234 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt"
Nov 26 06:52:51 crc kubenswrapper[4492]: I1126 06:52:51.511735 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt"
Nov 26 06:52:51 crc kubenswrapper[4492]: I1126 06:52:51.538988 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls"
Nov 26 06:52:51 crc kubenswrapper[4492]: I1126 06:52:51.617547 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt"
Nov 26 06:52:51 crc kubenswrapper[4492]: I1126 06:52:51.706283 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt"
Nov 26 06:52:51 crc kubenswrapper[4492]: I1126 06:52:51.728325 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt"
Nov 26 06:52:51 crc kubenswrapper[4492]: I1126 06:52:51.759805 4492 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160
Nov 26 06:52:51 crc kubenswrapper[4492]: I1126 06:52:51.767515 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq"
Nov 26 06:52:51 crc kubenswrapper[4492]: I1126 06:52:51.790755 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert"
Nov 26 06:52:51 crc kubenswrapper[4492]: I1126 06:52:51.981057 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config"
Nov 26 06:52:52 crc kubenswrapper[4492]: I1126 06:52:52.179049 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr"
Nov 26 06:52:52 crc kubenswrapper[4492]: I1126 06:52:52.186217 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle"
Nov 26 06:52:52 crc kubenswrapper[4492]: I1126 06:52:52.255644 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt"
Nov 26 06:52:52 crc kubenswrapper[4492]: I1126 06:52:52.329872 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt"
Nov 26 06:52:52 crc kubenswrapper[4492]: I1126 06:52:52.338696 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca"
Nov 26 06:52:52 crc kubenswrapper[4492]: I1126 06:52:52.489304 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c"
Nov 26 06:52:52 crc kubenswrapper[4492]: I1126 06:52:52.602769 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt"
Nov 26 06:52:52 crc kubenswrapper[4492]: I1126 06:52:52.628351 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert"
Nov 26 06:52:52 crc kubenswrapper[4492]: I1126 06:52:52.646376 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt"
Nov 26 06:52:52 crc kubenswrapper[4492]: I1126 06:52:52.655900 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5"
Nov 26 06:52:52 crc kubenswrapper[4492]: I1126 06:52:52.671148 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config"
Nov 26 06:52:52 crc kubenswrapper[4492]: I1126 06:52:52.712719 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default"
Nov 26 06:52:52 crc kubenswrapper[4492]: I1126 06:52:52.749902 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf"
Nov 26 06:52:52 crc kubenswrapper[4492]: I1126 06:52:52.760065 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy"
Nov 26 06:52:52 crc kubenswrapper[4492]: I1126 06:52:52.778730 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p"
Nov 26 06:52:52 crc kubenswrapper[4492]: I1126 06:52:52.810806 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error"
Nov 26 06:52:52 crc kubenswrapper[4492]: I1126 06:52:52.886954 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config"
Nov 26 06:52:52 crc kubenswrapper[4492]: I1126 06:52:52.995023 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd"
Nov 26 06:52:53 crc kubenswrapper[4492]: I1126 06:52:53.006138 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt"
Nov 26 06:52:53 crc kubenswrapper[4492]: I1126 06:52:53.059009 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb"
Nov 26 06:52:53 crc kubenswrapper[4492]: I1126 06:52:53.083063 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt"
Nov 26 06:52:53 crc kubenswrapper[4492]: I1126 06:52:53.194063 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt"
from object-"openshift-authentication"/"v4-0-config-user-template-login" Nov 26 06:52:53 crc kubenswrapper[4492]: I1126 06:52:53.288966 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Nov 26 06:52:53 crc kubenswrapper[4492]: I1126 06:52:53.415394 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Nov 26 06:52:53 crc kubenswrapper[4492]: I1126 06:52:53.420337 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Nov 26 06:52:53 crc kubenswrapper[4492]: I1126 06:52:53.429389 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Nov 26 06:52:53 crc kubenswrapper[4492]: I1126 06:52:53.462329 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Nov 26 06:52:53 crc kubenswrapper[4492]: I1126 06:52:53.465719 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Nov 26 06:52:53 crc kubenswrapper[4492]: I1126 06:52:53.533134 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Nov 26 06:52:53 crc kubenswrapper[4492]: I1126 06:52:53.651323 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Nov 26 06:52:53 crc kubenswrapper[4492]: I1126 06:52:53.657875 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Nov 26 06:52:53 crc kubenswrapper[4492]: I1126 06:52:53.842416 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Nov 26 06:52:53 crc kubenswrapper[4492]: I1126 06:52:53.881554 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Nov 26 06:52:53 crc kubenswrapper[4492]: I1126 06:52:53.902577 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Nov 26 06:52:53 crc kubenswrapper[4492]: I1126 06:52:53.989520 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Nov 26 06:52:53 crc kubenswrapper[4492]: I1126 06:52:53.991785 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Nov 26 06:52:54 crc kubenswrapper[4492]: I1126 06:52:54.006552 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Nov 26 06:52:54 crc kubenswrapper[4492]: I1126 06:52:54.021912 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Nov 26 06:52:54 crc kubenswrapper[4492]: I1126 06:52:54.123412 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Nov 26 06:52:54 crc kubenswrapper[4492]: I1126 06:52:54.235680 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Nov 26 06:52:54 crc kubenswrapper[4492]: I1126 06:52:54.236073 4492 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-network-node-identity"/"ovnkube-identity-cm" Nov 26 06:52:54 crc kubenswrapper[4492]: I1126 06:52:54.236360 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Nov 26 06:52:54 crc kubenswrapper[4492]: I1126 06:52:54.326123 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Nov 26 06:52:54 crc kubenswrapper[4492]: I1126 06:52:54.330769 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Nov 26 06:52:54 crc kubenswrapper[4492]: I1126 06:52:54.390001 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Nov 26 06:52:54 crc kubenswrapper[4492]: I1126 06:52:54.521760 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Nov 26 06:52:54 crc kubenswrapper[4492]: I1126 06:52:54.574294 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Nov 26 06:52:54 crc kubenswrapper[4492]: I1126 06:52:54.652825 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Nov 26 06:52:54 crc kubenswrapper[4492]: I1126 06:52:54.766447 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Nov 26 06:52:54 crc kubenswrapper[4492]: I1126 06:52:54.795954 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Nov 26 06:52:54 crc kubenswrapper[4492]: I1126 06:52:54.796755 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Nov 26 06:52:54 crc kubenswrapper[4492]: I1126 06:52:54.816679 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Nov 26 06:52:54 crc kubenswrapper[4492]: I1126 06:52:54.834027 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Nov 26 06:52:54 crc kubenswrapper[4492]: I1126 06:52:54.868674 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Nov 26 06:52:54 crc kubenswrapper[4492]: I1126 06:52:54.914536 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Nov 26 06:52:54 crc kubenswrapper[4492]: I1126 06:52:54.918575 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Nov 26 06:52:55 crc kubenswrapper[4492]: I1126 06:52:55.067941 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Nov 26 06:52:55 crc kubenswrapper[4492]: I1126 06:52:55.090318 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Nov 26 06:52:55 crc kubenswrapper[4492]: I1126 06:52:55.117964 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Nov 26 06:52:55 crc kubenswrapper[4492]: I1126 06:52:55.264728 4492 reflector.go:368] Caches populated for 
*v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Nov 26 06:52:55 crc kubenswrapper[4492]: I1126 06:52:55.371628 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Nov 26 06:52:55 crc kubenswrapper[4492]: I1126 06:52:55.431778 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Nov 26 06:52:55 crc kubenswrapper[4492]: I1126 06:52:55.448823 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Nov 26 06:52:55 crc kubenswrapper[4492]: I1126 06:52:55.455360 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Nov 26 06:52:55 crc kubenswrapper[4492]: I1126 06:52:55.600955 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Nov 26 06:52:55 crc kubenswrapper[4492]: I1126 06:52:55.602052 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Nov 26 06:52:55 crc kubenswrapper[4492]: I1126 06:52:55.762419 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Nov 26 06:52:55 crc kubenswrapper[4492]: I1126 06:52:55.814004 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Nov 26 06:52:55 crc kubenswrapper[4492]: I1126 06:52:55.815369 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Nov 26 06:52:55 crc kubenswrapper[4492]: I1126 06:52:55.857496 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Nov 26 06:52:55 crc kubenswrapper[4492]: I1126 06:52:55.871821 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Nov 26 06:52:55 crc kubenswrapper[4492]: I1126 06:52:55.872804 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Nov 26 06:52:55 crc kubenswrapper[4492]: I1126 06:52:55.895480 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Nov 26 06:52:55 crc kubenswrapper[4492]: I1126 06:52:55.927100 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Nov 26 06:52:55 crc kubenswrapper[4492]: I1126 06:52:55.981834 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Nov 26 06:52:55 crc kubenswrapper[4492]: I1126 06:52:55.996186 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Nov 26 06:52:56 crc kubenswrapper[4492]: I1126 06:52:56.014165 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Nov 26 06:52:56 crc kubenswrapper[4492]: I1126 06:52:56.033083 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Nov 26 06:52:56 crc kubenswrapper[4492]: I1126 
06:52:56.152848 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Nov 26 06:52:56 crc kubenswrapper[4492]: I1126 06:52:56.235036 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Nov 26 06:52:56 crc kubenswrapper[4492]: I1126 06:52:56.265804 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Nov 26 06:52:56 crc kubenswrapper[4492]: I1126 06:52:56.277584 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Nov 26 06:52:56 crc kubenswrapper[4492]: I1126 06:52:56.281937 4492 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66 Nov 26 06:52:56 crc kubenswrapper[4492]: I1126 06:52:56.285571 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Nov 26 06:52:56 crc kubenswrapper[4492]: I1126 06:52:56.285634 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Nov 26 06:52:56 crc kubenswrapper[4492]: I1126 06:52:56.289793 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 06:52:56 crc kubenswrapper[4492]: I1126 06:52:56.298148 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Nov 26 06:52:56 crc kubenswrapper[4492]: I1126 06:52:56.303258 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=24.303243231 podStartE2EDuration="24.303243231s" podCreationTimestamp="2025-11-26 06:52:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:52:56.299799277 +0000 UTC m=+272.183687575" watchObservedRunningTime="2025-11-26 06:52:56.303243231 +0000 UTC m=+272.187131529" Nov 26 06:52:56 crc kubenswrapper[4492]: I1126 06:52:56.364548 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Nov 26 06:52:56 crc kubenswrapper[4492]: I1126 06:52:56.416982 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Nov 26 06:52:56 crc kubenswrapper[4492]: I1126 06:52:56.456112 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Nov 26 06:52:56 crc kubenswrapper[4492]: I1126 06:52:56.520898 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Nov 26 06:52:56 crc kubenswrapper[4492]: I1126 06:52:56.539435 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Nov 26 06:52:56 crc kubenswrapper[4492]: I1126 06:52:56.546840 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Nov 26 06:52:56 crc kubenswrapper[4492]: I1126 06:52:56.557314 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Nov 26 06:52:56 crc kubenswrapper[4492]: I1126 06:52:56.635052 4492 reflector.go:368] Caches populated for 
*v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Nov 26 06:52:56 crc kubenswrapper[4492]: I1126 06:52:56.668868 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Nov 26 06:52:56 crc kubenswrapper[4492]: I1126 06:52:56.694277 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Nov 26 06:52:56 crc kubenswrapper[4492]: I1126 06:52:56.695949 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Nov 26 06:52:56 crc kubenswrapper[4492]: I1126 06:52:56.813936 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Nov 26 06:52:56 crc kubenswrapper[4492]: I1126 06:52:56.915791 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Nov 26 06:52:56 crc kubenswrapper[4492]: I1126 06:52:56.936624 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Nov 26 06:52:56 crc kubenswrapper[4492]: I1126 06:52:56.959714 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Nov 26 06:52:57 crc kubenswrapper[4492]: I1126 06:52:57.059895 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Nov 26 06:52:57 crc kubenswrapper[4492]: I1126 06:52:57.091686 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Nov 26 06:52:57 crc kubenswrapper[4492]: I1126 06:52:57.109044 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Nov 26 06:52:57 crc kubenswrapper[4492]: I1126 06:52:57.143056 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Nov 26 06:52:57 crc kubenswrapper[4492]: I1126 06:52:57.192990 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Nov 26 06:52:57 crc kubenswrapper[4492]: I1126 06:52:57.280705 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Nov 26 06:52:57 crc kubenswrapper[4492]: I1126 06:52:57.281119 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Nov 26 06:52:57 crc kubenswrapper[4492]: I1126 06:52:57.319794 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Nov 26 06:52:57 crc kubenswrapper[4492]: I1126 06:52:57.439088 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Nov 26 06:52:57 crc kubenswrapper[4492]: I1126 06:52:57.594168 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Nov 26 06:52:57 crc kubenswrapper[4492]: I1126 06:52:57.594618 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Nov 26 06:52:57 crc kubenswrapper[4492]: I1126 06:52:57.717134 
4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Nov 26 06:52:57 crc kubenswrapper[4492]: I1126 06:52:57.831242 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Nov 26 06:52:57 crc kubenswrapper[4492]: I1126 06:52:57.926062 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Nov 26 06:52:57 crc kubenswrapper[4492]: I1126 06:52:57.982848 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Nov 26 06:52:58 crc kubenswrapper[4492]: I1126 06:52:58.179993 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Nov 26 06:52:58 crc kubenswrapper[4492]: I1126 06:52:58.326074 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Nov 26 06:52:58 crc kubenswrapper[4492]: I1126 06:52:58.355980 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Nov 26 06:52:58 crc kubenswrapper[4492]: I1126 06:52:58.431672 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Nov 26 06:52:58 crc kubenswrapper[4492]: I1126 06:52:58.748523 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Nov 26 06:52:59 crc kubenswrapper[4492]: I1126 06:52:59.192853 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Nov 26 06:52:59 crc kubenswrapper[4492]: I1126 06:52:59.223714 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Nov 26 06:52:59 crc kubenswrapper[4492]: I1126 06:52:59.511770 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Nov 26 06:52:59 crc kubenswrapper[4492]: I1126 06:52:59.516642 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Nov 26 06:53:00 crc kubenswrapper[4492]: I1126 06:53:00.225861 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Nov 26 06:53:00 crc kubenswrapper[4492]: I1126 06:53:00.554104 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Nov 26 06:53:00 crc kubenswrapper[4492]: I1126 06:53:00.624800 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Nov 26 06:53:01 crc kubenswrapper[4492]: I1126 06:53:01.179865 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Nov 26 06:53:05 crc kubenswrapper[4492]: I1126 06:53:05.234899 4492 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Nov 26 06:53:05 crc kubenswrapper[4492]: I1126 06:53:05.235161 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" 
Nov 26 06:53:05 crc kubenswrapper[4492]: I1126 06:53:05.234899 4492 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"]
Nov 26 06:53:05 crc kubenswrapper[4492]: I1126 06:53:05.235161 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" containerID="cri-o://59908fb92c27e15a770101bd454504536eea7a5e5f614c90e4790995fef2e4bb" gracePeriod=5
Nov 26 06:53:10 crc kubenswrapper[4492]: I1126 06:53:10.789880 4492 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log"
Nov 26 06:53:10 crc kubenswrapper[4492]: I1126 06:53:10.789970 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Nov 26 06:53:10 crc kubenswrapper[4492]: I1126 06:53:10.921375 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") "
Nov 26 06:53:10 crc kubenswrapper[4492]: I1126 06:53:10.921437 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") "
Nov 26 06:53:10 crc kubenswrapper[4492]: I1126 06:53:10.921479 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") "
Nov 26 06:53:10 crc kubenswrapper[4492]: I1126 06:53:10.921494 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") "
Nov 26 06:53:10 crc kubenswrapper[4492]: I1126 06:53:10.921514 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") "
Nov 26 06:53:10 crc kubenswrapper[4492]: I1126 06:53:10.921584 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log" (OuterVolumeSpecName: "var-log") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-log". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 26 06:53:10 crc kubenswrapper[4492]: I1126 06:53:10.921630 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock" (OuterVolumeSpecName: "var-lock") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-lock". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 26 06:53:10 crc kubenswrapper[4492]: I1126 06:53:10.921662 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 26 06:53:10 crc kubenswrapper[4492]: I1126 06:53:10.921722 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests" (OuterVolumeSpecName: "manifests") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "manifests". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 26 06:53:10 crc kubenswrapper[4492]: I1126 06:53:10.921734 4492 reconciler_common.go:293] "Volume detached for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") on node \"crc\" DevicePath \"\""
Nov 26 06:53:10 crc kubenswrapper[4492]: I1126 06:53:10.921747 4492 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") on node \"crc\" DevicePath \"\""
Nov 26 06:53:10 crc kubenswrapper[4492]: I1126 06:53:10.930343 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir" (OuterVolumeSpecName: "pod-resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "pod-resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 26 06:53:11 crc kubenswrapper[4492]: I1126 06:53:11.023026 4492 reconciler_common.go:293] "Volume detached for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") on node \"crc\" DevicePath \"\""
Nov 26 06:53:11 crc kubenswrapper[4492]: I1126 06:53:11.023057 4492 reconciler_common.go:293] "Volume detached for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") on node \"crc\" DevicePath \"\""
Nov 26 06:53:11 crc kubenswrapper[4492]: I1126 06:53:11.023072 4492 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") on node \"crc\" DevicePath \"\""
Nov 26 06:53:11 crc kubenswrapper[4492]: I1126 06:53:11.267741 4492 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log"
Nov 26 06:53:11 crc kubenswrapper[4492]: I1126 06:53:11.267803 4492 generic.go:334] "Generic (PLEG): container finished" podID="f85e55b1a89d02b0cb034b1ea31ed45a" containerID="59908fb92c27e15a770101bd454504536eea7a5e5f614c90e4790995fef2e4bb" exitCode=137
Nov 26 06:53:11 crc kubenswrapper[4492]: I1126 06:53:11.267870 4492 scope.go:117] "RemoveContainer" containerID="59908fb92c27e15a770101bd454504536eea7a5e5f614c90e4790995fef2e4bb"
Nov 26 06:53:11 crc kubenswrapper[4492]: I1126 06:53:11.267893 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Nov 26 06:53:11 crc kubenswrapper[4492]: I1126 06:53:11.285134 4492 scope.go:117] "RemoveContainer" containerID="59908fb92c27e15a770101bd454504536eea7a5e5f614c90e4790995fef2e4bb"
Nov 26 06:53:11 crc kubenswrapper[4492]: E1126 06:53:11.285708 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"59908fb92c27e15a770101bd454504536eea7a5e5f614c90e4790995fef2e4bb\": container with ID starting with 59908fb92c27e15a770101bd454504536eea7a5e5f614c90e4790995fef2e4bb not found: ID does not exist" containerID="59908fb92c27e15a770101bd454504536eea7a5e5f614c90e4790995fef2e4bb"
Nov 26 06:53:11 crc kubenswrapper[4492]: I1126 06:53:11.285748 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"59908fb92c27e15a770101bd454504536eea7a5e5f614c90e4790995fef2e4bb"} err="failed to get container status \"59908fb92c27e15a770101bd454504536eea7a5e5f614c90e4790995fef2e4bb\": rpc error: code = NotFound desc = could not find container \"59908fb92c27e15a770101bd454504536eea7a5e5f614c90e4790995fef2e4bb\": container with ID starting with 59908fb92c27e15a770101bd454504536eea7a5e5f614c90e4790995fef2e4bb not found: ID does not exist"
Nov 26 06:53:12 crc kubenswrapper[4492]: I1126 06:53:12.452624 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" path="/var/lib/kubelet/pods/f85e55b1a89d02b0cb034b1ea31ed45a/volumes"
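The exitCode=137 above is the expected outcome of the grace-period kill: the startup-monitor did not exit within gracePeriod=5, so the runtime escalated to SIGKILL, and 137 = 128 + 9. The controller-manager shutdowns later in this log exit 0 because they stop cleanly inside their 30-second grace period. A small illustration of that exit-code convention (not kubelet code; Unix-only because of the syscall package):

    package main

    import (
    	"fmt"
    	"syscall"
    )

    // describeExit decodes the shell/container convention that exit codes
    // above 128 mean "terminated by signal (code - 128)".
    func describeExit(code int) string {
    	if code > 128 {
    		sig := syscall.Signal(code - 128)
    		return fmt.Sprintf("killed by signal %d (%s)", code-128, sig)
    	}
    	return fmt.Sprintf("exited with status %d", code)
    }

    func main() {
    	fmt.Println(describeExit(137)) // SIGKILL after the 5s grace period above
    	fmt.Println(describeExit(0))   // clean shutdown within the grace period
    }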
Nov 26 06:53:15 crc kubenswrapper[4492]: I1126 06:53:15.831017 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt"
Nov 26 06:53:22 crc kubenswrapper[4492]: I1126 06:53:22.953885 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-kv6rz"]
Nov 26 06:53:22 crc kubenswrapper[4492]: I1126 06:53:22.954654 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-879f6c89f-kv6rz" podUID="ee15157b-cc93-408f-9520-421d06b48f34" containerName="controller-manager" containerID="cri-o://66936f63fe7392105e3ddcd903a696fd3ad5c9ec8bef0c73cf4b0b580be3072e" gracePeriod=30
Nov 26 06:53:22 crc kubenswrapper[4492]: I1126 06:53:22.961925 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-z6czh"]
Nov 26 06:53:22 crc kubenswrapper[4492]: I1126 06:53:22.962244 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-z6czh" podUID="aa815e7b-9b9b-4dd9-bd08-0104024e227e" containerName="route-controller-manager" containerID="cri-o://f4e48ddeeebc6f76416d6331f43349705081de493624e306ede6c0cb4bd72cf4" gracePeriod=30
Nov 26 06:53:22 crc kubenswrapper[4492]: I1126 06:53:22.977515 4492 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-z6czh container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.16:8443/healthz\": dial tcp 10.217.0.16:8443: connect: connection refused" start-of-body=
Nov 26 06:53:22 crc kubenswrapper[4492]: I1126 06:53:22.977556 4492 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-z6czh" podUID="aa815e7b-9b9b-4dd9-bd08-0104024e227e" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.16:8443/healthz\": dial tcp 10.217.0.16:8443: connect: connection refused"
Nov 26 06:53:23 crc kubenswrapper[4492]: I1126 06:53:23.296528 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-kv6rz"
Nov 26 06:53:23 crc kubenswrapper[4492]: I1126 06:53:23.337801 4492 generic.go:334] "Generic (PLEG): container finished" podID="ee15157b-cc93-408f-9520-421d06b48f34" containerID="66936f63fe7392105e3ddcd903a696fd3ad5c9ec8bef0c73cf4b0b580be3072e" exitCode=0
Nov 26 06:53:23 crc kubenswrapper[4492]: I1126 06:53:23.337938 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-kv6rz"
Nov 26 06:53:23 crc kubenswrapper[4492]: I1126 06:53:23.337928 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-kv6rz" event={"ID":"ee15157b-cc93-408f-9520-421d06b48f34","Type":"ContainerDied","Data":"66936f63fe7392105e3ddcd903a696fd3ad5c9ec8bef0c73cf4b0b580be3072e"}
Nov 26 06:53:23 crc kubenswrapper[4492]: I1126 06:53:23.338068 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-kv6rz" event={"ID":"ee15157b-cc93-408f-9520-421d06b48f34","Type":"ContainerDied","Data":"5b1914e4b84c489a17be73614ede47288428721d6e1eb3455a82fca8da501a17"}
Nov 26 06:53:23 crc kubenswrapper[4492]: I1126 06:53:23.338090 4492 scope.go:117] "RemoveContainer" containerID="66936f63fe7392105e3ddcd903a696fd3ad5c9ec8bef0c73cf4b0b580be3072e"
Nov 26 06:53:23 crc kubenswrapper[4492]: I1126 06:53:23.338569 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-z6czh"
Nov 26 06:53:23 crc kubenswrapper[4492]: I1126 06:53:23.341750 4492 generic.go:334] "Generic (PLEG): container finished" podID="aa815e7b-9b9b-4dd9-bd08-0104024e227e" containerID="f4e48ddeeebc6f76416d6331f43349705081de493624e306ede6c0cb4bd72cf4" exitCode=0
Nov 26 06:53:23 crc kubenswrapper[4492]: I1126 06:53:23.341781 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-z6czh" event={"ID":"aa815e7b-9b9b-4dd9-bd08-0104024e227e","Type":"ContainerDied","Data":"f4e48ddeeebc6f76416d6331f43349705081de493624e306ede6c0cb4bd72cf4"}
Nov 26 06:53:23 crc kubenswrapper[4492]: I1126 06:53:23.341797 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-z6czh" event={"ID":"aa815e7b-9b9b-4dd9-bd08-0104024e227e","Type":"ContainerDied","Data":"5204e0abc55469ef1611bc884b70ae8f02af347523d07a13dc953d3df88ebd17"}
Nov 26 06:53:23 crc kubenswrapper[4492]: I1126 06:53:23.373684 4492 scope.go:117] "RemoveContainer" containerID="66936f63fe7392105e3ddcd903a696fd3ad5c9ec8bef0c73cf4b0b580be3072e"
Nov 26 06:53:23 crc kubenswrapper[4492]: E1126 06:53:23.379947 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"66936f63fe7392105e3ddcd903a696fd3ad5c9ec8bef0c73cf4b0b580be3072e\": container with ID starting with 66936f63fe7392105e3ddcd903a696fd3ad5c9ec8bef0c73cf4b0b580be3072e not found: ID does not exist" containerID="66936f63fe7392105e3ddcd903a696fd3ad5c9ec8bef0c73cf4b0b580be3072e"
Nov 26 06:53:23 crc kubenswrapper[4492]: I1126 06:53:23.379990 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"66936f63fe7392105e3ddcd903a696fd3ad5c9ec8bef0c73cf4b0b580be3072e"} err="failed to get container status \"66936f63fe7392105e3ddcd903a696fd3ad5c9ec8bef0c73cf4b0b580be3072e\": rpc error: code = NotFound desc = could not find container \"66936f63fe7392105e3ddcd903a696fd3ad5c9ec8bef0c73cf4b0b580be3072e\": container with ID starting with 66936f63fe7392105e3ddcd903a696fd3ad5c9ec8bef0c73cf4b0b580be3072e not found: ID does not exist"
Nov 26 06:53:23 crc kubenswrapper[4492]: I1126 06:53:23.380017 4492 scope.go:117] "RemoveContainer" containerID="f4e48ddeeebc6f76416d6331f43349705081de493624e306ede6c0cb4bd72cf4"
Nov 26 06:53:23 crc kubenswrapper[4492]: I1126 06:53:23.395441 4492 scope.go:117] "RemoveContainer" containerID="f4e48ddeeebc6f76416d6331f43349705081de493624e306ede6c0cb4bd72cf4"
Nov 26 06:53:23 crc kubenswrapper[4492]: E1126 06:53:23.399159 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f4e48ddeeebc6f76416d6331f43349705081de493624e306ede6c0cb4bd72cf4\": container with ID starting with f4e48ddeeebc6f76416d6331f43349705081de493624e306ede6c0cb4bd72cf4 not found: ID does not exist" containerID="f4e48ddeeebc6f76416d6331f43349705081de493624e306ede6c0cb4bd72cf4"
Nov 26 06:53:23 crc kubenswrapper[4492]: I1126 06:53:23.399218 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f4e48ddeeebc6f76416d6331f43349705081de493624e306ede6c0cb4bd72cf4"} err="failed to get container status \"f4e48ddeeebc6f76416d6331f43349705081de493624e306ede6c0cb4bd72cf4\": rpc error: code = NotFound desc = could not find container \"f4e48ddeeebc6f76416d6331f43349705081de493624e306ede6c0cb4bd72cf4\": container with ID starting with f4e48ddeeebc6f76416d6331f43349705081de493624e306ede6c0cb4bd72cf4 not found: ID does not exist"
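The E-level "ContainerStatus from runtime service failed ... NotFound" entries here and earlier are a benign race rather than a real failure: by the time "RemoveContainer" asks CRI-O for a final status over gRPC, the container has already been garbage-collected, so the deletion is effectively complete. A sketch of the tolerant check a CRI client can apply, assuming err came back from a gRPC call to the runtime:

    package criutil

    import (
    	"google.golang.org/grpc/codes"
    	"google.golang.org/grpc/status"
    )

    // removeGone treats "already deleted" as success when tearing down a
    // container, mirroring how the NotFound errors above are harmless.
    func removeGone(err error) error {
    	if err == nil || status.Code(err) == codes.NotFound {
    		return nil // gone is as good as removed
    	}
    	return err // anything else is a real runtime failure
    }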
Nov 26 06:53:23 crc kubenswrapper[4492]: I1126 06:53:23.448151 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aa815e7b-9b9b-4dd9-bd08-0104024e227e-config\") pod \"aa815e7b-9b9b-4dd9-bd08-0104024e227e\" (UID: \"aa815e7b-9b9b-4dd9-bd08-0104024e227e\") "
Nov 26 06:53:23 crc kubenswrapper[4492]: I1126 06:53:23.448517 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ee15157b-cc93-408f-9520-421d06b48f34-config\") pod \"ee15157b-cc93-408f-9520-421d06b48f34\" (UID: \"ee15157b-cc93-408f-9520-421d06b48f34\") "
Nov 26 06:53:23 crc kubenswrapper[4492]: I1126 06:53:23.448547 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/aa815e7b-9b9b-4dd9-bd08-0104024e227e-serving-cert\") pod \"aa815e7b-9b9b-4dd9-bd08-0104024e227e\" (UID: \"aa815e7b-9b9b-4dd9-bd08-0104024e227e\") "
Nov 26 06:53:23 crc kubenswrapper[4492]: I1126 06:53:23.448608 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ee15157b-cc93-408f-9520-421d06b48f34-serving-cert\") pod \"ee15157b-cc93-408f-9520-421d06b48f34\" (UID: \"ee15157b-cc93-408f-9520-421d06b48f34\") "
Nov 26 06:53:23 crc kubenswrapper[4492]: I1126 06:53:23.448643 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/aa815e7b-9b9b-4dd9-bd08-0104024e227e-client-ca\") pod \"aa815e7b-9b9b-4dd9-bd08-0104024e227e\" (UID: \"aa815e7b-9b9b-4dd9-bd08-0104024e227e\") "
Nov 26 06:53:23 crc kubenswrapper[4492]: I1126 06:53:23.448764 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wx5tz\" (UniqueName: \"kubernetes.io/projected/aa815e7b-9b9b-4dd9-bd08-0104024e227e-kube-api-access-wx5tz\") pod \"aa815e7b-9b9b-4dd9-bd08-0104024e227e\" (UID: \"aa815e7b-9b9b-4dd9-bd08-0104024e227e\") "
Nov 26 06:53:23 crc kubenswrapper[4492]: I1126 06:53:23.448813 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/ee15157b-cc93-408f-9520-421d06b48f34-client-ca\") pod \"ee15157b-cc93-408f-9520-421d06b48f34\" (UID: \"ee15157b-cc93-408f-9520-421d06b48f34\") "
Nov 26 06:53:23 crc kubenswrapper[4492]: I1126 06:53:23.448842 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/ee15157b-cc93-408f-9520-421d06b48f34-proxy-ca-bundles\") pod \"ee15157b-cc93-408f-9520-421d06b48f34\" (UID: \"ee15157b-cc93-408f-9520-421d06b48f34\") "
Nov 26 06:53:23 crc kubenswrapper[4492]: I1126 06:53:23.448890 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5b7x6\" (UniqueName: \"kubernetes.io/projected/ee15157b-cc93-408f-9520-421d06b48f34-kube-api-access-5b7x6\") pod \"ee15157b-cc93-408f-9520-421d06b48f34\" (UID: \"ee15157b-cc93-408f-9520-421d06b48f34\") "
Nov 26 06:53:23 crc kubenswrapper[4492]: I1126 06:53:23.449143 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/aa815e7b-9b9b-4dd9-bd08-0104024e227e-config" (OuterVolumeSpecName: "config") pod "aa815e7b-9b9b-4dd9-bd08-0104024e227e" (UID: "aa815e7b-9b9b-4dd9-bd08-0104024e227e"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 06:53:23 crc kubenswrapper[4492]: I1126 06:53:23.449473 4492 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aa815e7b-9b9b-4dd9-bd08-0104024e227e-config\") on node \"crc\" DevicePath \"\""
Nov 26 06:53:23 crc kubenswrapper[4492]: I1126 06:53:23.449510 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ee15157b-cc93-408f-9520-421d06b48f34-client-ca" (OuterVolumeSpecName: "client-ca") pod "ee15157b-cc93-408f-9520-421d06b48f34" (UID: "ee15157b-cc93-408f-9520-421d06b48f34"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 06:53:23 crc kubenswrapper[4492]: I1126 06:53:23.449620 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ee15157b-cc93-408f-9520-421d06b48f34-config" (OuterVolumeSpecName: "config") pod "ee15157b-cc93-408f-9520-421d06b48f34" (UID: "ee15157b-cc93-408f-9520-421d06b48f34"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 06:53:23 crc kubenswrapper[4492]: I1126 06:53:23.449963 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/aa815e7b-9b9b-4dd9-bd08-0104024e227e-client-ca" (OuterVolumeSpecName: "client-ca") pod "aa815e7b-9b9b-4dd9-bd08-0104024e227e" (UID: "aa815e7b-9b9b-4dd9-bd08-0104024e227e"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 06:53:23 crc kubenswrapper[4492]: I1126 06:53:23.450347 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ee15157b-cc93-408f-9520-421d06b48f34-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "ee15157b-cc93-408f-9520-421d06b48f34" (UID: "ee15157b-cc93-408f-9520-421d06b48f34"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 06:53:23 crc kubenswrapper[4492]: I1126 06:53:23.455470 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ee15157b-cc93-408f-9520-421d06b48f34-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "ee15157b-cc93-408f-9520-421d06b48f34" (UID: "ee15157b-cc93-408f-9520-421d06b48f34"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 06:53:23 crc kubenswrapper[4492]: I1126 06:53:23.455668 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/aa815e7b-9b9b-4dd9-bd08-0104024e227e-kube-api-access-wx5tz" (OuterVolumeSpecName: "kube-api-access-wx5tz") pod "aa815e7b-9b9b-4dd9-bd08-0104024e227e" (UID: "aa815e7b-9b9b-4dd9-bd08-0104024e227e"). InnerVolumeSpecName "kube-api-access-wx5tz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 06:53:23 crc kubenswrapper[4492]: I1126 06:53:23.455686 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aa815e7b-9b9b-4dd9-bd08-0104024e227e-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "aa815e7b-9b9b-4dd9-bd08-0104024e227e" (UID: "aa815e7b-9b9b-4dd9-bd08-0104024e227e"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 06:53:23 crc kubenswrapper[4492]: I1126 06:53:23.455707 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ee15157b-cc93-408f-9520-421d06b48f34-kube-api-access-5b7x6" (OuterVolumeSpecName: "kube-api-access-5b7x6") pod "ee15157b-cc93-408f-9520-421d06b48f34" (UID: "ee15157b-cc93-408f-9520-421d06b48f34"). InnerVolumeSpecName "kube-api-access-5b7x6". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 06:53:23 crc kubenswrapper[4492]: I1126 06:53:23.549779 4492 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ee15157b-cc93-408f-9520-421d06b48f34-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 26 06:53:23 crc kubenswrapper[4492]: I1126 06:53:23.549818 4492 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/aa815e7b-9b9b-4dd9-bd08-0104024e227e-client-ca\") on node \"crc\" DevicePath \"\""
Nov 26 06:53:23 crc kubenswrapper[4492]: I1126 06:53:23.549831 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wx5tz\" (UniqueName: \"kubernetes.io/projected/aa815e7b-9b9b-4dd9-bd08-0104024e227e-kube-api-access-wx5tz\") on node \"crc\" DevicePath \"\""
Nov 26 06:53:23 crc kubenswrapper[4492]: I1126 06:53:23.549842 4492 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/ee15157b-cc93-408f-9520-421d06b48f34-client-ca\") on node \"crc\" DevicePath \"\""
Nov 26 06:53:23 crc kubenswrapper[4492]: I1126 06:53:23.549852 4492 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/ee15157b-cc93-408f-9520-421d06b48f34-proxy-ca-bundles\") on node \"crc\" DevicePath \"\""
Nov 26 06:53:23 crc kubenswrapper[4492]: I1126 06:53:23.549861 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5b7x6\" (UniqueName: \"kubernetes.io/projected/ee15157b-cc93-408f-9520-421d06b48f34-kube-api-access-5b7x6\") on node \"crc\" DevicePath \"\""
Nov 26 06:53:23 crc kubenswrapper[4492]: I1126 06:53:23.549870 4492 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ee15157b-cc93-408f-9520-421d06b48f34-config\") on node \"crc\" DevicePath \"\""
Nov 26 06:53:23 crc kubenswrapper[4492]: I1126 06:53:23.549878 4492 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/aa815e7b-9b9b-4dd9-bd08-0104024e227e-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 26 06:53:23 crc kubenswrapper[4492]: I1126 06:53:23.671449 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-kv6rz"]
Nov 26 06:53:23 crc kubenswrapper[4492]: I1126 06:53:23.676154 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-kv6rz"]
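Everything kubenswrapper emits in this file is structured klog: a severity/timestamp header followed by a quoted message and key="value" pairs, which makes episodes like the volume teardown above easy to mine mechanically. A rough extractor for those pairs (a reading convenience, not an official parser; the regex is an approximation that does not unescape \" sequences inside values):

    package main

    import (
    	"fmt"
    	"regexp"
    )

    // fields pulls key="value" and key=bareword pairs out of one klog line.
    // Approximate by design: escaped quotes inside values are kept verbatim.
    var fields = regexp.MustCompile(`(\w+)=("(?:[^"\\]|\\.)*"|\S+)`)

    func main() {
    	line := `"Killing container with a grace period" pod="openshift-controller-manager/controller-manager-879f6c89f-kv6rz" containerName="controller-manager" gracePeriod=30`
    	for _, m := range fields.FindAllStringSubmatch(line, -1) {
    		fmt.Printf("%s -> %s\n", m[1], m[2])
    	}
    }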
Nov 26 06:53:24 crc kubenswrapper[4492]: I1126 06:53:24.349560 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-z6czh"
Nov 26 06:53:24 crc kubenswrapper[4492]: I1126 06:53:24.370374 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-z6czh"]
Nov 26 06:53:24 crc kubenswrapper[4492]: I1126 06:53:24.373762 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-z6czh"]
Nov 26 06:53:24 crc kubenswrapper[4492]: I1126 06:53:24.444002 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="aa815e7b-9b9b-4dd9-bd08-0104024e227e" path="/var/lib/kubelet/pods/aa815e7b-9b9b-4dd9-bd08-0104024e227e/volumes"
Nov 26 06:53:24 crc kubenswrapper[4492]: I1126 06:53:24.444788 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ee15157b-cc93-408f-9520-421d06b48f34" path="/var/lib/kubelet/pods/ee15157b-cc93-408f-9520-421d06b48f34/volumes"
Nov 26 06:53:24 crc kubenswrapper[4492]: I1126 06:53:24.842254 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6c5dd59bc9-j7klc"]
Nov 26 06:53:24 crc kubenswrapper[4492]: E1126 06:53:24.842972 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="028bd749-7124-44a5-b43a-61c4378df60d" containerName="installer"
Nov 26 06:53:24 crc kubenswrapper[4492]: I1126 06:53:24.842990 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="028bd749-7124-44a5-b43a-61c4378df60d" containerName="installer"
Nov 26 06:53:24 crc kubenswrapper[4492]: E1126 06:53:24.843006 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ee15157b-cc93-408f-9520-421d06b48f34" containerName="controller-manager"
Nov 26 06:53:24 crc kubenswrapper[4492]: I1126 06:53:24.843013 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="ee15157b-cc93-408f-9520-421d06b48f34" containerName="controller-manager"
Nov 26 06:53:24 crc kubenswrapper[4492]: E1126 06:53:24.843025 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor"
Nov 26 06:53:24 crc kubenswrapper[4492]: I1126 06:53:24.843036 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor"
Nov 26 06:53:24 crc kubenswrapper[4492]: E1126 06:53:24.843048 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aa815e7b-9b9b-4dd9-bd08-0104024e227e" containerName="route-controller-manager"
Nov 26 06:53:24 crc kubenswrapper[4492]: I1126 06:53:24.843055 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="aa815e7b-9b9b-4dd9-bd08-0104024e227e" containerName="route-controller-manager"
Nov 26 06:53:24 crc kubenswrapper[4492]: I1126 06:53:24.843219 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="ee15157b-cc93-408f-9520-421d06b48f34" containerName="controller-manager"
Nov 26 06:53:24 crc kubenswrapper[4492]: I1126 06:53:24.843245 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="aa815e7b-9b9b-4dd9-bd08-0104024e227e" containerName="route-controller-manager"
Nov 26 06:53:24 crc kubenswrapper[4492]: I1126 06:53:24.843254 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor"
Nov 26 06:53:24 crc kubenswrapper[4492]: I1126 06:53:24.843265 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="028bd749-7124-44a5-b43a-61c4378df60d" containerName="installer"
Nov 26 06:53:24 crc kubenswrapper[4492]: I1126 06:53:24.843908 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6c5dd59bc9-j7klc"
Nov 26 06:53:24 crc kubenswrapper[4492]: I1126 06:53:24.847236 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-6c944cf5f-j7w8g"]
Nov 26 06:53:24 crc kubenswrapper[4492]: I1126 06:53:24.848002 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6c944cf5f-j7w8g"
Nov 26 06:53:24 crc kubenswrapper[4492]: I1126 06:53:24.850075 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt"
Nov 26 06:53:24 crc kubenswrapper[4492]: I1126 06:53:24.850598 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config"
Nov 26 06:53:24 crc kubenswrapper[4492]: I1126 06:53:24.850805 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca"
Nov 26 06:53:24 crc kubenswrapper[4492]: I1126 06:53:24.850903 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt"
Nov 26 06:53:24 crc kubenswrapper[4492]: I1126 06:53:24.851033 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt"
Nov 26 06:53:24 crc kubenswrapper[4492]: I1126 06:53:24.851149 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca"
Nov 26 06:53:24 crc kubenswrapper[4492]: I1126 06:53:24.851140 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6c5dd59bc9-j7klc"]
Nov 26 06:53:24 crc kubenswrapper[4492]: I1126 06:53:24.851494 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2"
Nov 26 06:53:24 crc kubenswrapper[4492]: I1126 06:53:24.851517 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert"
Nov 26 06:53:24 crc kubenswrapper[4492]: I1126 06:53:24.851756 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config"
Nov 26 06:53:24 crc kubenswrapper[4492]: I1126 06:53:24.851839 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt"
Nov 26 06:53:24 crc kubenswrapper[4492]: I1126 06:53:24.854208 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert"
Nov 26 06:53:24 crc kubenswrapper[4492]: I1126 06:53:24.854585 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c"
Nov 26 06:53:24 crc kubenswrapper[4492]: I1126 06:53:24.859285 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-6c944cf5f-j7w8g"]
Nov 26 06:53:24 crc kubenswrapper[4492]: I1126 06:53:24.861844 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca"
Nov 26 06:53:24 crc kubenswrapper[4492]: I1126 06:53:24.865397 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5d8170b3-5e3e-493a-8cb8-f7119da01476-client-ca\") pod \"controller-manager-6c944cf5f-j7w8g\" (UID: \"5d8170b3-5e3e-493a-8cb8-f7119da01476\") " pod="openshift-controller-manager/controller-manager-6c944cf5f-j7w8g"
Nov 26 06:53:24 crc kubenswrapper[4492]: I1126 06:53:24.865437 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jtmjv\" (UniqueName: \"kubernetes.io/projected/5d8170b3-5e3e-493a-8cb8-f7119da01476-kube-api-access-jtmjv\") pod \"controller-manager-6c944cf5f-j7w8g\" (UID: \"5d8170b3-5e3e-493a-8cb8-f7119da01476\") " pod="openshift-controller-manager/controller-manager-6c944cf5f-j7w8g"
Nov 26 06:53:24 crc kubenswrapper[4492]: I1126 06:53:24.865484 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kzgc9\" (UniqueName: \"kubernetes.io/projected/d931a019-9a9d-4297-aad0-ffebb6ab60f1-kube-api-access-kzgc9\") pod \"route-controller-manager-6c5dd59bc9-j7klc\" (UID: \"d931a019-9a9d-4297-aad0-ffebb6ab60f1\") " pod="openshift-route-controller-manager/route-controller-manager-6c5dd59bc9-j7klc"
Nov 26 06:53:24 crc kubenswrapper[4492]: I1126 06:53:24.865531 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5d8170b3-5e3e-493a-8cb8-f7119da01476-serving-cert\") pod \"controller-manager-6c944cf5f-j7w8g\" (UID: \"5d8170b3-5e3e-493a-8cb8-f7119da01476\") " pod="openshift-controller-manager/controller-manager-6c944cf5f-j7w8g"
Nov 26 06:53:24 crc kubenswrapper[4492]: I1126 06:53:24.865569 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5d8170b3-5e3e-493a-8cb8-f7119da01476-config\") pod \"controller-manager-6c944cf5f-j7w8g\" (UID: \"5d8170b3-5e3e-493a-8cb8-f7119da01476\") " pod="openshift-controller-manager/controller-manager-6c944cf5f-j7w8g"
Nov 26 06:53:24 crc kubenswrapper[4492]: I1126 06:53:24.865603 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d931a019-9a9d-4297-aad0-ffebb6ab60f1-serving-cert\") pod \"route-controller-manager-6c5dd59bc9-j7klc\" (UID: \"d931a019-9a9d-4297-aad0-ffebb6ab60f1\") " pod="openshift-route-controller-manager/route-controller-manager-6c5dd59bc9-j7klc"
Nov 26 06:53:24 crc kubenswrapper[4492]: I1126 06:53:24.865643 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/5d8170b3-5e3e-493a-8cb8-f7119da01476-proxy-ca-bundles\") pod \"controller-manager-6c944cf5f-j7w8g\" (UID: \"5d8170b3-5e3e-493a-8cb8-f7119da01476\") " pod="openshift-controller-manager/controller-manager-6c944cf5f-j7w8g"
Nov 26 06:53:24 crc kubenswrapper[4492]: I1126 06:53:24.865672 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d931a019-9a9d-4297-aad0-ffebb6ab60f1-config\") pod \"route-controller-manager-6c5dd59bc9-j7klc\" (UID: \"d931a019-9a9d-4297-aad0-ffebb6ab60f1\") " pod="openshift-route-controller-manager/route-controller-manager-6c5dd59bc9-j7klc"
Nov 26 06:53:24 crc kubenswrapper[4492]: I1126 06:53:24.865688 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d931a019-9a9d-4297-aad0-ffebb6ab60f1-client-ca\") pod \"route-controller-manager-6c5dd59bc9-j7klc\" (UID: \"d931a019-9a9d-4297-aad0-ffebb6ab60f1\") " pod="openshift-route-controller-manager/route-controller-manager-6c5dd59bc9-j7klc"
Nov 26 06:53:24 crc kubenswrapper[4492]: I1126 06:53:24.966842 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5d8170b3-5e3e-493a-8cb8-f7119da01476-serving-cert\") pod \"controller-manager-6c944cf5f-j7w8g\" (UID: \"5d8170b3-5e3e-493a-8cb8-f7119da01476\") " pod="openshift-controller-manager/controller-manager-6c944cf5f-j7w8g"
Nov 26 06:53:24 crc kubenswrapper[4492]: I1126 06:53:24.966894 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5d8170b3-5e3e-493a-8cb8-f7119da01476-config\") pod \"controller-manager-6c944cf5f-j7w8g\" (UID: \"5d8170b3-5e3e-493a-8cb8-f7119da01476\") " pod="openshift-controller-manager/controller-manager-6c944cf5f-j7w8g"
Nov 26 06:53:24 crc kubenswrapper[4492]: I1126 06:53:24.966929 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d931a019-9a9d-4297-aad0-ffebb6ab60f1-serving-cert\") pod \"route-controller-manager-6c5dd59bc9-j7klc\" (UID: \"d931a019-9a9d-4297-aad0-ffebb6ab60f1\") " pod="openshift-route-controller-manager/route-controller-manager-6c5dd59bc9-j7klc"
Nov 26 06:53:24 crc kubenswrapper[4492]: I1126 06:53:24.966965 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/5d8170b3-5e3e-493a-8cb8-f7119da01476-proxy-ca-bundles\") pod \"controller-manager-6c944cf5f-j7w8g\" (UID: \"5d8170b3-5e3e-493a-8cb8-f7119da01476\") " pod="openshift-controller-manager/controller-manager-6c944cf5f-j7w8g"
Nov 26 06:53:24 crc kubenswrapper[4492]: I1126 06:53:24.966989 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d931a019-9a9d-4297-aad0-ffebb6ab60f1-client-ca\") pod \"route-controller-manager-6c5dd59bc9-j7klc\" (UID: \"d931a019-9a9d-4297-aad0-ffebb6ab60f1\") " pod="openshift-route-controller-manager/route-controller-manager-6c5dd59bc9-j7klc"
Nov 26 06:53:24 crc kubenswrapper[4492]: I1126 06:53:24.967010 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d931a019-9a9d-4297-aad0-ffebb6ab60f1-config\") pod \"route-controller-manager-6c5dd59bc9-j7klc\" (UID: \"d931a019-9a9d-4297-aad0-ffebb6ab60f1\") " pod="openshift-route-controller-manager/route-controller-manager-6c5dd59bc9-j7klc"
Nov 26 06:53:24 crc kubenswrapper[4492]: I1126 06:53:24.967030 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5d8170b3-5e3e-493a-8cb8-f7119da01476-client-ca\") pod \"controller-manager-6c944cf5f-j7w8g\" (UID: \"5d8170b3-5e3e-493a-8cb8-f7119da01476\") " pod="openshift-controller-manager/controller-manager-6c944cf5f-j7w8g"
Nov 26 06:53:24 crc kubenswrapper[4492]: I1126 06:53:24.967049 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jtmjv\" (UniqueName: \"kubernetes.io/projected/5d8170b3-5e3e-493a-8cb8-f7119da01476-kube-api-access-jtmjv\") pod \"controller-manager-6c944cf5f-j7w8g\" (UID: \"5d8170b3-5e3e-493a-8cb8-f7119da01476\") " pod="openshift-controller-manager/controller-manager-6c944cf5f-j7w8g"
Nov 26 06:53:24 crc kubenswrapper[4492]: I1126 06:53:24.967079 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kzgc9\" (UniqueName: \"kubernetes.io/projected/d931a019-9a9d-4297-aad0-ffebb6ab60f1-kube-api-access-kzgc9\") pod \"route-controller-manager-6c5dd59bc9-j7klc\" (UID: \"d931a019-9a9d-4297-aad0-ffebb6ab60f1\") " pod="openshift-route-controller-manager/route-controller-manager-6c5dd59bc9-j7klc"
Nov 26 06:53:24 crc kubenswrapper[4492]: I1126 06:53:24.968468 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5d8170b3-5e3e-493a-8cb8-f7119da01476-client-ca\") pod \"controller-manager-6c944cf5f-j7w8g\" (UID: \"5d8170b3-5e3e-493a-8cb8-f7119da01476\") " pod="openshift-controller-manager/controller-manager-6c944cf5f-j7w8g"
Nov 26 06:53:24 crc kubenswrapper[4492]: I1126 06:53:24.968593 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/5d8170b3-5e3e-493a-8cb8-f7119da01476-proxy-ca-bundles\") pod \"controller-manager-6c944cf5f-j7w8g\" (UID: \"5d8170b3-5e3e-493a-8cb8-f7119da01476\") " pod="openshift-controller-manager/controller-manager-6c944cf5f-j7w8g"
Nov 26 06:53:24 crc kubenswrapper[4492]: I1126 06:53:24.968880 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d931a019-9a9d-4297-aad0-ffebb6ab60f1-config\") pod \"route-controller-manager-6c5dd59bc9-j7klc\" (UID: \"d931a019-9a9d-4297-aad0-ffebb6ab60f1\") " pod="openshift-route-controller-manager/route-controller-manager-6c5dd59bc9-j7klc"
Nov 26 06:53:24 crc kubenswrapper[4492]: I1126 06:53:24.968970 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5d8170b3-5e3e-493a-8cb8-f7119da01476-config\") pod \"controller-manager-6c944cf5f-j7w8g\" (UID: \"5d8170b3-5e3e-493a-8cb8-f7119da01476\") " pod="openshift-controller-manager/controller-manager-6c944cf5f-j7w8g"
Nov 26 06:53:24 crc kubenswrapper[4492]: I1126 06:53:24.969396 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d931a019-9a9d-4297-aad0-ffebb6ab60f1-client-ca\") pod \"route-controller-manager-6c5dd59bc9-j7klc\" (UID: \"d931a019-9a9d-4297-aad0-ffebb6ab60f1\") " pod="openshift-route-controller-manager/route-controller-manager-6c5dd59bc9-j7klc"
Nov 26 06:53:24 crc kubenswrapper[4492]: I1126 06:53:24.973630 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5d8170b3-5e3e-493a-8cb8-f7119da01476-serving-cert\") pod \"controller-manager-6c944cf5f-j7w8g\" (UID: \"5d8170b3-5e3e-493a-8cb8-f7119da01476\") " pod="openshift-controller-manager/controller-manager-6c944cf5f-j7w8g"
Nov 26 06:53:24 crc kubenswrapper[4492]: I1126 06:53:24.973642 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d931a019-9a9d-4297-aad0-ffebb6ab60f1-serving-cert\") pod \"route-controller-manager-6c5dd59bc9-j7klc\" (UID: \"d931a019-9a9d-4297-aad0-ffebb6ab60f1\") " pod="openshift-route-controller-manager/route-controller-manager-6c5dd59bc9-j7klc"
Nov 26 06:53:24 crc kubenswrapper[4492]: I1126 06:53:24.982400 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kzgc9\" (UniqueName: \"kubernetes.io/projected/d931a019-9a9d-4297-aad0-ffebb6ab60f1-kube-api-access-kzgc9\") pod \"route-controller-manager-6c5dd59bc9-j7klc\" (UID: \"d931a019-9a9d-4297-aad0-ffebb6ab60f1\") " pod="openshift-route-controller-manager/route-controller-manager-6c5dd59bc9-j7klc"
Nov 26 06:53:24 crc kubenswrapper[4492]: I1126 06:53:24.982429 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jtmjv\" (UniqueName: \"kubernetes.io/projected/5d8170b3-5e3e-493a-8cb8-f7119da01476-kube-api-access-jtmjv\") pod \"controller-manager-6c944cf5f-j7w8g\" (UID: \"5d8170b3-5e3e-493a-8cb8-f7119da01476\") " pod="openshift-controller-manager/controller-manager-6c944cf5f-j7w8g"
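The VerifyControllerAttachedVolume, "MountVolume started", and "MountVolume.SetUp succeeded" sequence above is the kubelet's volume reconciler working through each declared volume (the config and client-ca configmaps, the serving-cert secret, and the projected kube-api-access-* service-account token) before the sandbox can start. The pod side of that contract looks roughly like the sketch below, with names copied from the mounts above; the surrounding Deployment is elided, and the projected token volume is injected by the API server rather than declared by hand:

    package podspec

    import corev1 "k8s.io/api/core/v1"

    // Volumes as the controller-manager pod above appears to declare them,
    // reconstructed from the log rather than from the actual manifest.
    var volumes = []corev1.Volume{
    	{Name: "config", VolumeSource: corev1.VolumeSource{
    		ConfigMap: &corev1.ConfigMapVolumeSource{
    			LocalObjectReference: corev1.LocalObjectReference{Name: "config"}}}},
    	{Name: "client-ca", VolumeSource: corev1.VolumeSource{
    		ConfigMap: &corev1.ConfigMapVolumeSource{
    			LocalObjectReference: corev1.LocalObjectReference{Name: "client-ca"}}}},
    	{Name: "serving-cert", VolumeSource: corev1.VolumeSource{
    		Secret: &corev1.SecretVolumeSource{SecretName: "serving-cert"}}},
    }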
Nov 26 06:53:25 crc kubenswrapper[4492]: I1126 06:53:25.165030 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6c5dd59bc9-j7klc"
Nov 26 06:53:25 crc kubenswrapper[4492]: I1126 06:53:25.168968 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6c944cf5f-j7w8g"
Nov 26 06:53:25 crc kubenswrapper[4492]: I1126 06:53:25.337594 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6c5dd59bc9-j7klc"]
Nov 26 06:53:25 crc kubenswrapper[4492]: I1126 06:53:25.360461 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6c5dd59bc9-j7klc" event={"ID":"d931a019-9a9d-4297-aad0-ffebb6ab60f1","Type":"ContainerStarted","Data":"61bccd4f16b5f5aa9e8ba8a82ac95b21af9628b122c776ecf30df6b8d22ee4f8"}
Nov 26 06:53:25 crc kubenswrapper[4492]: I1126 06:53:25.364847 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-6c944cf5f-j7w8g"]
Nov 26 06:53:26 crc kubenswrapper[4492]: I1126 06:53:26.367759 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6c5dd59bc9-j7klc" event={"ID":"d931a019-9a9d-4297-aad0-ffebb6ab60f1","Type":"ContainerStarted","Data":"6f9dc0948676e330b760cc1e96c8f72e5d45403a7c48bc11c9398dd62d1d3a36"}
Nov 26 06:53:26 crc kubenswrapper[4492]: I1126 06:53:26.368504 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6c5dd59bc9-j7klc"
Nov 26 06:53:26 crc kubenswrapper[4492]: I1126 06:53:26.370584 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-6c944cf5f-j7w8g" event={"ID":"5d8170b3-5e3e-493a-8cb8-f7119da01476","Type":"ContainerStarted","Data":"7d9036c033cdb5eb2618cf85422db4c77862e9677e94cd8231a6a2021ba545bf"}
Nov 26 06:53:26 crc kubenswrapper[4492]: I1126 06:53:26.370701 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-6c944cf5f-j7w8g" event={"ID":"5d8170b3-5e3e-493a-8cb8-f7119da01476","Type":"ContainerStarted","Data":"5fc0caf1bd5447584abccf5ab183332c917e9ccb888b834fa86384810cabaab8"}
Nov 26 06:53:26 crc kubenswrapper[4492]: I1126 06:53:26.370916 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-6c944cf5f-j7w8g"
Nov 26 06:53:26 crc kubenswrapper[4492]: I1126 06:53:26.374638 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-6c944cf5f-j7w8g"
Nov 26 06:53:26 crc kubenswrapper[4492]: I1126 06:53:26.376029 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6c5dd59bc9-j7klc"
Nov 26 06:53:26 crc kubenswrapper[4492]: I1126 06:53:26.383914 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6c5dd59bc9-j7klc" podStartSLOduration=3.383885736 podStartE2EDuration="3.383885736s" podCreationTimestamp="2025-11-26 06:53:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:53:26.381466699 +0000 UTC m=+302.265354997" watchObservedRunningTime="2025-11-26 06:53:26.383885736 +0000 UTC m=+302.267774034"
Nov 26 06:53:26 crc kubenswrapper[4492]: I1126 06:53:26.413934 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-6c944cf5f-j7w8g" podStartSLOduration=3.413909016 podStartE2EDuration="3.413909016s" podCreationTimestamp="2025-11-26 06:53:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:53:26.410667373 +0000 UTC m=+302.294555671" watchObservedRunningTime="2025-11-26 06:53:26.413909016 +0000 UTC m=+302.297797305"
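The podStartSLOduration values above are simply observedRunningTime minus podCreationTimestamp (3.38s and 3.41s here); the zero-valued pulling timestamps mean the images were already present, so no pull time was excluded. The arithmetic can be reproduced directly from the logged timestamps:

    package main

    import (
    	"fmt"
    	"time"
    )

    func main() {
    	// Layout matching the "2025-11-26 06:53:23 +0000 UTC" form used above;
    	// fractional seconds are optional in Go's time parsing.
    	const layout = "2006-01-02 15:04:05.999999999 -0700 MST"
    	created, _ := time.Parse(layout, "2025-11-26 06:53:23 +0000 UTC")
    	running, _ := time.Parse(layout, "2025-11-26 06:53:26.383885736 +0000 UTC")
    	fmt.Println(running.Sub(created)) // 3.383885736s, the logged SLO duration
    }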
Nov 26 06:53:42 crc kubenswrapper[4492]: I1126 06:53:42.935935 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6c5dd59bc9-j7klc"]
Nov 26 06:53:42 crc kubenswrapper[4492]: I1126 06:53:42.936980 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-6c5dd59bc9-j7klc" podUID="d931a019-9a9d-4297-aad0-ffebb6ab60f1" containerName="route-controller-manager" containerID="cri-o://6f9dc0948676e330b760cc1e96c8f72e5d45403a7c48bc11c9398dd62d1d3a36" gracePeriod=30
Nov 26 06:53:43 crc kubenswrapper[4492]: I1126 06:53:43.333446 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6c5dd59bc9-j7klc"
Nov 26 06:53:43 crc kubenswrapper[4492]: I1126 06:53:43.472795 4492 generic.go:334] "Generic (PLEG): container finished" podID="d931a019-9a9d-4297-aad0-ffebb6ab60f1" containerID="6f9dc0948676e330b760cc1e96c8f72e5d45403a7c48bc11c9398dd62d1d3a36" exitCode=0
Nov 26 06:53:43 crc kubenswrapper[4492]: I1126 06:53:43.472851 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6c5dd59bc9-j7klc"
Nov 26 06:53:43 crc kubenswrapper[4492]: I1126 06:53:43.472859 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6c5dd59bc9-j7klc" event={"ID":"d931a019-9a9d-4297-aad0-ffebb6ab60f1","Type":"ContainerDied","Data":"6f9dc0948676e330b760cc1e96c8f72e5d45403a7c48bc11c9398dd62d1d3a36"}
Nov 26 06:53:43 crc kubenswrapper[4492]: I1126 06:53:43.472924 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6c5dd59bc9-j7klc" event={"ID":"d931a019-9a9d-4297-aad0-ffebb6ab60f1","Type":"ContainerDied","Data":"61bccd4f16b5f5aa9e8ba8a82ac95b21af9628b122c776ecf30df6b8d22ee4f8"}
Nov 26 06:53:43 crc kubenswrapper[4492]: I1126 06:53:43.472958 4492 scope.go:117] "RemoveContainer" containerID="6f9dc0948676e330b760cc1e96c8f72e5d45403a7c48bc11c9398dd62d1d3a36"
Nov 26 06:53:43 crc kubenswrapper[4492]: I1126 06:53:43.486725 4492 scope.go:117] "RemoveContainer" containerID="6f9dc0948676e330b760cc1e96c8f72e5d45403a7c48bc11c9398dd62d1d3a36"
Nov 26 06:53:43 crc kubenswrapper[4492]: E1126 06:53:43.487026 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6f9dc0948676e330b760cc1e96c8f72e5d45403a7c48bc11c9398dd62d1d3a36\": container with ID starting with 6f9dc0948676e330b760cc1e96c8f72e5d45403a7c48bc11c9398dd62d1d3a36 not found: ID does not exist" containerID="6f9dc0948676e330b760cc1e96c8f72e5d45403a7c48bc11c9398dd62d1d3a36"
Nov 26 06:53:43 crc kubenswrapper[4492]: I1126 06:53:43.487062 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6f9dc0948676e330b760cc1e96c8f72e5d45403a7c48bc11c9398dd62d1d3a36"} err="failed to get container status \"6f9dc0948676e330b760cc1e96c8f72e5d45403a7c48bc11c9398dd62d1d3a36\": rpc error: code = NotFound desc = could not find container \"6f9dc0948676e330b760cc1e96c8f72e5d45403a7c48bc11c9398dd62d1d3a36\": container with ID starting with 6f9dc0948676e330b760cc1e96c8f72e5d45403a7c48bc11c9398dd62d1d3a36 not found: ID does not exist"
Nov 26 06:53:43 crc kubenswrapper[4492]: I1126 06:53:43.496502 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d931a019-9a9d-4297-aad0-ffebb6ab60f1-serving-cert\") pod \"d931a019-9a9d-4297-aad0-ffebb6ab60f1\" (UID: \"d931a019-9a9d-4297-aad0-ffebb6ab60f1\") "
Nov 26 06:53:43 crc kubenswrapper[4492]: I1126 06:53:43.496570 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d931a019-9a9d-4297-aad0-ffebb6ab60f1-client-ca\") pod \"d931a019-9a9d-4297-aad0-ffebb6ab60f1\" (UID: \"d931a019-9a9d-4297-aad0-ffebb6ab60f1\") "
Nov 26 06:53:43 crc kubenswrapper[4492]: I1126 06:53:43.496660 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kzgc9\" (UniqueName: \"kubernetes.io/projected/d931a019-9a9d-4297-aad0-ffebb6ab60f1-kube-api-access-kzgc9\") pod \"d931a019-9a9d-4297-aad0-ffebb6ab60f1\" (UID: \"d931a019-9a9d-4297-aad0-ffebb6ab60f1\") "
Nov 26 06:53:43 crc kubenswrapper[4492]: I1126 06:53:43.496730 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d931a019-9a9d-4297-aad0-ffebb6ab60f1-config\") pod \"d931a019-9a9d-4297-aad0-ffebb6ab60f1\" (UID: \"d931a019-9a9d-4297-aad0-ffebb6ab60f1\") "
Nov 26 06:53:43 crc kubenswrapper[4492]: I1126 06:53:43.497705 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d931a019-9a9d-4297-aad0-ffebb6ab60f1-config" (OuterVolumeSpecName: "config") pod "d931a019-9a9d-4297-aad0-ffebb6ab60f1" (UID: "d931a019-9a9d-4297-aad0-ffebb6ab60f1"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 06:53:43 crc kubenswrapper[4492]: I1126 06:53:43.497843 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d931a019-9a9d-4297-aad0-ffebb6ab60f1-client-ca" (OuterVolumeSpecName: "client-ca") pod "d931a019-9a9d-4297-aad0-ffebb6ab60f1" (UID: "d931a019-9a9d-4297-aad0-ffebb6ab60f1"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 06:53:43 crc kubenswrapper[4492]: I1126 06:53:43.501850 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d931a019-9a9d-4297-aad0-ffebb6ab60f1-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "d931a019-9a9d-4297-aad0-ffebb6ab60f1" (UID: "d931a019-9a9d-4297-aad0-ffebb6ab60f1"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 06:53:43 crc kubenswrapper[4492]: I1126 06:53:43.502523 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d931a019-9a9d-4297-aad0-ffebb6ab60f1-kube-api-access-kzgc9" (OuterVolumeSpecName: "kube-api-access-kzgc9") pod "d931a019-9a9d-4297-aad0-ffebb6ab60f1" (UID: "d931a019-9a9d-4297-aad0-ffebb6ab60f1"). InnerVolumeSpecName "kube-api-access-kzgc9". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 06:53:43 crc kubenswrapper[4492]: I1126 06:53:43.598331 4492 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d931a019-9a9d-4297-aad0-ffebb6ab60f1-config\") on node \"crc\" DevicePath \"\""
Nov 26 06:53:43 crc kubenswrapper[4492]: I1126 06:53:43.598357 4492 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d931a019-9a9d-4297-aad0-ffebb6ab60f1-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 26 06:53:43 crc kubenswrapper[4492]: I1126 06:53:43.598367 4492 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d931a019-9a9d-4297-aad0-ffebb6ab60f1-client-ca\") on node \"crc\" DevicePath \"\""
Nov 26 06:53:43 crc kubenswrapper[4492]: I1126 06:53:43.598376 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kzgc9\" (UniqueName: \"kubernetes.io/projected/d931a019-9a9d-4297-aad0-ffebb6ab60f1-kube-api-access-kzgc9\") on node \"crc\" DevicePath \"\""
Nov 26 06:53:43 crc kubenswrapper[4492]: I1126 06:53:43.794814 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6c5dd59bc9-j7klc"]
Nov 26 06:53:43 crc kubenswrapper[4492]: I1126 06:53:43.797254 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6c5dd59bc9-j7klc"]
Nov 26 06:53:44 crc kubenswrapper[4492]: I1126 06:53:44.445261 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d931a019-9a9d-4297-aad0-ffebb6ab60f1" path="/var/lib/kubelet/pods/d931a019-9a9d-4297-aad0-ffebb6ab60f1/volumes"
Nov 26 06:53:44 crc kubenswrapper[4492]: I1126 06:53:44.850261 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6f75b6998c-rwwsh"]
Nov 26 06:53:44 crc kubenswrapper[4492]: E1126 06:53:44.850580 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d931a019-9a9d-4297-aad0-ffebb6ab60f1" containerName="route-controller-manager"
Nov 26 06:53:44 crc kubenswrapper[4492]: I1126 06:53:44.850606 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="d931a019-9a9d-4297-aad0-ffebb6ab60f1" containerName="route-controller-manager"
Nov 26 06:53:44 crc kubenswrapper[4492]: I1126 06:53:44.850705 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="d931a019-9a9d-4297-aad0-ffebb6ab60f1" containerName="route-controller-manager"
Nov 26 06:53:44 crc kubenswrapper[4492]: I1126 06:53:44.851317 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6f75b6998c-rwwsh"
Nov 26 06:53:44 crc kubenswrapper[4492]: I1126 06:53:44.854307 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt"
Nov 26 06:53:44 crc kubenswrapper[4492]: I1126 06:53:44.855977 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca"
Nov 26 06:53:44 crc kubenswrapper[4492]: I1126 06:53:44.856476 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt"
Nov 26 06:53:44 crc kubenswrapper[4492]: I1126 06:53:44.856667 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2"
Nov 26 06:53:44 crc kubenswrapper[4492]: I1126 06:53:44.860248 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert"
Nov 26 06:53:44 crc kubenswrapper[4492]: I1126 06:53:44.860253 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config"
Nov 26 06:53:44 crc kubenswrapper[4492]: I1126 06:53:44.865057 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6f75b6998c-rwwsh"]
Nov 26 06:53:45 crc kubenswrapper[4492]: I1126 06:53:45.018523 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a0b6cfc4-59e7-410f-bbe3-659ad314d449-serving-cert\") pod \"route-controller-manager-6f75b6998c-rwwsh\" (UID: \"a0b6cfc4-59e7-410f-bbe3-659ad314d449\") " pod="openshift-route-controller-manager/route-controller-manager-6f75b6998c-rwwsh"
Nov 26 06:53:45 crc kubenswrapper[4492]: I1126 06:53:45.018574 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a0b6cfc4-59e7-410f-bbe3-659ad314d449-config\") pod \"route-controller-manager-6f75b6998c-rwwsh\" (UID: \"a0b6cfc4-59e7-410f-bbe3-659ad314d449\") " pod="openshift-route-controller-manager/route-controller-manager-6f75b6998c-rwwsh"
Nov 26 06:53:45 crc kubenswrapper[4492]: I1126 06:53:45.018745 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r9dng\" (UniqueName: \"kubernetes.io/projected/a0b6cfc4-59e7-410f-bbe3-659ad314d449-kube-api-access-r9dng\") pod \"route-controller-manager-6f75b6998c-rwwsh\" (UID: \"a0b6cfc4-59e7-410f-bbe3-659ad314d449\") " pod="openshift-route-controller-manager/route-controller-manager-6f75b6998c-rwwsh"
Nov 26 06:53:45 crc kubenswrapper[4492]: I1126 06:53:45.018804 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a0b6cfc4-59e7-410f-bbe3-659ad314d449-client-ca\") pod \"route-controller-manager-6f75b6998c-rwwsh\" (UID: \"a0b6cfc4-59e7-410f-bbe3-659ad314d449\") " pod="openshift-route-controller-manager/route-controller-manager-6f75b6998c-rwwsh"
Nov 26 06:53:45 crc kubenswrapper[4492]: I1126 06:53:45.120407 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r9dng\" (UniqueName: \"kubernetes.io/projected/a0b6cfc4-59e7-410f-bbe3-659ad314d449-kube-api-access-r9dng\") pod \"route-controller-manager-6f75b6998c-rwwsh\" (UID: \"a0b6cfc4-59e7-410f-bbe3-659ad314d449\") " pod="openshift-route-controller-manager/route-controller-manager-6f75b6998c-rwwsh"
Nov 26 06:53:45 crc kubenswrapper[4492]: I1126 06:53:45.120471 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a0b6cfc4-59e7-410f-bbe3-659ad314d449-client-ca\") pod \"route-controller-manager-6f75b6998c-rwwsh\" (UID: \"a0b6cfc4-59e7-410f-bbe3-659ad314d449\") " pod="openshift-route-controller-manager/route-controller-manager-6f75b6998c-rwwsh"
Nov 26 06:53:45 crc kubenswrapper[4492]: I1126 06:53:45.120560 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a0b6cfc4-59e7-410f-bbe3-659ad314d449-serving-cert\") pod \"route-controller-manager-6f75b6998c-rwwsh\" (UID: \"a0b6cfc4-59e7-410f-bbe3-659ad314d449\") " pod="openshift-route-controller-manager/route-controller-manager-6f75b6998c-rwwsh"
Nov 26 06:53:45 crc kubenswrapper[4492]: I1126 06:53:45.120590 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a0b6cfc4-59e7-410f-bbe3-659ad314d449-config\") pod \"route-controller-manager-6f75b6998c-rwwsh\" (UID: \"a0b6cfc4-59e7-410f-bbe3-659ad314d449\") " pod="openshift-route-controller-manager/route-controller-manager-6f75b6998c-rwwsh"
Nov 26 06:53:45 crc kubenswrapper[4492]: I1126 06:53:45.121610 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a0b6cfc4-59e7-410f-bbe3-659ad314d449-client-ca\") pod \"route-controller-manager-6f75b6998c-rwwsh\" (UID: \"a0b6cfc4-59e7-410f-bbe3-659ad314d449\") " pod="openshift-route-controller-manager/route-controller-manager-6f75b6998c-rwwsh"
Nov 26 06:53:45 crc kubenswrapper[4492]: I1126 06:53:45.121833 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a0b6cfc4-59e7-410f-bbe3-659ad314d449-config\") pod \"route-controller-manager-6f75b6998c-rwwsh\" (UID: \"a0b6cfc4-59e7-410f-bbe3-659ad314d449\") " pod="openshift-route-controller-manager/route-controller-manager-6f75b6998c-rwwsh"
Nov 26 06:53:45 crc kubenswrapper[4492]: I1126 06:53:45.124613 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a0b6cfc4-59e7-410f-bbe3-659ad314d449-serving-cert\") pod \"route-controller-manager-6f75b6998c-rwwsh\" (UID: \"a0b6cfc4-59e7-410f-bbe3-659ad314d449\") " pod="openshift-route-controller-manager/route-controller-manager-6f75b6998c-rwwsh"
Nov 26 06:53:45 crc kubenswrapper[4492]: I1126 06:53:45.135448 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r9dng\" (UniqueName: \"kubernetes.io/projected/a0b6cfc4-59e7-410f-bbe3-659ad314d449-kube-api-access-r9dng\") pod \"route-controller-manager-6f75b6998c-rwwsh\" (UID: \"a0b6cfc4-59e7-410f-bbe3-659ad314d449\") " pod="openshift-route-controller-manager/route-controller-manager-6f75b6998c-rwwsh"
Nov 26 06:53:45 crc kubenswrapper[4492]: I1126 06:53:45.166281 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6f75b6998c-rwwsh"
Nov 26 06:53:45 crc kubenswrapper[4492]: I1126 06:53:45.555745 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6f75b6998c-rwwsh"]
Nov 26 06:53:45 crc kubenswrapper[4492]: W1126 06:53:45.563342 4492 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda0b6cfc4_59e7_410f_bbe3_659ad314d449.slice/crio-f617d0d90af3c580885755cdd8d63e8e430e099105f949fafb5d08a9b7adb5aa WatchSource:0}: Error finding container f617d0d90af3c580885755cdd8d63e8e430e099105f949fafb5d08a9b7adb5aa: Status 404 returned error can't find the container with id f617d0d90af3c580885755cdd8d63e8e430e099105f949fafb5d08a9b7adb5aa
Nov 26 06:53:46 crc kubenswrapper[4492]: I1126 06:53:46.497362 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6f75b6998c-rwwsh" event={"ID":"a0b6cfc4-59e7-410f-bbe3-659ad314d449","Type":"ContainerStarted","Data":"ef90248f8f91e651275bda02177bf59ceec36a4f7e604fb6fa01086f397e1305"}
Nov 26 06:53:46 crc kubenswrapper[4492]: I1126 06:53:46.497964 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6f75b6998c-rwwsh"
Nov 26 06:53:46 crc kubenswrapper[4492]: I1126 06:53:46.498065 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6f75b6998c-rwwsh" event={"ID":"a0b6cfc4-59e7-410f-bbe3-659ad314d449","Type":"ContainerStarted","Data":"f617d0d90af3c580885755cdd8d63e8e430e099105f949fafb5d08a9b7adb5aa"}
Nov 26 06:53:46 crc kubenswrapper[4492]: I1126 06:53:46.501793 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6f75b6998c-rwwsh"
Nov 26 06:53:46 crc kubenswrapper[4492]: I1126 06:53:46.517294 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6f75b6998c-rwwsh" podStartSLOduration=4.517278037 podStartE2EDuration="4.517278037s" podCreationTimestamp="2025-11-26 06:53:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:53:46.51089056 +0000 UTC m=+322.394778849" watchObservedRunningTime="2025-11-26 06:53:46.517278037 +0000 UTC m=+322.401166335"
probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 06:53:49 crc kubenswrapper[4492]: I1126 06:53:49.441881 4492 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 06:54:19 crc kubenswrapper[4492]: I1126 06:54:19.441218 4492 patch_prober.go:28] interesting pod/machine-config-daemon-6blv7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 06:54:19 crc kubenswrapper[4492]: I1126 06:54:19.441740 4492 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 06:54:22 crc kubenswrapper[4492]: I1126 06:54:22.938833 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-6c944cf5f-j7w8g"] Nov 26 06:54:22 crc kubenswrapper[4492]: I1126 06:54:22.939996 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-6c944cf5f-j7w8g" podUID="5d8170b3-5e3e-493a-8cb8-f7119da01476" containerName="controller-manager" containerID="cri-o://7d9036c033cdb5eb2618cf85422db4c77862e9677e94cd8231a6a2021ba545bf" gracePeriod=30 Nov 26 06:54:23 crc kubenswrapper[4492]: I1126 06:54:23.328512 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-6c944cf5f-j7w8g" Nov 26 06:54:23 crc kubenswrapper[4492]: I1126 06:54:23.485111 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5d8170b3-5e3e-493a-8cb8-f7119da01476-serving-cert\") pod \"5d8170b3-5e3e-493a-8cb8-f7119da01476\" (UID: \"5d8170b3-5e3e-493a-8cb8-f7119da01476\") " Nov 26 06:54:23 crc kubenswrapper[4492]: I1126 06:54:23.485195 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jtmjv\" (UniqueName: \"kubernetes.io/projected/5d8170b3-5e3e-493a-8cb8-f7119da01476-kube-api-access-jtmjv\") pod \"5d8170b3-5e3e-493a-8cb8-f7119da01476\" (UID: \"5d8170b3-5e3e-493a-8cb8-f7119da01476\") " Nov 26 06:54:23 crc kubenswrapper[4492]: I1126 06:54:23.485220 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5d8170b3-5e3e-493a-8cb8-f7119da01476-client-ca\") pod \"5d8170b3-5e3e-493a-8cb8-f7119da01476\" (UID: \"5d8170b3-5e3e-493a-8cb8-f7119da01476\") " Nov 26 06:54:23 crc kubenswrapper[4492]: I1126 06:54:23.485258 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/5d8170b3-5e3e-493a-8cb8-f7119da01476-proxy-ca-bundles\") pod \"5d8170b3-5e3e-493a-8cb8-f7119da01476\" (UID: \"5d8170b3-5e3e-493a-8cb8-f7119da01476\") " Nov 26 06:54:23 crc kubenswrapper[4492]: I1126 06:54:23.485347 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5d8170b3-5e3e-493a-8cb8-f7119da01476-config\") pod \"5d8170b3-5e3e-493a-8cb8-f7119da01476\" (UID: \"5d8170b3-5e3e-493a-8cb8-f7119da01476\") " Nov 26 06:54:23 crc kubenswrapper[4492]: I1126 06:54:23.486422 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5d8170b3-5e3e-493a-8cb8-f7119da01476-client-ca" (OuterVolumeSpecName: "client-ca") pod "5d8170b3-5e3e-493a-8cb8-f7119da01476" (UID: "5d8170b3-5e3e-493a-8cb8-f7119da01476"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:54:23 crc kubenswrapper[4492]: I1126 06:54:23.486636 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5d8170b3-5e3e-493a-8cb8-f7119da01476-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "5d8170b3-5e3e-493a-8cb8-f7119da01476" (UID: "5d8170b3-5e3e-493a-8cb8-f7119da01476"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:54:23 crc kubenswrapper[4492]: I1126 06:54:23.486754 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5d8170b3-5e3e-493a-8cb8-f7119da01476-config" (OuterVolumeSpecName: "config") pod "5d8170b3-5e3e-493a-8cb8-f7119da01476" (UID: "5d8170b3-5e3e-493a-8cb8-f7119da01476"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:54:23 crc kubenswrapper[4492]: I1126 06:54:23.491330 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5d8170b3-5e3e-493a-8cb8-f7119da01476-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "5d8170b3-5e3e-493a-8cb8-f7119da01476" (UID: "5d8170b3-5e3e-493a-8cb8-f7119da01476"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:54:23 crc kubenswrapper[4492]: I1126 06:54:23.501015 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5d8170b3-5e3e-493a-8cb8-f7119da01476-kube-api-access-jtmjv" (OuterVolumeSpecName: "kube-api-access-jtmjv") pod "5d8170b3-5e3e-493a-8cb8-f7119da01476" (UID: "5d8170b3-5e3e-493a-8cb8-f7119da01476"). InnerVolumeSpecName "kube-api-access-jtmjv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:54:23 crc kubenswrapper[4492]: I1126 06:54:23.587029 4492 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5d8170b3-5e3e-493a-8cb8-f7119da01476-config\") on node \"crc\" DevicePath \"\"" Nov 26 06:54:23 crc kubenswrapper[4492]: I1126 06:54:23.587146 4492 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5d8170b3-5e3e-493a-8cb8-f7119da01476-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 06:54:23 crc kubenswrapper[4492]: I1126 06:54:23.587188 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jtmjv\" (UniqueName: \"kubernetes.io/projected/5d8170b3-5e3e-493a-8cb8-f7119da01476-kube-api-access-jtmjv\") on node \"crc\" DevicePath \"\"" Nov 26 06:54:23 crc kubenswrapper[4492]: I1126 06:54:23.587202 4492 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5d8170b3-5e3e-493a-8cb8-f7119da01476-client-ca\") on node \"crc\" DevicePath \"\"" Nov 26 06:54:23 crc kubenswrapper[4492]: I1126 06:54:23.587211 4492 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/5d8170b3-5e3e-493a-8cb8-f7119da01476-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 26 06:54:23 crc kubenswrapper[4492]: I1126 06:54:23.690133 4492 generic.go:334] "Generic (PLEG): container finished" podID="5d8170b3-5e3e-493a-8cb8-f7119da01476" containerID="7d9036c033cdb5eb2618cf85422db4c77862e9677e94cd8231a6a2021ba545bf" exitCode=0 Nov 26 06:54:23 crc kubenswrapper[4492]: I1126 06:54:23.691299 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-6c944cf5f-j7w8g" event={"ID":"5d8170b3-5e3e-493a-8cb8-f7119da01476","Type":"ContainerDied","Data":"7d9036c033cdb5eb2618cf85422db4c77862e9677e94cd8231a6a2021ba545bf"} Nov 26 06:54:23 crc kubenswrapper[4492]: I1126 06:54:23.691375 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-6c944cf5f-j7w8g" event={"ID":"5d8170b3-5e3e-493a-8cb8-f7119da01476","Type":"ContainerDied","Data":"5fc0caf1bd5447584abccf5ab183332c917e9ccb888b834fa86384810cabaab8"} Nov 26 06:54:23 crc kubenswrapper[4492]: I1126 06:54:23.691405 4492 scope.go:117] "RemoveContainer" containerID="7d9036c033cdb5eb2618cf85422db4c77862e9677e94cd8231a6a2021ba545bf" Nov 26 06:54:23 crc kubenswrapper[4492]: I1126 06:54:23.691729 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-6c944cf5f-j7w8g" Nov 26 06:54:23 crc kubenswrapper[4492]: I1126 06:54:23.705091 4492 scope.go:117] "RemoveContainer" containerID="7d9036c033cdb5eb2618cf85422db4c77862e9677e94cd8231a6a2021ba545bf" Nov 26 06:54:23 crc kubenswrapper[4492]: E1126 06:54:23.705408 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7d9036c033cdb5eb2618cf85422db4c77862e9677e94cd8231a6a2021ba545bf\": container with ID starting with 7d9036c033cdb5eb2618cf85422db4c77862e9677e94cd8231a6a2021ba545bf not found: ID does not exist" containerID="7d9036c033cdb5eb2618cf85422db4c77862e9677e94cd8231a6a2021ba545bf" Nov 26 06:54:23 crc kubenswrapper[4492]: I1126 06:54:23.705440 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7d9036c033cdb5eb2618cf85422db4c77862e9677e94cd8231a6a2021ba545bf"} err="failed to get container status \"7d9036c033cdb5eb2618cf85422db4c77862e9677e94cd8231a6a2021ba545bf\": rpc error: code = NotFound desc = could not find container \"7d9036c033cdb5eb2618cf85422db4c77862e9677e94cd8231a6a2021ba545bf\": container with ID starting with 7d9036c033cdb5eb2618cf85422db4c77862e9677e94cd8231a6a2021ba545bf not found: ID does not exist" Nov 26 06:54:23 crc kubenswrapper[4492]: I1126 06:54:23.724360 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-6c944cf5f-j7w8g"] Nov 26 06:54:23 crc kubenswrapper[4492]: I1126 06:54:23.728038 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-6c944cf5f-j7w8g"] Nov 26 06:54:24 crc kubenswrapper[4492]: I1126 06:54:24.446298 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5d8170b3-5e3e-493a-8cb8-f7119da01476" path="/var/lib/kubelet/pods/5d8170b3-5e3e-493a-8cb8-f7119da01476/volumes" Nov 26 06:54:24 crc kubenswrapper[4492]: I1126 06:54:24.873540 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-5d6b658c6-7t69t"] Nov 26 06:54:24 crc kubenswrapper[4492]: E1126 06:54:24.873999 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5d8170b3-5e3e-493a-8cb8-f7119da01476" containerName="controller-manager" Nov 26 06:54:24 crc kubenswrapper[4492]: I1126 06:54:24.874012 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="5d8170b3-5e3e-493a-8cb8-f7119da01476" containerName="controller-manager" Nov 26 06:54:24 crc kubenswrapper[4492]: I1126 06:54:24.874107 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="5d8170b3-5e3e-493a-8cb8-f7119da01476" containerName="controller-manager" Nov 26 06:54:24 crc kubenswrapper[4492]: I1126 06:54:24.874509 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-5d6b658c6-7t69t" Nov 26 06:54:24 crc kubenswrapper[4492]: I1126 06:54:24.875979 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Nov 26 06:54:24 crc kubenswrapper[4492]: I1126 06:54:24.876197 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Nov 26 06:54:24 crc kubenswrapper[4492]: I1126 06:54:24.876223 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Nov 26 06:54:24 crc kubenswrapper[4492]: I1126 06:54:24.876826 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Nov 26 06:54:24 crc kubenswrapper[4492]: I1126 06:54:24.877065 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Nov 26 06:54:24 crc kubenswrapper[4492]: I1126 06:54:24.877068 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Nov 26 06:54:24 crc kubenswrapper[4492]: I1126 06:54:24.881757 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Nov 26 06:54:24 crc kubenswrapper[4492]: I1126 06:54:24.888754 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-5d6b658c6-7t69t"] Nov 26 06:54:25 crc kubenswrapper[4492]: I1126 06:54:25.012285 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/8164262d-a5f4-4ac1-b58e-2c53cdc38e11-client-ca\") pod \"controller-manager-5d6b658c6-7t69t\" (UID: \"8164262d-a5f4-4ac1-b58e-2c53cdc38e11\") " pod="openshift-controller-manager/controller-manager-5d6b658c6-7t69t" Nov 26 06:54:25 crc kubenswrapper[4492]: I1126 06:54:25.012341 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8164262d-a5f4-4ac1-b58e-2c53cdc38e11-serving-cert\") pod \"controller-manager-5d6b658c6-7t69t\" (UID: \"8164262d-a5f4-4ac1-b58e-2c53cdc38e11\") " pod="openshift-controller-manager/controller-manager-5d6b658c6-7t69t" Nov 26 06:54:25 crc kubenswrapper[4492]: I1126 06:54:25.012390 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2q2rt\" (UniqueName: \"kubernetes.io/projected/8164262d-a5f4-4ac1-b58e-2c53cdc38e11-kube-api-access-2q2rt\") pod \"controller-manager-5d6b658c6-7t69t\" (UID: \"8164262d-a5f4-4ac1-b58e-2c53cdc38e11\") " pod="openshift-controller-manager/controller-manager-5d6b658c6-7t69t" Nov 26 06:54:25 crc kubenswrapper[4492]: I1126 06:54:25.012495 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/8164262d-a5f4-4ac1-b58e-2c53cdc38e11-proxy-ca-bundles\") pod \"controller-manager-5d6b658c6-7t69t\" (UID: \"8164262d-a5f4-4ac1-b58e-2c53cdc38e11\") " pod="openshift-controller-manager/controller-manager-5d6b658c6-7t69t" Nov 26 06:54:25 crc kubenswrapper[4492]: I1126 06:54:25.012683 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/8164262d-a5f4-4ac1-b58e-2c53cdc38e11-config\") pod \"controller-manager-5d6b658c6-7t69t\" (UID: \"8164262d-a5f4-4ac1-b58e-2c53cdc38e11\") " pod="openshift-controller-manager/controller-manager-5d6b658c6-7t69t" Nov 26 06:54:25 crc kubenswrapper[4492]: I1126 06:54:25.114041 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8164262d-a5f4-4ac1-b58e-2c53cdc38e11-serving-cert\") pod \"controller-manager-5d6b658c6-7t69t\" (UID: \"8164262d-a5f4-4ac1-b58e-2c53cdc38e11\") " pod="openshift-controller-manager/controller-manager-5d6b658c6-7t69t" Nov 26 06:54:25 crc kubenswrapper[4492]: I1126 06:54:25.114100 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2q2rt\" (UniqueName: \"kubernetes.io/projected/8164262d-a5f4-4ac1-b58e-2c53cdc38e11-kube-api-access-2q2rt\") pod \"controller-manager-5d6b658c6-7t69t\" (UID: \"8164262d-a5f4-4ac1-b58e-2c53cdc38e11\") " pod="openshift-controller-manager/controller-manager-5d6b658c6-7t69t" Nov 26 06:54:25 crc kubenswrapper[4492]: I1126 06:54:25.114124 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/8164262d-a5f4-4ac1-b58e-2c53cdc38e11-proxy-ca-bundles\") pod \"controller-manager-5d6b658c6-7t69t\" (UID: \"8164262d-a5f4-4ac1-b58e-2c53cdc38e11\") " pod="openshift-controller-manager/controller-manager-5d6b658c6-7t69t" Nov 26 06:54:25 crc kubenswrapper[4492]: I1126 06:54:25.114160 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8164262d-a5f4-4ac1-b58e-2c53cdc38e11-config\") pod \"controller-manager-5d6b658c6-7t69t\" (UID: \"8164262d-a5f4-4ac1-b58e-2c53cdc38e11\") " pod="openshift-controller-manager/controller-manager-5d6b658c6-7t69t" Nov 26 06:54:25 crc kubenswrapper[4492]: I1126 06:54:25.114239 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/8164262d-a5f4-4ac1-b58e-2c53cdc38e11-client-ca\") pod \"controller-manager-5d6b658c6-7t69t\" (UID: \"8164262d-a5f4-4ac1-b58e-2c53cdc38e11\") " pod="openshift-controller-manager/controller-manager-5d6b658c6-7t69t" Nov 26 06:54:25 crc kubenswrapper[4492]: I1126 06:54:25.115393 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/8164262d-a5f4-4ac1-b58e-2c53cdc38e11-client-ca\") pod \"controller-manager-5d6b658c6-7t69t\" (UID: \"8164262d-a5f4-4ac1-b58e-2c53cdc38e11\") " pod="openshift-controller-manager/controller-manager-5d6b658c6-7t69t" Nov 26 06:54:25 crc kubenswrapper[4492]: I1126 06:54:25.115592 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/8164262d-a5f4-4ac1-b58e-2c53cdc38e11-proxy-ca-bundles\") pod \"controller-manager-5d6b658c6-7t69t\" (UID: \"8164262d-a5f4-4ac1-b58e-2c53cdc38e11\") " pod="openshift-controller-manager/controller-manager-5d6b658c6-7t69t" Nov 26 06:54:25 crc kubenswrapper[4492]: I1126 06:54:25.115820 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8164262d-a5f4-4ac1-b58e-2c53cdc38e11-config\") pod \"controller-manager-5d6b658c6-7t69t\" (UID: \"8164262d-a5f4-4ac1-b58e-2c53cdc38e11\") " pod="openshift-controller-manager/controller-manager-5d6b658c6-7t69t" Nov 26 06:54:25 crc 
kubenswrapper[4492]: I1126 06:54:25.118468 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8164262d-a5f4-4ac1-b58e-2c53cdc38e11-serving-cert\") pod \"controller-manager-5d6b658c6-7t69t\" (UID: \"8164262d-a5f4-4ac1-b58e-2c53cdc38e11\") " pod="openshift-controller-manager/controller-manager-5d6b658c6-7t69t" Nov 26 06:54:25 crc kubenswrapper[4492]: I1126 06:54:25.127416 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2q2rt\" (UniqueName: \"kubernetes.io/projected/8164262d-a5f4-4ac1-b58e-2c53cdc38e11-kube-api-access-2q2rt\") pod \"controller-manager-5d6b658c6-7t69t\" (UID: \"8164262d-a5f4-4ac1-b58e-2c53cdc38e11\") " pod="openshift-controller-manager/controller-manager-5d6b658c6-7t69t" Nov 26 06:54:25 crc kubenswrapper[4492]: I1126 06:54:25.188301 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-5d6b658c6-7t69t" Nov 26 06:54:25 crc kubenswrapper[4492]: I1126 06:54:25.343209 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-5d6b658c6-7t69t"] Nov 26 06:54:25 crc kubenswrapper[4492]: W1126 06:54:25.345714 4492 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8164262d_a5f4_4ac1_b58e_2c53cdc38e11.slice/crio-a750d047d65626497634e711fcf19b0435cc62dd09c6d64df03715806b424b84 WatchSource:0}: Error finding container a750d047d65626497634e711fcf19b0435cc62dd09c6d64df03715806b424b84: Status 404 returned error can't find the container with id a750d047d65626497634e711fcf19b0435cc62dd09c6d64df03715806b424b84 Nov 26 06:54:25 crc kubenswrapper[4492]: I1126 06:54:25.702000 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-5d6b658c6-7t69t" event={"ID":"8164262d-a5f4-4ac1-b58e-2c53cdc38e11","Type":"ContainerStarted","Data":"6385fd0749200638153c690f1688bb0a111105874289d0130f97bb150b3c12b5"} Nov 26 06:54:25 crc kubenswrapper[4492]: I1126 06:54:25.702048 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-5d6b658c6-7t69t" event={"ID":"8164262d-a5f4-4ac1-b58e-2c53cdc38e11","Type":"ContainerStarted","Data":"a750d047d65626497634e711fcf19b0435cc62dd09c6d64df03715806b424b84"} Nov 26 06:54:25 crc kubenswrapper[4492]: I1126 06:54:25.704463 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-5d6b658c6-7t69t" Nov 26 06:54:25 crc kubenswrapper[4492]: I1126 06:54:25.719806 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-5d6b658c6-7t69t" Nov 26 06:54:25 crc kubenswrapper[4492]: I1126 06:54:25.728696 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-5d6b658c6-7t69t" podStartSLOduration=3.728684775 podStartE2EDuration="3.728684775s" podCreationTimestamp="2025-11-26 06:54:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:54:25.726695205 +0000 UTC m=+361.610583493" watchObservedRunningTime="2025-11-26 06:54:25.728684775 +0000 UTC m=+361.612573073" Nov 26 06:54:33 crc kubenswrapper[4492]: I1126 06:54:33.682304 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openshift-marketplace/certified-operators-kt8n4"] Nov 26 06:54:33 crc kubenswrapper[4492]: I1126 06:54:33.683080 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-kt8n4" podUID="817798d6-9eae-4ee2-9b2f-53c54772866c" containerName="registry-server" containerID="cri-o://6bd791c0dc1ff550646965141ea1787aecdf318cf53792607495b7bc2054ce53" gracePeriod=30 Nov 26 06:54:33 crc kubenswrapper[4492]: I1126 06:54:33.691738 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-d925t"] Nov 26 06:54:33 crc kubenswrapper[4492]: I1126 06:54:33.692033 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-d925t" podUID="a236508b-a76f-4029-b748-7bfdbe412825" containerName="registry-server" containerID="cri-o://7e40925eca9fd7ba3c54ecbded7436204da38f23be02845b51219354c719353f" gracePeriod=30 Nov 26 06:54:33 crc kubenswrapper[4492]: I1126 06:54:33.702644 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-sb6qj"] Nov 26 06:54:33 crc kubenswrapper[4492]: I1126 06:54:33.703004 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/marketplace-operator-79b997595-sb6qj" podUID="05a0ee0a-7b86-490a-8638-8d74ad1446ea" containerName="marketplace-operator" containerID="cri-o://b33f01f8ad4b127a9d7935c9e6fe92cea5dc7760d052ae577d69d26322c11c2d" gracePeriod=30 Nov 26 06:54:33 crc kubenswrapper[4492]: I1126 06:54:33.710669 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-7rl4j"] Nov 26 06:54:33 crc kubenswrapper[4492]: I1126 06:54:33.710921 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-7rl4j" podUID="abb41535-15fd-41da-9e54-d68ec23a99be" containerName="registry-server" containerID="cri-o://07cf7fede72449a880e175a0d1e54a6a7cab0c00ea8dc5d72c1213e82e2dd5a5" gracePeriod=30 Nov 26 06:54:33 crc kubenswrapper[4492]: I1126 06:54:33.719428 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-m6grh"] Nov 26 06:54:33 crc kubenswrapper[4492]: I1126 06:54:33.719583 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-m6grh" podUID="72328da8-8a37-4bd5-b1f5-c26ce6aefd5a" containerName="registry-server" containerID="cri-o://0a78d28cc29e4520348fd572074b9bf3878f969cc9b8046f95bac20a59e379b9" gracePeriod=30 Nov 26 06:54:33 crc kubenswrapper[4492]: I1126 06:54:33.727078 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-t8j7j"] Nov 26 06:54:33 crc kubenswrapper[4492]: I1126 06:54:33.727869 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-t8j7j" Nov 26 06:54:33 crc kubenswrapper[4492]: I1126 06:54:33.734086 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tw4hs\" (UniqueName: \"kubernetes.io/projected/80cbffbd-a369-43ed-a88c-341dca77afa4-kube-api-access-tw4hs\") pod \"marketplace-operator-79b997595-t8j7j\" (UID: \"80cbffbd-a369-43ed-a88c-341dca77afa4\") " pod="openshift-marketplace/marketplace-operator-79b997595-t8j7j" Nov 26 06:54:33 crc kubenswrapper[4492]: I1126 06:54:33.734133 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/80cbffbd-a369-43ed-a88c-341dca77afa4-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-t8j7j\" (UID: \"80cbffbd-a369-43ed-a88c-341dca77afa4\") " pod="openshift-marketplace/marketplace-operator-79b997595-t8j7j" Nov 26 06:54:33 crc kubenswrapper[4492]: I1126 06:54:33.734159 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/80cbffbd-a369-43ed-a88c-341dca77afa4-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-t8j7j\" (UID: \"80cbffbd-a369-43ed-a88c-341dca77afa4\") " pod="openshift-marketplace/marketplace-operator-79b997595-t8j7j" Nov 26 06:54:33 crc kubenswrapper[4492]: I1126 06:54:33.779138 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-t8j7j"] Nov 26 06:54:33 crc kubenswrapper[4492]: I1126 06:54:33.835262 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tw4hs\" (UniqueName: \"kubernetes.io/projected/80cbffbd-a369-43ed-a88c-341dca77afa4-kube-api-access-tw4hs\") pod \"marketplace-operator-79b997595-t8j7j\" (UID: \"80cbffbd-a369-43ed-a88c-341dca77afa4\") " pod="openshift-marketplace/marketplace-operator-79b997595-t8j7j" Nov 26 06:54:33 crc kubenswrapper[4492]: I1126 06:54:33.835309 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/80cbffbd-a369-43ed-a88c-341dca77afa4-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-t8j7j\" (UID: \"80cbffbd-a369-43ed-a88c-341dca77afa4\") " pod="openshift-marketplace/marketplace-operator-79b997595-t8j7j" Nov 26 06:54:33 crc kubenswrapper[4492]: I1126 06:54:33.835329 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/80cbffbd-a369-43ed-a88c-341dca77afa4-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-t8j7j\" (UID: \"80cbffbd-a369-43ed-a88c-341dca77afa4\") " pod="openshift-marketplace/marketplace-operator-79b997595-t8j7j" Nov 26 06:54:33 crc kubenswrapper[4492]: I1126 06:54:33.838279 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/80cbffbd-a369-43ed-a88c-341dca77afa4-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-t8j7j\" (UID: \"80cbffbd-a369-43ed-a88c-341dca77afa4\") " pod="openshift-marketplace/marketplace-operator-79b997595-t8j7j" Nov 26 06:54:33 crc kubenswrapper[4492]: I1126 06:54:33.843733 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: 
\"kubernetes.io/secret/80cbffbd-a369-43ed-a88c-341dca77afa4-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-t8j7j\" (UID: \"80cbffbd-a369-43ed-a88c-341dca77afa4\") " pod="openshift-marketplace/marketplace-operator-79b997595-t8j7j" Nov 26 06:54:33 crc kubenswrapper[4492]: I1126 06:54:33.855759 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tw4hs\" (UniqueName: \"kubernetes.io/projected/80cbffbd-a369-43ed-a88c-341dca77afa4-kube-api-access-tw4hs\") pod \"marketplace-operator-79b997595-t8j7j\" (UID: \"80cbffbd-a369-43ed-a88c-341dca77afa4\") " pod="openshift-marketplace/marketplace-operator-79b997595-t8j7j" Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.038220 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-9j8px"] Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.042482 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-9j8px" Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.049006 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-t8j7j" Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.079752 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-9j8px"] Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.232809 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-sb6qj" Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.241588 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/729be87d-6119-4c4b-8b50-3e3bf5f7f10d-registry-tls\") pod \"image-registry-66df7c8f76-9j8px\" (UID: \"729be87d-6119-4c4b-8b50-3e3bf5f7f10d\") " pod="openshift-image-registry/image-registry-66df7c8f76-9j8px" Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.241618 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-shhgw\" (UniqueName: \"kubernetes.io/projected/729be87d-6119-4c4b-8b50-3e3bf5f7f10d-kube-api-access-shhgw\") pod \"image-registry-66df7c8f76-9j8px\" (UID: \"729be87d-6119-4c4b-8b50-3e3bf5f7f10d\") " pod="openshift-image-registry/image-registry-66df7c8f76-9j8px" Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.241720 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/729be87d-6119-4c4b-8b50-3e3bf5f7f10d-trusted-ca\") pod \"image-registry-66df7c8f76-9j8px\" (UID: \"729be87d-6119-4c4b-8b50-3e3bf5f7f10d\") " pod="openshift-image-registry/image-registry-66df7c8f76-9j8px" Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.241769 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/729be87d-6119-4c4b-8b50-3e3bf5f7f10d-bound-sa-token\") pod \"image-registry-66df7c8f76-9j8px\" (UID: \"729be87d-6119-4c4b-8b50-3e3bf5f7f10d\") " pod="openshift-image-registry/image-registry-66df7c8f76-9j8px" Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.241789 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/729be87d-6119-4c4b-8b50-3e3bf5f7f10d-installation-pull-secrets\") pod \"image-registry-66df7c8f76-9j8px\" (UID: \"729be87d-6119-4c4b-8b50-3e3bf5f7f10d\") " pod="openshift-image-registry/image-registry-66df7c8f76-9j8px" Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.241807 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/729be87d-6119-4c4b-8b50-3e3bf5f7f10d-registry-certificates\") pod \"image-registry-66df7c8f76-9j8px\" (UID: \"729be87d-6119-4c4b-8b50-3e3bf5f7f10d\") " pod="openshift-image-registry/image-registry-66df7c8f76-9j8px" Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.241842 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/729be87d-6119-4c4b-8b50-3e3bf5f7f10d-ca-trust-extracted\") pod \"image-registry-66df7c8f76-9j8px\" (UID: \"729be87d-6119-4c4b-8b50-3e3bf5f7f10d\") " pod="openshift-image-registry/image-registry-66df7c8f76-9j8px" Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.241875 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-9j8px\" (UID: \"729be87d-6119-4c4b-8b50-3e3bf5f7f10d\") " pod="openshift-image-registry/image-registry-66df7c8f76-9j8px" Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.316265 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-9j8px\" (UID: \"729be87d-6119-4c4b-8b50-3e3bf5f7f10d\") " pod="openshift-image-registry/image-registry-66df7c8f76-9j8px" Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.323054 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-7rl4j" Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.327861 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-m6grh" Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.332453 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-d925t" Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.352414 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a236508b-a76f-4029-b748-7bfdbe412825-utilities\") pod \"a236508b-a76f-4029-b748-7bfdbe412825\" (UID: \"a236508b-a76f-4029-b748-7bfdbe412825\") " Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.352464 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pxbgt\" (UniqueName: \"kubernetes.io/projected/72328da8-8a37-4bd5-b1f5-c26ce6aefd5a-kube-api-access-pxbgt\") pod \"72328da8-8a37-4bd5-b1f5-c26ce6aefd5a\" (UID: \"72328da8-8a37-4bd5-b1f5-c26ce6aefd5a\") " Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.352490 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/05a0ee0a-7b86-490a-8638-8d74ad1446ea-marketplace-trusted-ca\") pod \"05a0ee0a-7b86-490a-8638-8d74ad1446ea\" (UID: \"05a0ee0a-7b86-490a-8638-8d74ad1446ea\") " Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.352509 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xnrrj\" (UniqueName: \"kubernetes.io/projected/abb41535-15fd-41da-9e54-d68ec23a99be-kube-api-access-xnrrj\") pod \"abb41535-15fd-41da-9e54-d68ec23a99be\" (UID: \"abb41535-15fd-41da-9e54-d68ec23a99be\") " Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.352545 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/abb41535-15fd-41da-9e54-d68ec23a99be-catalog-content\") pod \"abb41535-15fd-41da-9e54-d68ec23a99be\" (UID: \"abb41535-15fd-41da-9e54-d68ec23a99be\") " Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.352603 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/72328da8-8a37-4bd5-b1f5-c26ce6aefd5a-catalog-content\") pod \"72328da8-8a37-4bd5-b1f5-c26ce6aefd5a\" (UID: \"72328da8-8a37-4bd5-b1f5-c26ce6aefd5a\") " Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.352620 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h4thv\" (UniqueName: \"kubernetes.io/projected/05a0ee0a-7b86-490a-8638-8d74ad1446ea-kube-api-access-h4thv\") pod \"05a0ee0a-7b86-490a-8638-8d74ad1446ea\" (UID: \"05a0ee0a-7b86-490a-8638-8d74ad1446ea\") " Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.352645 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tsddt\" (UniqueName: \"kubernetes.io/projected/a236508b-a76f-4029-b748-7bfdbe412825-kube-api-access-tsddt\") pod \"a236508b-a76f-4029-b748-7bfdbe412825\" (UID: \"a236508b-a76f-4029-b748-7bfdbe412825\") " Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.352660 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a236508b-a76f-4029-b748-7bfdbe412825-catalog-content\") pod \"a236508b-a76f-4029-b748-7bfdbe412825\" (UID: \"a236508b-a76f-4029-b748-7bfdbe412825\") " Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.352675 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/72328da8-8a37-4bd5-b1f5-c26ce6aefd5a-utilities\") pod \"72328da8-8a37-4bd5-b1f5-c26ce6aefd5a\" (UID: \"72328da8-8a37-4bd5-b1f5-c26ce6aefd5a\") " Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.352711 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/05a0ee0a-7b86-490a-8638-8d74ad1446ea-marketplace-operator-metrics\") pod \"05a0ee0a-7b86-490a-8638-8d74ad1446ea\" (UID: \"05a0ee0a-7b86-490a-8638-8d74ad1446ea\") " Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.352728 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/abb41535-15fd-41da-9e54-d68ec23a99be-utilities\") pod \"abb41535-15fd-41da-9e54-d68ec23a99be\" (UID: \"abb41535-15fd-41da-9e54-d68ec23a99be\") " Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.352894 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/729be87d-6119-4c4b-8b50-3e3bf5f7f10d-registry-tls\") pod \"image-registry-66df7c8f76-9j8px\" (UID: \"729be87d-6119-4c4b-8b50-3e3bf5f7f10d\") " pod="openshift-image-registry/image-registry-66df7c8f76-9j8px" Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.352927 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-shhgw\" (UniqueName: \"kubernetes.io/projected/729be87d-6119-4c4b-8b50-3e3bf5f7f10d-kube-api-access-shhgw\") pod \"image-registry-66df7c8f76-9j8px\" (UID: \"729be87d-6119-4c4b-8b50-3e3bf5f7f10d\") " pod="openshift-image-registry/image-registry-66df7c8f76-9j8px" Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.352966 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/729be87d-6119-4c4b-8b50-3e3bf5f7f10d-trusted-ca\") pod \"image-registry-66df7c8f76-9j8px\" (UID: \"729be87d-6119-4c4b-8b50-3e3bf5f7f10d\") " pod="openshift-image-registry/image-registry-66df7c8f76-9j8px" Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.352982 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/729be87d-6119-4c4b-8b50-3e3bf5f7f10d-bound-sa-token\") pod \"image-registry-66df7c8f76-9j8px\" (UID: \"729be87d-6119-4c4b-8b50-3e3bf5f7f10d\") " pod="openshift-image-registry/image-registry-66df7c8f76-9j8px" Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.353004 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/729be87d-6119-4c4b-8b50-3e3bf5f7f10d-installation-pull-secrets\") pod \"image-registry-66df7c8f76-9j8px\" (UID: \"729be87d-6119-4c4b-8b50-3e3bf5f7f10d\") " pod="openshift-image-registry/image-registry-66df7c8f76-9j8px" Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.353022 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/729be87d-6119-4c4b-8b50-3e3bf5f7f10d-registry-certificates\") pod \"image-registry-66df7c8f76-9j8px\" (UID: \"729be87d-6119-4c4b-8b50-3e3bf5f7f10d\") " pod="openshift-image-registry/image-registry-66df7c8f76-9j8px" Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.353045 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/729be87d-6119-4c4b-8b50-3e3bf5f7f10d-ca-trust-extracted\") pod \"image-registry-66df7c8f76-9j8px\" (UID: \"729be87d-6119-4c4b-8b50-3e3bf5f7f10d\") " pod="openshift-image-registry/image-registry-66df7c8f76-9j8px" Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.353763 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/729be87d-6119-4c4b-8b50-3e3bf5f7f10d-ca-trust-extracted\") pod \"image-registry-66df7c8f76-9j8px\" (UID: \"729be87d-6119-4c4b-8b50-3e3bf5f7f10d\") " pod="openshift-image-registry/image-registry-66df7c8f76-9j8px" Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.354819 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/72328da8-8a37-4bd5-b1f5-c26ce6aefd5a-utilities" (OuterVolumeSpecName: "utilities") pod "72328da8-8a37-4bd5-b1f5-c26ce6aefd5a" (UID: "72328da8-8a37-4bd5-b1f5-c26ce6aefd5a"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.355445 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a236508b-a76f-4029-b748-7bfdbe412825-utilities" (OuterVolumeSpecName: "utilities") pod "a236508b-a76f-4029-b748-7bfdbe412825" (UID: "a236508b-a76f-4029-b748-7bfdbe412825"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.356223 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/abb41535-15fd-41da-9e54-d68ec23a99be-utilities" (OuterVolumeSpecName: "utilities") pod "abb41535-15fd-41da-9e54-d68ec23a99be" (UID: "abb41535-15fd-41da-9e54-d68ec23a99be"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.356635 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/729be87d-6119-4c4b-8b50-3e3bf5f7f10d-trusted-ca\") pod \"image-registry-66df7c8f76-9j8px\" (UID: \"729be87d-6119-4c4b-8b50-3e3bf5f7f10d\") " pod="openshift-image-registry/image-registry-66df7c8f76-9j8px" Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.360072 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/abb41535-15fd-41da-9e54-d68ec23a99be-kube-api-access-xnrrj" (OuterVolumeSpecName: "kube-api-access-xnrrj") pod "abb41535-15fd-41da-9e54-d68ec23a99be" (UID: "abb41535-15fd-41da-9e54-d68ec23a99be"). InnerVolumeSpecName "kube-api-access-xnrrj". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.363899 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/729be87d-6119-4c4b-8b50-3e3bf5f7f10d-registry-certificates\") pod \"image-registry-66df7c8f76-9j8px\" (UID: \"729be87d-6119-4c4b-8b50-3e3bf5f7f10d\") " pod="openshift-image-registry/image-registry-66df7c8f76-9j8px" Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.366330 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/05a0ee0a-7b86-490a-8638-8d74ad1446ea-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "05a0ee0a-7b86-490a-8638-8d74ad1446ea" (UID: "05a0ee0a-7b86-490a-8638-8d74ad1446ea"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.370672 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/05a0ee0a-7b86-490a-8638-8d74ad1446ea-kube-api-access-h4thv" (OuterVolumeSpecName: "kube-api-access-h4thv") pod "05a0ee0a-7b86-490a-8638-8d74ad1446ea" (UID: "05a0ee0a-7b86-490a-8638-8d74ad1446ea"). InnerVolumeSpecName "kube-api-access-h4thv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.373603 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/729be87d-6119-4c4b-8b50-3e3bf5f7f10d-installation-pull-secrets\") pod \"image-registry-66df7c8f76-9j8px\" (UID: \"729be87d-6119-4c4b-8b50-3e3bf5f7f10d\") " pod="openshift-image-registry/image-registry-66df7c8f76-9j8px" Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.374037 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/05a0ee0a-7b86-490a-8638-8d74ad1446ea-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "05a0ee0a-7b86-490a-8638-8d74ad1446ea" (UID: "05a0ee0a-7b86-490a-8638-8d74ad1446ea"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.378817 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/abb41535-15fd-41da-9e54-d68ec23a99be-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "abb41535-15fd-41da-9e54-d68ec23a99be" (UID: "abb41535-15fd-41da-9e54-d68ec23a99be"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.383455 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/72328da8-8a37-4bd5-b1f5-c26ce6aefd5a-kube-api-access-pxbgt" (OuterVolumeSpecName: "kube-api-access-pxbgt") pod "72328da8-8a37-4bd5-b1f5-c26ce6aefd5a" (UID: "72328da8-8a37-4bd5-b1f5-c26ce6aefd5a"). InnerVolumeSpecName "kube-api-access-pxbgt". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.391677 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/729be87d-6119-4c4b-8b50-3e3bf5f7f10d-registry-tls\") pod \"image-registry-66df7c8f76-9j8px\" (UID: \"729be87d-6119-4c4b-8b50-3e3bf5f7f10d\") " pod="openshift-image-registry/image-registry-66df7c8f76-9j8px" Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.396353 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a236508b-a76f-4029-b748-7bfdbe412825-kube-api-access-tsddt" (OuterVolumeSpecName: "kube-api-access-tsddt") pod "a236508b-a76f-4029-b748-7bfdbe412825" (UID: "a236508b-a76f-4029-b748-7bfdbe412825"). InnerVolumeSpecName "kube-api-access-tsddt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.416033 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/729be87d-6119-4c4b-8b50-3e3bf5f7f10d-bound-sa-token\") pod \"image-registry-66df7c8f76-9j8px\" (UID: \"729be87d-6119-4c4b-8b50-3e3bf5f7f10d\") " pod="openshift-image-registry/image-registry-66df7c8f76-9j8px" Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.429265 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-kt8n4" Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.444695 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-shhgw\" (UniqueName: \"kubernetes.io/projected/729be87d-6119-4c4b-8b50-3e3bf5f7f10d-kube-api-access-shhgw\") pod \"image-registry-66df7c8f76-9j8px\" (UID: \"729be87d-6119-4c4b-8b50-3e3bf5f7f10d\") " pod="openshift-image-registry/image-registry-66df7c8f76-9j8px" Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.459682 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/817798d6-9eae-4ee2-9b2f-53c54772866c-catalog-content\") pod \"817798d6-9eae-4ee2-9b2f-53c54772866c\" (UID: \"817798d6-9eae-4ee2-9b2f-53c54772866c\") " Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.459742 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jrwlm\" (UniqueName: \"kubernetes.io/projected/817798d6-9eae-4ee2-9b2f-53c54772866c-kube-api-access-jrwlm\") pod \"817798d6-9eae-4ee2-9b2f-53c54772866c\" (UID: \"817798d6-9eae-4ee2-9b2f-53c54772866c\") " Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.460070 4492 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a236508b-a76f-4029-b748-7bfdbe412825-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.460091 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pxbgt\" (UniqueName: \"kubernetes.io/projected/72328da8-8a37-4bd5-b1f5-c26ce6aefd5a-kube-api-access-pxbgt\") on node \"crc\" DevicePath \"\"" Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.460102 4492 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/05a0ee0a-7b86-490a-8638-8d74ad1446ea-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.460112 4492 
reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xnrrj\" (UniqueName: \"kubernetes.io/projected/abb41535-15fd-41da-9e54-d68ec23a99be-kube-api-access-xnrrj\") on node \"crc\" DevicePath \"\"" Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.460121 4492 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/abb41535-15fd-41da-9e54-d68ec23a99be-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.460130 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h4thv\" (UniqueName: \"kubernetes.io/projected/05a0ee0a-7b86-490a-8638-8d74ad1446ea-kube-api-access-h4thv\") on node \"crc\" DevicePath \"\"" Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.460138 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tsddt\" (UniqueName: \"kubernetes.io/projected/a236508b-a76f-4029-b748-7bfdbe412825-kube-api-access-tsddt\") on node \"crc\" DevicePath \"\"" Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.460147 4492 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/72328da8-8a37-4bd5-b1f5-c26ce6aefd5a-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.460155 4492 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/05a0ee0a-7b86-490a-8638-8d74ad1446ea-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.460165 4492 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/abb41535-15fd-41da-9e54-d68ec23a99be-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.463535 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/817798d6-9eae-4ee2-9b2f-53c54772866c-kube-api-access-jrwlm" (OuterVolumeSpecName: "kube-api-access-jrwlm") pod "817798d6-9eae-4ee2-9b2f-53c54772866c" (UID: "817798d6-9eae-4ee2-9b2f-53c54772866c"). InnerVolumeSpecName "kube-api-access-jrwlm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.512216 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/72328da8-8a37-4bd5-b1f5-c26ce6aefd5a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "72328da8-8a37-4bd5-b1f5-c26ce6aefd5a" (UID: "72328da8-8a37-4bd5-b1f5-c26ce6aefd5a"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.522418 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a236508b-a76f-4029-b748-7bfdbe412825-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a236508b-a76f-4029-b748-7bfdbe412825" (UID: "a236508b-a76f-4029-b748-7bfdbe412825"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.544044 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/817798d6-9eae-4ee2-9b2f-53c54772866c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "817798d6-9eae-4ee2-9b2f-53c54772866c" (UID: "817798d6-9eae-4ee2-9b2f-53c54772866c"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.560540 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/817798d6-9eae-4ee2-9b2f-53c54772866c-utilities\") pod \"817798d6-9eae-4ee2-9b2f-53c54772866c\" (UID: \"817798d6-9eae-4ee2-9b2f-53c54772866c\") " Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.560801 4492 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/72328da8-8a37-4bd5-b1f5-c26ce6aefd5a-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.560821 4492 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a236508b-a76f-4029-b748-7bfdbe412825-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.560831 4492 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/817798d6-9eae-4ee2-9b2f-53c54772866c-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.560843 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jrwlm\" (UniqueName: \"kubernetes.io/projected/817798d6-9eae-4ee2-9b2f-53c54772866c-kube-api-access-jrwlm\") on node \"crc\" DevicePath \"\"" Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.561680 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/817798d6-9eae-4ee2-9b2f-53c54772866c-utilities" (OuterVolumeSpecName: "utilities") pod "817798d6-9eae-4ee2-9b2f-53c54772866c" (UID: "817798d6-9eae-4ee2-9b2f-53c54772866c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.648904 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-t8j7j"] Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.663845 4492 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/817798d6-9eae-4ee2-9b2f-53c54772866c-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.677707 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-9j8px" Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.754980 4492 generic.go:334] "Generic (PLEG): container finished" podID="05a0ee0a-7b86-490a-8638-8d74ad1446ea" containerID="b33f01f8ad4b127a9d7935c9e6fe92cea5dc7760d052ae577d69d26322c11c2d" exitCode=0 Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.755116 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-sb6qj" event={"ID":"05a0ee0a-7b86-490a-8638-8d74ad1446ea","Type":"ContainerDied","Data":"b33f01f8ad4b127a9d7935c9e6fe92cea5dc7760d052ae577d69d26322c11c2d"} Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.755189 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-sb6qj" event={"ID":"05a0ee0a-7b86-490a-8638-8d74ad1446ea","Type":"ContainerDied","Data":"b422421b8ee71fe8d83cd891c6f7c49840b9118d3cbc1874361670252c3eb44a"} Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.755222 4492 scope.go:117] "RemoveContainer" containerID="b33f01f8ad4b127a9d7935c9e6fe92cea5dc7760d052ae577d69d26322c11c2d" Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.755474 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-sb6qj" Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.762708 4492 generic.go:334] "Generic (PLEG): container finished" podID="a236508b-a76f-4029-b748-7bfdbe412825" containerID="7e40925eca9fd7ba3c54ecbded7436204da38f23be02845b51219354c719353f" exitCode=0 Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.762783 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-d925t" Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.762784 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-d925t" event={"ID":"a236508b-a76f-4029-b748-7bfdbe412825","Type":"ContainerDied","Data":"7e40925eca9fd7ba3c54ecbded7436204da38f23be02845b51219354c719353f"} Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.762814 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-d925t" event={"ID":"a236508b-a76f-4029-b748-7bfdbe412825","Type":"ContainerDied","Data":"2d76cf5f0e0acfed3f18147816d606c978c42fa9a9fb1271577f6d96b44b5918"} Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.787949 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-sb6qj"] Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.790652 4492 generic.go:334] "Generic (PLEG): container finished" podID="817798d6-9eae-4ee2-9b2f-53c54772866c" containerID="6bd791c0dc1ff550646965141ea1787aecdf318cf53792607495b7bc2054ce53" exitCode=0 Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.790837 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kt8n4" event={"ID":"817798d6-9eae-4ee2-9b2f-53c54772866c","Type":"ContainerDied","Data":"6bd791c0dc1ff550646965141ea1787aecdf318cf53792607495b7bc2054ce53"} Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.790970 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kt8n4" 
event={"ID":"817798d6-9eae-4ee2-9b2f-53c54772866c","Type":"ContainerDied","Data":"08fabe7574a10f96ee9bb717176c9ca0384a01b34de8877259113e0fd29d111b"} Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.790862 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-kt8n4" Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.797099 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-sb6qj"] Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.817267 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-d925t"] Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.846235 4492 generic.go:334] "Generic (PLEG): container finished" podID="72328da8-8a37-4bd5-b1f5-c26ce6aefd5a" containerID="0a78d28cc29e4520348fd572074b9bf3878f969cc9b8046f95bac20a59e379b9" exitCode=0 Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.846307 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-m6grh" event={"ID":"72328da8-8a37-4bd5-b1f5-c26ce6aefd5a","Type":"ContainerDied","Data":"0a78d28cc29e4520348fd572074b9bf3878f969cc9b8046f95bac20a59e379b9"} Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.846328 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-m6grh" event={"ID":"72328da8-8a37-4bd5-b1f5-c26ce6aefd5a","Type":"ContainerDied","Data":"a0b7baaf880f7af08352c243ca5deb9d6aa28462e86dc0e101a2f0692cbdf059"} Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.846453 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-m6grh" Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.851301 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-d925t"] Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.859382 4492 scope.go:117] "RemoveContainer" containerID="b33f01f8ad4b127a9d7935c9e6fe92cea5dc7760d052ae577d69d26322c11c2d" Nov 26 06:54:34 crc kubenswrapper[4492]: E1126 06:54:34.859731 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b33f01f8ad4b127a9d7935c9e6fe92cea5dc7760d052ae577d69d26322c11c2d\": container with ID starting with b33f01f8ad4b127a9d7935c9e6fe92cea5dc7760d052ae577d69d26322c11c2d not found: ID does not exist" containerID="b33f01f8ad4b127a9d7935c9e6fe92cea5dc7760d052ae577d69d26322c11c2d" Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.859772 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b33f01f8ad4b127a9d7935c9e6fe92cea5dc7760d052ae577d69d26322c11c2d"} err="failed to get container status \"b33f01f8ad4b127a9d7935c9e6fe92cea5dc7760d052ae577d69d26322c11c2d\": rpc error: code = NotFound desc = could not find container \"b33f01f8ad4b127a9d7935c9e6fe92cea5dc7760d052ae577d69d26322c11c2d\": container with ID starting with b33f01f8ad4b127a9d7935c9e6fe92cea5dc7760d052ae577d69d26322c11c2d not found: ID does not exist" Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.859797 4492 scope.go:117] "RemoveContainer" containerID="7e40925eca9fd7ba3c54ecbded7436204da38f23be02845b51219354c719353f" Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.859993 4492 generic.go:334] "Generic (PLEG): container finished" podID="abb41535-15fd-41da-9e54-d68ec23a99be" 
containerID="07cf7fede72449a880e175a0d1e54a6a7cab0c00ea8dc5d72c1213e82e2dd5a5" exitCode=0 Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.860044 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7rl4j" event={"ID":"abb41535-15fd-41da-9e54-d68ec23a99be","Type":"ContainerDied","Data":"07cf7fede72449a880e175a0d1e54a6a7cab0c00ea8dc5d72c1213e82e2dd5a5"} Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.860076 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7rl4j" event={"ID":"abb41535-15fd-41da-9e54-d68ec23a99be","Type":"ContainerDied","Data":"4646573b5aebfb4bd1e281cf66d449172c5e3c9b7309bd3dd908fc152e78ce23"} Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.860157 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-7rl4j" Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.866486 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-t8j7j" event={"ID":"80cbffbd-a369-43ed-a88c-341dca77afa4","Type":"ContainerStarted","Data":"2e8bc9a39776c5cba052803bb7d3761ec20e527afb2795f82df22bf70716b84c"} Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.867882 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-t8j7j" Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.876136 4492 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-t8j7j container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.62:8080/healthz\": dial tcp 10.217.0.62:8080: connect: connection refused" start-of-body= Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.876193 4492 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-t8j7j" podUID="80cbffbd-a369-43ed-a88c-341dca77afa4" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.62:8080/healthz\": dial tcp 10.217.0.62:8080: connect: connection refused" Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.909490 4492 scope.go:117] "RemoveContainer" containerID="5c7b27de1b38f213c5e43f406854becde66739f48740c31d201497ecfad6d0c6" Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.914561 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-t8j7j" podStartSLOduration=1.914548728 podStartE2EDuration="1.914548728s" podCreationTimestamp="2025-11-26 06:54:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:54:34.88775366 +0000 UTC m=+370.771641958" watchObservedRunningTime="2025-11-26 06:54:34.914548728 +0000 UTC m=+370.798437025" Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.919921 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-kt8n4"] Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.923596 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-kt8n4"] Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.942250 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-7rl4j"] Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 
06:54:34.948679 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-7rl4j"] Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.954016 4492 scope.go:117] "RemoveContainer" containerID="538746f3b4832d5f46de80cfa530424b8b962bbe48e5bef5ead3f491ef212b1c" Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.974571 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-m6grh"] Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.984303 4492 scope.go:117] "RemoveContainer" containerID="7e40925eca9fd7ba3c54ecbded7436204da38f23be02845b51219354c719353f" Nov 26 06:54:34 crc kubenswrapper[4492]: E1126 06:54:34.984836 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7e40925eca9fd7ba3c54ecbded7436204da38f23be02845b51219354c719353f\": container with ID starting with 7e40925eca9fd7ba3c54ecbded7436204da38f23be02845b51219354c719353f not found: ID does not exist" containerID="7e40925eca9fd7ba3c54ecbded7436204da38f23be02845b51219354c719353f" Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.984891 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7e40925eca9fd7ba3c54ecbded7436204da38f23be02845b51219354c719353f"} err="failed to get container status \"7e40925eca9fd7ba3c54ecbded7436204da38f23be02845b51219354c719353f\": rpc error: code = NotFound desc = could not find container \"7e40925eca9fd7ba3c54ecbded7436204da38f23be02845b51219354c719353f\": container with ID starting with 7e40925eca9fd7ba3c54ecbded7436204da38f23be02845b51219354c719353f not found: ID does not exist" Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.984942 4492 scope.go:117] "RemoveContainer" containerID="5c7b27de1b38f213c5e43f406854becde66739f48740c31d201497ecfad6d0c6" Nov 26 06:54:34 crc kubenswrapper[4492]: E1126 06:54:34.985320 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5c7b27de1b38f213c5e43f406854becde66739f48740c31d201497ecfad6d0c6\": container with ID starting with 5c7b27de1b38f213c5e43f406854becde66739f48740c31d201497ecfad6d0c6 not found: ID does not exist" containerID="5c7b27de1b38f213c5e43f406854becde66739f48740c31d201497ecfad6d0c6" Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.985353 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5c7b27de1b38f213c5e43f406854becde66739f48740c31d201497ecfad6d0c6"} err="failed to get container status \"5c7b27de1b38f213c5e43f406854becde66739f48740c31d201497ecfad6d0c6\": rpc error: code = NotFound desc = could not find container \"5c7b27de1b38f213c5e43f406854becde66739f48740c31d201497ecfad6d0c6\": container with ID starting with 5c7b27de1b38f213c5e43f406854becde66739f48740c31d201497ecfad6d0c6 not found: ID does not exist" Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.985375 4492 scope.go:117] "RemoveContainer" containerID="538746f3b4832d5f46de80cfa530424b8b962bbe48e5bef5ead3f491ef212b1c" Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.986451 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-m6grh"] Nov 26 06:54:34 crc kubenswrapper[4492]: E1126 06:54:34.991873 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"538746f3b4832d5f46de80cfa530424b8b962bbe48e5bef5ead3f491ef212b1c\": container with ID starting with 538746f3b4832d5f46de80cfa530424b8b962bbe48e5bef5ead3f491ef212b1c not found: ID does not exist" containerID="538746f3b4832d5f46de80cfa530424b8b962bbe48e5bef5ead3f491ef212b1c" Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.991926 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"538746f3b4832d5f46de80cfa530424b8b962bbe48e5bef5ead3f491ef212b1c"} err="failed to get container status \"538746f3b4832d5f46de80cfa530424b8b962bbe48e5bef5ead3f491ef212b1c\": rpc error: code = NotFound desc = could not find container \"538746f3b4832d5f46de80cfa530424b8b962bbe48e5bef5ead3f491ef212b1c\": container with ID starting with 538746f3b4832d5f46de80cfa530424b8b962bbe48e5bef5ead3f491ef212b1c not found: ID does not exist" Nov 26 06:54:34 crc kubenswrapper[4492]: I1126 06:54:34.991957 4492 scope.go:117] "RemoveContainer" containerID="6bd791c0dc1ff550646965141ea1787aecdf318cf53792607495b7bc2054ce53" Nov 26 06:54:35 crc kubenswrapper[4492]: I1126 06:54:35.015792 4492 scope.go:117] "RemoveContainer" containerID="2be9a383b021e7ff801a04f0c6a1da1a44311ad26c1a052124944fa12960f09d" Nov 26 06:54:35 crc kubenswrapper[4492]: I1126 06:54:35.039248 4492 scope.go:117] "RemoveContainer" containerID="6429f1b91aa1ecc22354e8662c6099830fd1c2e2f5217a363dc116336d15634c" Nov 26 06:54:35 crc kubenswrapper[4492]: I1126 06:54:35.054717 4492 scope.go:117] "RemoveContainer" containerID="6bd791c0dc1ff550646965141ea1787aecdf318cf53792607495b7bc2054ce53" Nov 26 06:54:35 crc kubenswrapper[4492]: E1126 06:54:35.057262 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6bd791c0dc1ff550646965141ea1787aecdf318cf53792607495b7bc2054ce53\": container with ID starting with 6bd791c0dc1ff550646965141ea1787aecdf318cf53792607495b7bc2054ce53 not found: ID does not exist" containerID="6bd791c0dc1ff550646965141ea1787aecdf318cf53792607495b7bc2054ce53" Nov 26 06:54:35 crc kubenswrapper[4492]: I1126 06:54:35.057291 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6bd791c0dc1ff550646965141ea1787aecdf318cf53792607495b7bc2054ce53"} err="failed to get container status \"6bd791c0dc1ff550646965141ea1787aecdf318cf53792607495b7bc2054ce53\": rpc error: code = NotFound desc = could not find container \"6bd791c0dc1ff550646965141ea1787aecdf318cf53792607495b7bc2054ce53\": container with ID starting with 6bd791c0dc1ff550646965141ea1787aecdf318cf53792607495b7bc2054ce53 not found: ID does not exist" Nov 26 06:54:35 crc kubenswrapper[4492]: I1126 06:54:35.057309 4492 scope.go:117] "RemoveContainer" containerID="2be9a383b021e7ff801a04f0c6a1da1a44311ad26c1a052124944fa12960f09d" Nov 26 06:54:35 crc kubenswrapper[4492]: E1126 06:54:35.057650 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2be9a383b021e7ff801a04f0c6a1da1a44311ad26c1a052124944fa12960f09d\": container with ID starting with 2be9a383b021e7ff801a04f0c6a1da1a44311ad26c1a052124944fa12960f09d not found: ID does not exist" containerID="2be9a383b021e7ff801a04f0c6a1da1a44311ad26c1a052124944fa12960f09d" Nov 26 06:54:35 crc kubenswrapper[4492]: I1126 06:54:35.057692 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2be9a383b021e7ff801a04f0c6a1da1a44311ad26c1a052124944fa12960f09d"} err="failed to get container status 
\"2be9a383b021e7ff801a04f0c6a1da1a44311ad26c1a052124944fa12960f09d\": rpc error: code = NotFound desc = could not find container \"2be9a383b021e7ff801a04f0c6a1da1a44311ad26c1a052124944fa12960f09d\": container with ID starting with 2be9a383b021e7ff801a04f0c6a1da1a44311ad26c1a052124944fa12960f09d not found: ID does not exist" Nov 26 06:54:35 crc kubenswrapper[4492]: I1126 06:54:35.057722 4492 scope.go:117] "RemoveContainer" containerID="6429f1b91aa1ecc22354e8662c6099830fd1c2e2f5217a363dc116336d15634c" Nov 26 06:54:35 crc kubenswrapper[4492]: E1126 06:54:35.057982 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6429f1b91aa1ecc22354e8662c6099830fd1c2e2f5217a363dc116336d15634c\": container with ID starting with 6429f1b91aa1ecc22354e8662c6099830fd1c2e2f5217a363dc116336d15634c not found: ID does not exist" containerID="6429f1b91aa1ecc22354e8662c6099830fd1c2e2f5217a363dc116336d15634c" Nov 26 06:54:35 crc kubenswrapper[4492]: I1126 06:54:35.058001 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6429f1b91aa1ecc22354e8662c6099830fd1c2e2f5217a363dc116336d15634c"} err="failed to get container status \"6429f1b91aa1ecc22354e8662c6099830fd1c2e2f5217a363dc116336d15634c\": rpc error: code = NotFound desc = could not find container \"6429f1b91aa1ecc22354e8662c6099830fd1c2e2f5217a363dc116336d15634c\": container with ID starting with 6429f1b91aa1ecc22354e8662c6099830fd1c2e2f5217a363dc116336d15634c not found: ID does not exist" Nov 26 06:54:35 crc kubenswrapper[4492]: I1126 06:54:35.058015 4492 scope.go:117] "RemoveContainer" containerID="0a78d28cc29e4520348fd572074b9bf3878f969cc9b8046f95bac20a59e379b9" Nov 26 06:54:35 crc kubenswrapper[4492]: I1126 06:54:35.071436 4492 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-sb6qj container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.17:8080/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 26 06:54:35 crc kubenswrapper[4492]: I1126 06:54:35.071480 4492 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-sb6qj" podUID="05a0ee0a-7b86-490a-8638-8d74ad1446ea" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.17:8080/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 26 06:54:35 crc kubenswrapper[4492]: I1126 06:54:35.075164 4492 scope.go:117] "RemoveContainer" containerID="60985278a6cbc7779a803eccfebdb966727f19f31cd7a677244c294a051e8208" Nov 26 06:54:35 crc kubenswrapper[4492]: I1126 06:54:35.099041 4492 scope.go:117] "RemoveContainer" containerID="2e4a6c0023bb2f1646081d397dec6df189d1f6911645a090ff830f92f3375357" Nov 26 06:54:35 crc kubenswrapper[4492]: I1126 06:54:35.120447 4492 scope.go:117] "RemoveContainer" containerID="0a78d28cc29e4520348fd572074b9bf3878f969cc9b8046f95bac20a59e379b9" Nov 26 06:54:35 crc kubenswrapper[4492]: E1126 06:54:35.120902 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0a78d28cc29e4520348fd572074b9bf3878f969cc9b8046f95bac20a59e379b9\": container with ID starting with 0a78d28cc29e4520348fd572074b9bf3878f969cc9b8046f95bac20a59e379b9 not found: ID does not exist" containerID="0a78d28cc29e4520348fd572074b9bf3878f969cc9b8046f95bac20a59e379b9" Nov 26 06:54:35 crc 
kubenswrapper[4492]: I1126 06:54:35.120945 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0a78d28cc29e4520348fd572074b9bf3878f969cc9b8046f95bac20a59e379b9"} err="failed to get container status \"0a78d28cc29e4520348fd572074b9bf3878f969cc9b8046f95bac20a59e379b9\": rpc error: code = NotFound desc = could not find container \"0a78d28cc29e4520348fd572074b9bf3878f969cc9b8046f95bac20a59e379b9\": container with ID starting with 0a78d28cc29e4520348fd572074b9bf3878f969cc9b8046f95bac20a59e379b9 not found: ID does not exist" Nov 26 06:54:35 crc kubenswrapper[4492]: I1126 06:54:35.120972 4492 scope.go:117] "RemoveContainer" containerID="60985278a6cbc7779a803eccfebdb966727f19f31cd7a677244c294a051e8208" Nov 26 06:54:35 crc kubenswrapper[4492]: E1126 06:54:35.121474 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"60985278a6cbc7779a803eccfebdb966727f19f31cd7a677244c294a051e8208\": container with ID starting with 60985278a6cbc7779a803eccfebdb966727f19f31cd7a677244c294a051e8208 not found: ID does not exist" containerID="60985278a6cbc7779a803eccfebdb966727f19f31cd7a677244c294a051e8208" Nov 26 06:54:35 crc kubenswrapper[4492]: I1126 06:54:35.121533 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"60985278a6cbc7779a803eccfebdb966727f19f31cd7a677244c294a051e8208"} err="failed to get container status \"60985278a6cbc7779a803eccfebdb966727f19f31cd7a677244c294a051e8208\": rpc error: code = NotFound desc = could not find container \"60985278a6cbc7779a803eccfebdb966727f19f31cd7a677244c294a051e8208\": container with ID starting with 60985278a6cbc7779a803eccfebdb966727f19f31cd7a677244c294a051e8208 not found: ID does not exist" Nov 26 06:54:35 crc kubenswrapper[4492]: I1126 06:54:35.121576 4492 scope.go:117] "RemoveContainer" containerID="2e4a6c0023bb2f1646081d397dec6df189d1f6911645a090ff830f92f3375357" Nov 26 06:54:35 crc kubenswrapper[4492]: E1126 06:54:35.122055 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2e4a6c0023bb2f1646081d397dec6df189d1f6911645a090ff830f92f3375357\": container with ID starting with 2e4a6c0023bb2f1646081d397dec6df189d1f6911645a090ff830f92f3375357 not found: ID does not exist" containerID="2e4a6c0023bb2f1646081d397dec6df189d1f6911645a090ff830f92f3375357" Nov 26 06:54:35 crc kubenswrapper[4492]: I1126 06:54:35.122112 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2e4a6c0023bb2f1646081d397dec6df189d1f6911645a090ff830f92f3375357"} err="failed to get container status \"2e4a6c0023bb2f1646081d397dec6df189d1f6911645a090ff830f92f3375357\": rpc error: code = NotFound desc = could not find container \"2e4a6c0023bb2f1646081d397dec6df189d1f6911645a090ff830f92f3375357\": container with ID starting with 2e4a6c0023bb2f1646081d397dec6df189d1f6911645a090ff830f92f3375357 not found: ID does not exist" Nov 26 06:54:35 crc kubenswrapper[4492]: I1126 06:54:35.122152 4492 scope.go:117] "RemoveContainer" containerID="07cf7fede72449a880e175a0d1e54a6a7cab0c00ea8dc5d72c1213e82e2dd5a5" Nov 26 06:54:35 crc kubenswrapper[4492]: I1126 06:54:35.135990 4492 scope.go:117] "RemoveContainer" containerID="04fc4fcd4df3fc430e1e0e4f6c2265d760e496d2a8cbe91c5e9f76641b28f0f2" Nov 26 06:54:35 crc kubenswrapper[4492]: I1126 06:54:35.150729 4492 scope.go:117] "RemoveContainer" 
containerID="af83b13765b0ea434c7438ed702407c9903056625a5ddcef837f8607f4286c70" Nov 26 06:54:35 crc kubenswrapper[4492]: I1126 06:54:35.165291 4492 scope.go:117] "RemoveContainer" containerID="07cf7fede72449a880e175a0d1e54a6a7cab0c00ea8dc5d72c1213e82e2dd5a5" Nov 26 06:54:35 crc kubenswrapper[4492]: E1126 06:54:35.165691 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"07cf7fede72449a880e175a0d1e54a6a7cab0c00ea8dc5d72c1213e82e2dd5a5\": container with ID starting with 07cf7fede72449a880e175a0d1e54a6a7cab0c00ea8dc5d72c1213e82e2dd5a5 not found: ID does not exist" containerID="07cf7fede72449a880e175a0d1e54a6a7cab0c00ea8dc5d72c1213e82e2dd5a5" Nov 26 06:54:35 crc kubenswrapper[4492]: I1126 06:54:35.165736 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"07cf7fede72449a880e175a0d1e54a6a7cab0c00ea8dc5d72c1213e82e2dd5a5"} err="failed to get container status \"07cf7fede72449a880e175a0d1e54a6a7cab0c00ea8dc5d72c1213e82e2dd5a5\": rpc error: code = NotFound desc = could not find container \"07cf7fede72449a880e175a0d1e54a6a7cab0c00ea8dc5d72c1213e82e2dd5a5\": container with ID starting with 07cf7fede72449a880e175a0d1e54a6a7cab0c00ea8dc5d72c1213e82e2dd5a5 not found: ID does not exist" Nov 26 06:54:35 crc kubenswrapper[4492]: I1126 06:54:35.165769 4492 scope.go:117] "RemoveContainer" containerID="04fc4fcd4df3fc430e1e0e4f6c2265d760e496d2a8cbe91c5e9f76641b28f0f2" Nov 26 06:54:35 crc kubenswrapper[4492]: E1126 06:54:35.166116 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"04fc4fcd4df3fc430e1e0e4f6c2265d760e496d2a8cbe91c5e9f76641b28f0f2\": container with ID starting with 04fc4fcd4df3fc430e1e0e4f6c2265d760e496d2a8cbe91c5e9f76641b28f0f2 not found: ID does not exist" containerID="04fc4fcd4df3fc430e1e0e4f6c2265d760e496d2a8cbe91c5e9f76641b28f0f2" Nov 26 06:54:35 crc kubenswrapper[4492]: I1126 06:54:35.166185 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"04fc4fcd4df3fc430e1e0e4f6c2265d760e496d2a8cbe91c5e9f76641b28f0f2"} err="failed to get container status \"04fc4fcd4df3fc430e1e0e4f6c2265d760e496d2a8cbe91c5e9f76641b28f0f2\": rpc error: code = NotFound desc = could not find container \"04fc4fcd4df3fc430e1e0e4f6c2265d760e496d2a8cbe91c5e9f76641b28f0f2\": container with ID starting with 04fc4fcd4df3fc430e1e0e4f6c2265d760e496d2a8cbe91c5e9f76641b28f0f2 not found: ID does not exist" Nov 26 06:54:35 crc kubenswrapper[4492]: I1126 06:54:35.166216 4492 scope.go:117] "RemoveContainer" containerID="af83b13765b0ea434c7438ed702407c9903056625a5ddcef837f8607f4286c70" Nov 26 06:54:35 crc kubenswrapper[4492]: E1126 06:54:35.166539 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"af83b13765b0ea434c7438ed702407c9903056625a5ddcef837f8607f4286c70\": container with ID starting with af83b13765b0ea434c7438ed702407c9903056625a5ddcef837f8607f4286c70 not found: ID does not exist" containerID="af83b13765b0ea434c7438ed702407c9903056625a5ddcef837f8607f4286c70" Nov 26 06:54:35 crc kubenswrapper[4492]: I1126 06:54:35.166574 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"af83b13765b0ea434c7438ed702407c9903056625a5ddcef837f8607f4286c70"} err="failed to get container status \"af83b13765b0ea434c7438ed702407c9903056625a5ddcef837f8607f4286c70\": rpc error: code = 
NotFound desc = could not find container \"af83b13765b0ea434c7438ed702407c9903056625a5ddcef837f8607f4286c70\": container with ID starting with af83b13765b0ea434c7438ed702407c9903056625a5ddcef837f8607f4286c70 not found: ID does not exist" Nov 26 06:54:35 crc kubenswrapper[4492]: I1126 06:54:35.236869 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-9j8px"] Nov 26 06:54:35 crc kubenswrapper[4492]: W1126 06:54:35.239894 4492 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod729be87d_6119_4c4b_8b50_3e3bf5f7f10d.slice/crio-4acd83038d71165621a8dc3ceb6e0b189d420c775f30cbc9e8064ed1ce2bb7cc WatchSource:0}: Error finding container 4acd83038d71165621a8dc3ceb6e0b189d420c775f30cbc9e8064ed1ce2bb7cc: Status 404 returned error can't find the container with id 4acd83038d71165621a8dc3ceb6e0b189d420c775f30cbc9e8064ed1ce2bb7cc Nov 26 06:54:35 crc kubenswrapper[4492]: I1126 06:54:35.893792 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-t8j7j" event={"ID":"80cbffbd-a369-43ed-a88c-341dca77afa4","Type":"ContainerStarted","Data":"ba25221b0072c7956227d15527cf75fdc38699e70f33312fb931e5bea3969651"} Nov 26 06:54:35 crc kubenswrapper[4492]: I1126 06:54:35.894999 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-9j8px" event={"ID":"729be87d-6119-4c4b-8b50-3e3bf5f7f10d","Type":"ContainerStarted","Data":"66c7d8069d45106f577435c369692976964e6978bce210facf20639be81bff63"} Nov 26 06:54:35 crc kubenswrapper[4492]: I1126 06:54:35.895044 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-9j8px" event={"ID":"729be87d-6119-4c4b-8b50-3e3bf5f7f10d","Type":"ContainerStarted","Data":"4acd83038d71165621a8dc3ceb6e0b189d420c775f30cbc9e8064ed1ce2bb7cc"} Nov 26 06:54:35 crc kubenswrapper[4492]: I1126 06:54:35.895187 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-66df7c8f76-9j8px" Nov 26 06:54:35 crc kubenswrapper[4492]: I1126 06:54:35.899196 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-t8j7j" Nov 26 06:54:35 crc kubenswrapper[4492]: I1126 06:54:35.916551 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-66df7c8f76-9j8px" podStartSLOduration=1.916533351 podStartE2EDuration="1.916533351s" podCreationTimestamp="2025-11-26 06:54:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:54:35.911074209 +0000 UTC m=+371.794962526" watchObservedRunningTime="2025-11-26 06:54:35.916533351 +0000 UTC m=+371.800421649" Nov 26 06:54:35 crc kubenswrapper[4492]: I1126 06:54:35.961514 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-7f8ql"] Nov 26 06:54:35 crc kubenswrapper[4492]: E1126 06:54:35.962156 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="abb41535-15fd-41da-9e54-d68ec23a99be" containerName="extract-utilities" Nov 26 06:54:35 crc kubenswrapper[4492]: I1126 06:54:35.962192 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="abb41535-15fd-41da-9e54-d68ec23a99be" containerName="extract-utilities" Nov 26 06:54:35 crc kubenswrapper[4492]: 
E1126 06:54:35.962204 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="72328da8-8a37-4bd5-b1f5-c26ce6aefd5a" containerName="extract-utilities" Nov 26 06:54:35 crc kubenswrapper[4492]: I1126 06:54:35.962210 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="72328da8-8a37-4bd5-b1f5-c26ce6aefd5a" containerName="extract-utilities" Nov 26 06:54:35 crc kubenswrapper[4492]: E1126 06:54:35.962221 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a236508b-a76f-4029-b748-7bfdbe412825" containerName="registry-server" Nov 26 06:54:35 crc kubenswrapper[4492]: I1126 06:54:35.962228 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="a236508b-a76f-4029-b748-7bfdbe412825" containerName="registry-server" Nov 26 06:54:35 crc kubenswrapper[4492]: E1126 06:54:35.962237 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="05a0ee0a-7b86-490a-8638-8d74ad1446ea" containerName="marketplace-operator" Nov 26 06:54:35 crc kubenswrapper[4492]: I1126 06:54:35.962243 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="05a0ee0a-7b86-490a-8638-8d74ad1446ea" containerName="marketplace-operator" Nov 26 06:54:35 crc kubenswrapper[4492]: E1126 06:54:35.962449 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a236508b-a76f-4029-b748-7bfdbe412825" containerName="extract-utilities" Nov 26 06:54:35 crc kubenswrapper[4492]: I1126 06:54:35.962460 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="a236508b-a76f-4029-b748-7bfdbe412825" containerName="extract-utilities" Nov 26 06:54:35 crc kubenswrapper[4492]: E1126 06:54:35.962470 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a236508b-a76f-4029-b748-7bfdbe412825" containerName="extract-content" Nov 26 06:54:35 crc kubenswrapper[4492]: I1126 06:54:35.962476 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="a236508b-a76f-4029-b748-7bfdbe412825" containerName="extract-content" Nov 26 06:54:35 crc kubenswrapper[4492]: E1126 06:54:35.962490 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="abb41535-15fd-41da-9e54-d68ec23a99be" containerName="extract-content" Nov 26 06:54:35 crc kubenswrapper[4492]: I1126 06:54:35.962496 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="abb41535-15fd-41da-9e54-d68ec23a99be" containerName="extract-content" Nov 26 06:54:35 crc kubenswrapper[4492]: E1126 06:54:35.962506 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="817798d6-9eae-4ee2-9b2f-53c54772866c" containerName="extract-utilities" Nov 26 06:54:35 crc kubenswrapper[4492]: I1126 06:54:35.962511 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="817798d6-9eae-4ee2-9b2f-53c54772866c" containerName="extract-utilities" Nov 26 06:54:35 crc kubenswrapper[4492]: E1126 06:54:35.962518 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="abb41535-15fd-41da-9e54-d68ec23a99be" containerName="registry-server" Nov 26 06:54:35 crc kubenswrapper[4492]: I1126 06:54:35.962523 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="abb41535-15fd-41da-9e54-d68ec23a99be" containerName="registry-server" Nov 26 06:54:35 crc kubenswrapper[4492]: E1126 06:54:35.962531 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="72328da8-8a37-4bd5-b1f5-c26ce6aefd5a" containerName="registry-server" Nov 26 06:54:35 crc kubenswrapper[4492]: I1126 06:54:35.962537 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="72328da8-8a37-4bd5-b1f5-c26ce6aefd5a" containerName="registry-server" 
Nov 26 06:54:35 crc kubenswrapper[4492]: E1126 06:54:35.962546 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="817798d6-9eae-4ee2-9b2f-53c54772866c" containerName="registry-server" Nov 26 06:54:35 crc kubenswrapper[4492]: I1126 06:54:35.962552 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="817798d6-9eae-4ee2-9b2f-53c54772866c" containerName="registry-server" Nov 26 06:54:35 crc kubenswrapper[4492]: E1126 06:54:35.962562 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="72328da8-8a37-4bd5-b1f5-c26ce6aefd5a" containerName="extract-content" Nov 26 06:54:35 crc kubenswrapper[4492]: I1126 06:54:35.962568 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="72328da8-8a37-4bd5-b1f5-c26ce6aefd5a" containerName="extract-content" Nov 26 06:54:35 crc kubenswrapper[4492]: E1126 06:54:35.962578 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="817798d6-9eae-4ee2-9b2f-53c54772866c" containerName="extract-content" Nov 26 06:54:35 crc kubenswrapper[4492]: I1126 06:54:35.962583 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="817798d6-9eae-4ee2-9b2f-53c54772866c" containerName="extract-content" Nov 26 06:54:35 crc kubenswrapper[4492]: I1126 06:54:35.962982 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="abb41535-15fd-41da-9e54-d68ec23a99be" containerName="registry-server" Nov 26 06:54:35 crc kubenswrapper[4492]: I1126 06:54:35.963001 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="a236508b-a76f-4029-b748-7bfdbe412825" containerName="registry-server" Nov 26 06:54:35 crc kubenswrapper[4492]: I1126 06:54:35.963011 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="817798d6-9eae-4ee2-9b2f-53c54772866c" containerName="registry-server" Nov 26 06:54:35 crc kubenswrapper[4492]: I1126 06:54:35.963025 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="72328da8-8a37-4bd5-b1f5-c26ce6aefd5a" containerName="registry-server" Nov 26 06:54:35 crc kubenswrapper[4492]: I1126 06:54:35.963031 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="05a0ee0a-7b86-490a-8638-8d74ad1446ea" containerName="marketplace-operator" Nov 26 06:54:35 crc kubenswrapper[4492]: I1126 06:54:35.963795 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-7f8ql" Nov 26 06:54:35 crc kubenswrapper[4492]: I1126 06:54:35.966952 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-7f8ql"] Nov 26 06:54:35 crc kubenswrapper[4492]: I1126 06:54:35.966996 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Nov 26 06:54:36 crc kubenswrapper[4492]: I1126 06:54:36.088941 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xjxvw\" (UniqueName: \"kubernetes.io/projected/e616c31d-21ef-4ce1-8064-5a1dbcbfcbeb-kube-api-access-xjxvw\") pod \"community-operators-7f8ql\" (UID: \"e616c31d-21ef-4ce1-8064-5a1dbcbfcbeb\") " pod="openshift-marketplace/community-operators-7f8ql" Nov 26 06:54:36 crc kubenswrapper[4492]: I1126 06:54:36.089053 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e616c31d-21ef-4ce1-8064-5a1dbcbfcbeb-catalog-content\") pod \"community-operators-7f8ql\" (UID: \"e616c31d-21ef-4ce1-8064-5a1dbcbfcbeb\") " pod="openshift-marketplace/community-operators-7f8ql" Nov 26 06:54:36 crc kubenswrapper[4492]: I1126 06:54:36.089149 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e616c31d-21ef-4ce1-8064-5a1dbcbfcbeb-utilities\") pod \"community-operators-7f8ql\" (UID: \"e616c31d-21ef-4ce1-8064-5a1dbcbfcbeb\") " pod="openshift-marketplace/community-operators-7f8ql" Nov 26 06:54:36 crc kubenswrapper[4492]: I1126 06:54:36.190311 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e616c31d-21ef-4ce1-8064-5a1dbcbfcbeb-catalog-content\") pod \"community-operators-7f8ql\" (UID: \"e616c31d-21ef-4ce1-8064-5a1dbcbfcbeb\") " pod="openshift-marketplace/community-operators-7f8ql" Nov 26 06:54:36 crc kubenswrapper[4492]: I1126 06:54:36.190399 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e616c31d-21ef-4ce1-8064-5a1dbcbfcbeb-utilities\") pod \"community-operators-7f8ql\" (UID: \"e616c31d-21ef-4ce1-8064-5a1dbcbfcbeb\") " pod="openshift-marketplace/community-operators-7f8ql" Nov 26 06:54:36 crc kubenswrapper[4492]: I1126 06:54:36.190479 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xjxvw\" (UniqueName: \"kubernetes.io/projected/e616c31d-21ef-4ce1-8064-5a1dbcbfcbeb-kube-api-access-xjxvw\") pod \"community-operators-7f8ql\" (UID: \"e616c31d-21ef-4ce1-8064-5a1dbcbfcbeb\") " pod="openshift-marketplace/community-operators-7f8ql" Nov 26 06:54:36 crc kubenswrapper[4492]: I1126 06:54:36.190868 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e616c31d-21ef-4ce1-8064-5a1dbcbfcbeb-utilities\") pod \"community-operators-7f8ql\" (UID: \"e616c31d-21ef-4ce1-8064-5a1dbcbfcbeb\") " pod="openshift-marketplace/community-operators-7f8ql" Nov 26 06:54:36 crc kubenswrapper[4492]: I1126 06:54:36.190882 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e616c31d-21ef-4ce1-8064-5a1dbcbfcbeb-catalog-content\") pod \"community-operators-7f8ql\" (UID: 
\"e616c31d-21ef-4ce1-8064-5a1dbcbfcbeb\") " pod="openshift-marketplace/community-operators-7f8ql" Nov 26 06:54:36 crc kubenswrapper[4492]: I1126 06:54:36.208116 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xjxvw\" (UniqueName: \"kubernetes.io/projected/e616c31d-21ef-4ce1-8064-5a1dbcbfcbeb-kube-api-access-xjxvw\") pod \"community-operators-7f8ql\" (UID: \"e616c31d-21ef-4ce1-8064-5a1dbcbfcbeb\") " pod="openshift-marketplace/community-operators-7f8ql" Nov 26 06:54:36 crc kubenswrapper[4492]: I1126 06:54:36.284024 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-7f8ql" Nov 26 06:54:36 crc kubenswrapper[4492]: I1126 06:54:36.456274 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="05a0ee0a-7b86-490a-8638-8d74ad1446ea" path="/var/lib/kubelet/pods/05a0ee0a-7b86-490a-8638-8d74ad1446ea/volumes" Nov 26 06:54:36 crc kubenswrapper[4492]: I1126 06:54:36.457604 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="72328da8-8a37-4bd5-b1f5-c26ce6aefd5a" path="/var/lib/kubelet/pods/72328da8-8a37-4bd5-b1f5-c26ce6aefd5a/volumes" Nov 26 06:54:36 crc kubenswrapper[4492]: I1126 06:54:36.459107 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="817798d6-9eae-4ee2-9b2f-53c54772866c" path="/var/lib/kubelet/pods/817798d6-9eae-4ee2-9b2f-53c54772866c/volumes" Nov 26 06:54:36 crc kubenswrapper[4492]: I1126 06:54:36.459861 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a236508b-a76f-4029-b748-7bfdbe412825" path="/var/lib/kubelet/pods/a236508b-a76f-4029-b748-7bfdbe412825/volumes" Nov 26 06:54:36 crc kubenswrapper[4492]: I1126 06:54:36.460895 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="abb41535-15fd-41da-9e54-d68ec23a99be" path="/var/lib/kubelet/pods/abb41535-15fd-41da-9e54-d68ec23a99be/volumes" Nov 26 06:54:36 crc kubenswrapper[4492]: I1126 06:54:36.956472 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-grmnn"] Nov 26 06:54:36 crc kubenswrapper[4492]: I1126 06:54:36.957714 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-grmnn" Nov 26 06:54:36 crc kubenswrapper[4492]: I1126 06:54:36.961611 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Nov 26 06:54:36 crc kubenswrapper[4492]: I1126 06:54:36.965691 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-grmnn"] Nov 26 06:54:37 crc kubenswrapper[4492]: I1126 06:54:37.103540 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qkh9t\" (UniqueName: \"kubernetes.io/projected/c3fb9fab-ebd1-4433-b24d-16d6ae8b330d-kube-api-access-qkh9t\") pod \"redhat-marketplace-grmnn\" (UID: \"c3fb9fab-ebd1-4433-b24d-16d6ae8b330d\") " pod="openshift-marketplace/redhat-marketplace-grmnn" Nov 26 06:54:37 crc kubenswrapper[4492]: I1126 06:54:37.103616 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c3fb9fab-ebd1-4433-b24d-16d6ae8b330d-utilities\") pod \"redhat-marketplace-grmnn\" (UID: \"c3fb9fab-ebd1-4433-b24d-16d6ae8b330d\") " pod="openshift-marketplace/redhat-marketplace-grmnn" Nov 26 06:54:37 crc kubenswrapper[4492]: I1126 06:54:37.103643 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c3fb9fab-ebd1-4433-b24d-16d6ae8b330d-catalog-content\") pod \"redhat-marketplace-grmnn\" (UID: \"c3fb9fab-ebd1-4433-b24d-16d6ae8b330d\") " pod="openshift-marketplace/redhat-marketplace-grmnn" Nov 26 06:54:37 crc kubenswrapper[4492]: I1126 06:54:37.133769 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-7f8ql"] Nov 26 06:54:37 crc kubenswrapper[4492]: I1126 06:54:37.205068 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qkh9t\" (UniqueName: \"kubernetes.io/projected/c3fb9fab-ebd1-4433-b24d-16d6ae8b330d-kube-api-access-qkh9t\") pod \"redhat-marketplace-grmnn\" (UID: \"c3fb9fab-ebd1-4433-b24d-16d6ae8b330d\") " pod="openshift-marketplace/redhat-marketplace-grmnn" Nov 26 06:54:37 crc kubenswrapper[4492]: I1126 06:54:37.205159 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c3fb9fab-ebd1-4433-b24d-16d6ae8b330d-utilities\") pod \"redhat-marketplace-grmnn\" (UID: \"c3fb9fab-ebd1-4433-b24d-16d6ae8b330d\") " pod="openshift-marketplace/redhat-marketplace-grmnn" Nov 26 06:54:37 crc kubenswrapper[4492]: I1126 06:54:37.205299 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c3fb9fab-ebd1-4433-b24d-16d6ae8b330d-catalog-content\") pod \"redhat-marketplace-grmnn\" (UID: \"c3fb9fab-ebd1-4433-b24d-16d6ae8b330d\") " pod="openshift-marketplace/redhat-marketplace-grmnn" Nov 26 06:54:37 crc kubenswrapper[4492]: I1126 06:54:37.205660 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c3fb9fab-ebd1-4433-b24d-16d6ae8b330d-utilities\") pod \"redhat-marketplace-grmnn\" (UID: \"c3fb9fab-ebd1-4433-b24d-16d6ae8b330d\") " pod="openshift-marketplace/redhat-marketplace-grmnn" Nov 26 06:54:37 crc kubenswrapper[4492]: I1126 06:54:37.205719 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c3fb9fab-ebd1-4433-b24d-16d6ae8b330d-catalog-content\") pod \"redhat-marketplace-grmnn\" (UID: \"c3fb9fab-ebd1-4433-b24d-16d6ae8b330d\") " pod="openshift-marketplace/redhat-marketplace-grmnn" Nov 26 06:54:37 crc kubenswrapper[4492]: I1126 06:54:37.223873 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qkh9t\" (UniqueName: \"kubernetes.io/projected/c3fb9fab-ebd1-4433-b24d-16d6ae8b330d-kube-api-access-qkh9t\") pod \"redhat-marketplace-grmnn\" (UID: \"c3fb9fab-ebd1-4433-b24d-16d6ae8b330d\") " pod="openshift-marketplace/redhat-marketplace-grmnn" Nov 26 06:54:37 crc kubenswrapper[4492]: I1126 06:54:37.274722 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-grmnn" Nov 26 06:54:37 crc kubenswrapper[4492]: I1126 06:54:37.640313 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-grmnn"] Nov 26 06:54:37 crc kubenswrapper[4492]: W1126 06:54:37.647000 4492 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc3fb9fab_ebd1_4433_b24d_16d6ae8b330d.slice/crio-4e8770f17bbb584a6907c3c9d100ab6c8c4a7117d11d2eeebc77f5526d029a14 WatchSource:0}: Error finding container 4e8770f17bbb584a6907c3c9d100ab6c8c4a7117d11d2eeebc77f5526d029a14: Status 404 returned error can't find the container with id 4e8770f17bbb584a6907c3c9d100ab6c8c4a7117d11d2eeebc77f5526d029a14 Nov 26 06:54:37 crc kubenswrapper[4492]: I1126 06:54:37.906687 4492 generic.go:334] "Generic (PLEG): container finished" podID="c3fb9fab-ebd1-4433-b24d-16d6ae8b330d" containerID="289f8d9bc774f29cc9c68f805510cca182bc77cd573cf8eca85924a628107198" exitCode=0 Nov 26 06:54:37 crc kubenswrapper[4492]: I1126 06:54:37.906805 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-grmnn" event={"ID":"c3fb9fab-ebd1-4433-b24d-16d6ae8b330d","Type":"ContainerDied","Data":"289f8d9bc774f29cc9c68f805510cca182bc77cd573cf8eca85924a628107198"} Nov 26 06:54:37 crc kubenswrapper[4492]: I1126 06:54:37.906878 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-grmnn" event={"ID":"c3fb9fab-ebd1-4433-b24d-16d6ae8b330d","Type":"ContainerStarted","Data":"4e8770f17bbb584a6907c3c9d100ab6c8c4a7117d11d2eeebc77f5526d029a14"} Nov 26 06:54:37 crc kubenswrapper[4492]: I1126 06:54:37.908513 4492 generic.go:334] "Generic (PLEG): container finished" podID="e616c31d-21ef-4ce1-8064-5a1dbcbfcbeb" containerID="e30c164dc24a3dba2a5f16cce4e8017d3aac7128061982f54b427bf5f1878452" exitCode=0 Nov 26 06:54:37 crc kubenswrapper[4492]: I1126 06:54:37.909845 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7f8ql" event={"ID":"e616c31d-21ef-4ce1-8064-5a1dbcbfcbeb","Type":"ContainerDied","Data":"e30c164dc24a3dba2a5f16cce4e8017d3aac7128061982f54b427bf5f1878452"} Nov 26 06:54:37 crc kubenswrapper[4492]: I1126 06:54:37.909871 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7f8ql" event={"ID":"e616c31d-21ef-4ce1-8064-5a1dbcbfcbeb","Type":"ContainerStarted","Data":"964fb1efd0ddf8c930f565deecc35f05570b83c671fc60ffb2bd4d3fca9cd403"} Nov 26 06:54:38 crc kubenswrapper[4492]: I1126 06:54:38.364615 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-4z9tm"] Nov 26 06:54:38 crc 
kubenswrapper[4492]: I1126 06:54:38.367090 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-4z9tm"
Nov 26 06:54:38 crc kubenswrapper[4492]: I1126 06:54:38.370852 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh"
Nov 26 06:54:38 crc kubenswrapper[4492]: I1126 06:54:38.375405 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-4z9tm"]
Nov 26 06:54:38 crc kubenswrapper[4492]: I1126 06:54:38.525402 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/868eed27-2d26-48c8-9070-c0142aa6c2ac-utilities\") pod \"redhat-operators-4z9tm\" (UID: \"868eed27-2d26-48c8-9070-c0142aa6c2ac\") " pod="openshift-marketplace/redhat-operators-4z9tm"
Nov 26 06:54:38 crc kubenswrapper[4492]: I1126 06:54:38.525482 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/868eed27-2d26-48c8-9070-c0142aa6c2ac-catalog-content\") pod \"redhat-operators-4z9tm\" (UID: \"868eed27-2d26-48c8-9070-c0142aa6c2ac\") " pod="openshift-marketplace/redhat-operators-4z9tm"
Nov 26 06:54:38 crc kubenswrapper[4492]: I1126 06:54:38.525539 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-45kb4\" (UniqueName: \"kubernetes.io/projected/868eed27-2d26-48c8-9070-c0142aa6c2ac-kube-api-access-45kb4\") pod \"redhat-operators-4z9tm\" (UID: \"868eed27-2d26-48c8-9070-c0142aa6c2ac\") " pod="openshift-marketplace/redhat-operators-4z9tm"
Nov 26 06:54:38 crc kubenswrapper[4492]: I1126 06:54:38.626376 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-45kb4\" (UniqueName: \"kubernetes.io/projected/868eed27-2d26-48c8-9070-c0142aa6c2ac-kube-api-access-45kb4\") pod \"redhat-operators-4z9tm\" (UID: \"868eed27-2d26-48c8-9070-c0142aa6c2ac\") " pod="openshift-marketplace/redhat-operators-4z9tm"
Nov 26 06:54:38 crc kubenswrapper[4492]: I1126 06:54:38.626485 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/868eed27-2d26-48c8-9070-c0142aa6c2ac-utilities\") pod \"redhat-operators-4z9tm\" (UID: \"868eed27-2d26-48c8-9070-c0142aa6c2ac\") " pod="openshift-marketplace/redhat-operators-4z9tm"
Nov 26 06:54:38 crc kubenswrapper[4492]: I1126 06:54:38.626530 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/868eed27-2d26-48c8-9070-c0142aa6c2ac-catalog-content\") pod \"redhat-operators-4z9tm\" (UID: \"868eed27-2d26-48c8-9070-c0142aa6c2ac\") " pod="openshift-marketplace/redhat-operators-4z9tm"
Nov 26 06:54:38 crc kubenswrapper[4492]: I1126 06:54:38.627035 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/868eed27-2d26-48c8-9070-c0142aa6c2ac-catalog-content\") pod \"redhat-operators-4z9tm\" (UID: \"868eed27-2d26-48c8-9070-c0142aa6c2ac\") " pod="openshift-marketplace/redhat-operators-4z9tm"
Nov 26 06:54:38 crc kubenswrapper[4492]: I1126 06:54:38.627138 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/868eed27-2d26-48c8-9070-c0142aa6c2ac-utilities\") pod \"redhat-operators-4z9tm\" (UID: \"868eed27-2d26-48c8-9070-c0142aa6c2ac\") " pod="openshift-marketplace/redhat-operators-4z9tm"
Nov 26 06:54:38 crc kubenswrapper[4492]: I1126 06:54:38.648433 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-45kb4\" (UniqueName: \"kubernetes.io/projected/868eed27-2d26-48c8-9070-c0142aa6c2ac-kube-api-access-45kb4\") pod \"redhat-operators-4z9tm\" (UID: \"868eed27-2d26-48c8-9070-c0142aa6c2ac\") " pod="openshift-marketplace/redhat-operators-4z9tm"
Nov 26 06:54:38 crc kubenswrapper[4492]: I1126 06:54:38.688827 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-4z9tm"
Nov 26 06:54:38 crc kubenswrapper[4492]: I1126 06:54:38.918939 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7f8ql" event={"ID":"e616c31d-21ef-4ce1-8064-5a1dbcbfcbeb","Type":"ContainerStarted","Data":"172cd6e86fbda1c2864345cbe5b778d0cf256119e5af6e64cec8e6e2745064ba"}
Nov 26 06:54:38 crc kubenswrapper[4492]: I1126 06:54:38.923989 4492 generic.go:334] "Generic (PLEG): container finished" podID="c3fb9fab-ebd1-4433-b24d-16d6ae8b330d" containerID="bbda1cbcc4c49af683812fab369ec267a3bcb48a97f7ba46fd7e220b7d9e3d47" exitCode=0
Nov 26 06:54:38 crc kubenswrapper[4492]: I1126 06:54:38.924054 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-grmnn" event={"ID":"c3fb9fab-ebd1-4433-b24d-16d6ae8b330d","Type":"ContainerDied","Data":"bbda1cbcc4c49af683812fab369ec267a3bcb48a97f7ba46fd7e220b7d9e3d47"}
Nov 26 06:54:39 crc kubenswrapper[4492]: I1126 06:54:39.100786 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-4z9tm"]
Nov 26 06:54:39 crc kubenswrapper[4492]: W1126 06:54:39.106644 4492 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod868eed27_2d26_48c8_9070_c0142aa6c2ac.slice/crio-fb0528678783d60ea93e91b95cc22b2c12766153dac6f49eb060f2b8745e3fb5 WatchSource:0}: Error finding container fb0528678783d60ea93e91b95cc22b2c12766153dac6f49eb060f2b8745e3fb5: Status 404 returned error can't find the container with id fb0528678783d60ea93e91b95cc22b2c12766153dac6f49eb060f2b8745e3fb5
Nov 26 06:54:39 crc kubenswrapper[4492]: I1126 06:54:39.370980 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-sc8jj"]
Nov 26 06:54:39 crc kubenswrapper[4492]: I1126 06:54:39.372267 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-sc8jj"
Need to start a new one" pod="openshift-marketplace/certified-operators-sc8jj" Nov 26 06:54:39 crc kubenswrapper[4492]: I1126 06:54:39.378983 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-sc8jj"] Nov 26 06:54:39 crc kubenswrapper[4492]: I1126 06:54:39.381697 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Nov 26 06:54:39 crc kubenswrapper[4492]: I1126 06:54:39.438315 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ae3e0bc6-fd20-4da9-9fd1-c537bc7aa21c-utilities\") pod \"certified-operators-sc8jj\" (UID: \"ae3e0bc6-fd20-4da9-9fd1-c537bc7aa21c\") " pod="openshift-marketplace/certified-operators-sc8jj" Nov 26 06:54:39 crc kubenswrapper[4492]: I1126 06:54:39.438362 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-58td2\" (UniqueName: \"kubernetes.io/projected/ae3e0bc6-fd20-4da9-9fd1-c537bc7aa21c-kube-api-access-58td2\") pod \"certified-operators-sc8jj\" (UID: \"ae3e0bc6-fd20-4da9-9fd1-c537bc7aa21c\") " pod="openshift-marketplace/certified-operators-sc8jj" Nov 26 06:54:39 crc kubenswrapper[4492]: I1126 06:54:39.438396 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ae3e0bc6-fd20-4da9-9fd1-c537bc7aa21c-catalog-content\") pod \"certified-operators-sc8jj\" (UID: \"ae3e0bc6-fd20-4da9-9fd1-c537bc7aa21c\") " pod="openshift-marketplace/certified-operators-sc8jj" Nov 26 06:54:39 crc kubenswrapper[4492]: I1126 06:54:39.539400 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ae3e0bc6-fd20-4da9-9fd1-c537bc7aa21c-catalog-content\") pod \"certified-operators-sc8jj\" (UID: \"ae3e0bc6-fd20-4da9-9fd1-c537bc7aa21c\") " pod="openshift-marketplace/certified-operators-sc8jj" Nov 26 06:54:39 crc kubenswrapper[4492]: I1126 06:54:39.539550 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ae3e0bc6-fd20-4da9-9fd1-c537bc7aa21c-utilities\") pod \"certified-operators-sc8jj\" (UID: \"ae3e0bc6-fd20-4da9-9fd1-c537bc7aa21c\") " pod="openshift-marketplace/certified-operators-sc8jj" Nov 26 06:54:39 crc kubenswrapper[4492]: I1126 06:54:39.539594 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-58td2\" (UniqueName: \"kubernetes.io/projected/ae3e0bc6-fd20-4da9-9fd1-c537bc7aa21c-kube-api-access-58td2\") pod \"certified-operators-sc8jj\" (UID: \"ae3e0bc6-fd20-4da9-9fd1-c537bc7aa21c\") " pod="openshift-marketplace/certified-operators-sc8jj" Nov 26 06:54:39 crc kubenswrapper[4492]: I1126 06:54:39.540028 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ae3e0bc6-fd20-4da9-9fd1-c537bc7aa21c-catalog-content\") pod \"certified-operators-sc8jj\" (UID: \"ae3e0bc6-fd20-4da9-9fd1-c537bc7aa21c\") " pod="openshift-marketplace/certified-operators-sc8jj" Nov 26 06:54:39 crc kubenswrapper[4492]: I1126 06:54:39.540432 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ae3e0bc6-fd20-4da9-9fd1-c537bc7aa21c-utilities\") pod \"certified-operators-sc8jj\" (UID: 
\"ae3e0bc6-fd20-4da9-9fd1-c537bc7aa21c\") " pod="openshift-marketplace/certified-operators-sc8jj" Nov 26 06:54:39 crc kubenswrapper[4492]: I1126 06:54:39.556668 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-58td2\" (UniqueName: \"kubernetes.io/projected/ae3e0bc6-fd20-4da9-9fd1-c537bc7aa21c-kube-api-access-58td2\") pod \"certified-operators-sc8jj\" (UID: \"ae3e0bc6-fd20-4da9-9fd1-c537bc7aa21c\") " pod="openshift-marketplace/certified-operators-sc8jj" Nov 26 06:54:39 crc kubenswrapper[4492]: I1126 06:54:39.696054 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-sc8jj" Nov 26 06:54:39 crc kubenswrapper[4492]: I1126 06:54:39.935332 4492 generic.go:334] "Generic (PLEG): container finished" podID="e616c31d-21ef-4ce1-8064-5a1dbcbfcbeb" containerID="172cd6e86fbda1c2864345cbe5b778d0cf256119e5af6e64cec8e6e2745064ba" exitCode=0 Nov 26 06:54:39 crc kubenswrapper[4492]: I1126 06:54:39.935635 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7f8ql" event={"ID":"e616c31d-21ef-4ce1-8064-5a1dbcbfcbeb","Type":"ContainerDied","Data":"172cd6e86fbda1c2864345cbe5b778d0cf256119e5af6e64cec8e6e2745064ba"} Nov 26 06:54:39 crc kubenswrapper[4492]: I1126 06:54:39.937217 4492 generic.go:334] "Generic (PLEG): container finished" podID="868eed27-2d26-48c8-9070-c0142aa6c2ac" containerID="b912fa40877e045f3afcabb9f1d95a137d4c5d5871113c575b9faf8f40a450d9" exitCode=0 Nov 26 06:54:39 crc kubenswrapper[4492]: I1126 06:54:39.937253 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4z9tm" event={"ID":"868eed27-2d26-48c8-9070-c0142aa6c2ac","Type":"ContainerDied","Data":"b912fa40877e045f3afcabb9f1d95a137d4c5d5871113c575b9faf8f40a450d9"} Nov 26 06:54:39 crc kubenswrapper[4492]: I1126 06:54:39.937272 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4z9tm" event={"ID":"868eed27-2d26-48c8-9070-c0142aa6c2ac","Type":"ContainerStarted","Data":"fb0528678783d60ea93e91b95cc22b2c12766153dac6f49eb060f2b8745e3fb5"} Nov 26 06:54:39 crc kubenswrapper[4492]: I1126 06:54:39.941877 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-grmnn" event={"ID":"c3fb9fab-ebd1-4433-b24d-16d6ae8b330d","Type":"ContainerStarted","Data":"8570e757a902826227083f6ff8137aa29d13d27f502078cc058e30d63b59f2ed"} Nov 26 06:54:40 crc kubenswrapper[4492]: I1126 06:54:40.106579 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-grmnn" podStartSLOduration=2.516098571 podStartE2EDuration="4.106555929s" podCreationTimestamp="2025-11-26 06:54:36 +0000 UTC" firstStartedPulling="2025-11-26 06:54:37.909964278 +0000 UTC m=+373.793852575" lastFinishedPulling="2025-11-26 06:54:39.500421635 +0000 UTC m=+375.384309933" observedRunningTime="2025-11-26 06:54:40.009539817 +0000 UTC m=+375.893428114" watchObservedRunningTime="2025-11-26 06:54:40.106555929 +0000 UTC m=+375.990444227" Nov 26 06:54:40 crc kubenswrapper[4492]: I1126 06:54:40.110140 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-sc8jj"] Nov 26 06:54:40 crc kubenswrapper[4492]: W1126 06:54:40.112925 4492 manager.go:1169] Failed to process watch event {EventType:0 
Nov 26 06:54:40 crc kubenswrapper[4492]: W1126 06:54:40.112925 4492 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podae3e0bc6_fd20_4da9_9fd1_c537bc7aa21c.slice/crio-6f5d60eebcdf3c7f883b2ebe083e9e73963a941de4004eb1933719369eb4ac25 WatchSource:0}: Error finding container 6f5d60eebcdf3c7f883b2ebe083e9e73963a941de4004eb1933719369eb4ac25: Status 404 returned error can't find the container with id 6f5d60eebcdf3c7f883b2ebe083e9e73963a941de4004eb1933719369eb4ac25
Nov 26 06:54:40 crc kubenswrapper[4492]: I1126 06:54:40.954022 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7f8ql" event={"ID":"e616c31d-21ef-4ce1-8064-5a1dbcbfcbeb","Type":"ContainerStarted","Data":"fa514244127ce8614b74c592887753a5edb0ee8e6dedd55ebf7d7c67c0e77edc"}
Nov 26 06:54:40 crc kubenswrapper[4492]: I1126 06:54:40.956535 4492 generic.go:334] "Generic (PLEG): container finished" podID="ae3e0bc6-fd20-4da9-9fd1-c537bc7aa21c" containerID="06e22663b5a8fc4d140a1589af5cf73e9c1f97ae3b420a40763aeed362bc3456" exitCode=0
Nov 26 06:54:40 crc kubenswrapper[4492]: I1126 06:54:40.956865 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sc8jj" event={"ID":"ae3e0bc6-fd20-4da9-9fd1-c537bc7aa21c","Type":"ContainerDied","Data":"06e22663b5a8fc4d140a1589af5cf73e9c1f97ae3b420a40763aeed362bc3456"}
Nov 26 06:54:40 crc kubenswrapper[4492]: I1126 06:54:40.956947 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sc8jj" event={"ID":"ae3e0bc6-fd20-4da9-9fd1-c537bc7aa21c","Type":"ContainerStarted","Data":"6f5d60eebcdf3c7f883b2ebe083e9e73963a941de4004eb1933719369eb4ac25"}
Nov 26 06:54:40 crc kubenswrapper[4492]: I1126 06:54:40.974538 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-7f8ql" podStartSLOduration=3.371448379 podStartE2EDuration="5.974516053s" podCreationTimestamp="2025-11-26 06:54:35 +0000 UTC" firstStartedPulling="2025-11-26 06:54:37.911314285 +0000 UTC m=+373.795202583" lastFinishedPulling="2025-11-26 06:54:40.514381969 +0000 UTC m=+376.398270257" observedRunningTime="2025-11-26 06:54:40.973949528 +0000 UTC m=+376.857837827" watchObservedRunningTime="2025-11-26 06:54:40.974516053 +0000 UTC m=+376.858404352"
Nov 26 06:54:42 crc kubenswrapper[4492]: I1126 06:54:42.970427 4492 generic.go:334] "Generic (PLEG): container finished" podID="868eed27-2d26-48c8-9070-c0142aa6c2ac" containerID="b3ffcb1541cd00d0bf0b5a096ceaa6546092b41de0c67aa2d16c6301b9ae2aeb" exitCode=0
Nov 26 06:54:42 crc kubenswrapper[4492]: I1126 06:54:42.970554 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4z9tm" event={"ID":"868eed27-2d26-48c8-9070-c0142aa6c2ac","Type":"ContainerDied","Data":"b3ffcb1541cd00d0bf0b5a096ceaa6546092b41de0c67aa2d16c6301b9ae2aeb"}
Nov 26 06:54:42 crc kubenswrapper[4492]: I1126 06:54:42.978204 4492 generic.go:334] "Generic (PLEG): container finished" podID="ae3e0bc6-fd20-4da9-9fd1-c537bc7aa21c" containerID="ebbf1692296826ce2943a850b965f980864be265101fb846421e655c9f2be771" exitCode=0
Nov 26 06:54:42 crc kubenswrapper[4492]: I1126 06:54:42.978253 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sc8jj" event={"ID":"ae3e0bc6-fd20-4da9-9fd1-c537bc7aa21c","Type":"ContainerDied","Data":"ebbf1692296826ce2943a850b965f980864be265101fb846421e655c9f2be771"}
Nov 26 06:54:43 crc kubenswrapper[4492]: I1126 06:54:43.989144 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sc8jj" event={"ID":"ae3e0bc6-fd20-4da9-9fd1-c537bc7aa21c","Type":"ContainerStarted","Data":"34df7331a699e1e23cf32b69a5b627d2cde72c95fda1b40797dfcb98c26c4793"}
Nov 26 06:54:43 crc kubenswrapper[4492]: I1126 06:54:43.992061 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4z9tm" event={"ID":"868eed27-2d26-48c8-9070-c0142aa6c2ac","Type":"ContainerStarted","Data":"117f68d0c6ca96a2dc25d53856f178f52937861c7d6658bacc33483424cc9c74"}
Nov 26 06:54:44 crc kubenswrapper[4492]: I1126 06:54:44.010979 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-sc8jj" podStartSLOduration=2.46988492 podStartE2EDuration="5.010967218s" podCreationTimestamp="2025-11-26 06:54:39 +0000 UTC" firstStartedPulling="2025-11-26 06:54:40.958871396 +0000 UTC m=+376.842759695" lastFinishedPulling="2025-11-26 06:54:43.499953695 +0000 UTC m=+379.383841993" observedRunningTime="2025-11-26 06:54:44.007811257 +0000 UTC m=+379.891699554" watchObservedRunningTime="2025-11-26 06:54:44.010967218 +0000 UTC m=+379.894855516"
Nov 26 06:54:46 crc kubenswrapper[4492]: I1126 06:54:46.285475 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-7f8ql"
Nov 26 06:54:46 crc kubenswrapper[4492]: I1126 06:54:46.285888 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-7f8ql"
Nov 26 06:54:46 crc kubenswrapper[4492]: I1126 06:54:46.324618 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-7f8ql"
Nov 26 06:54:46 crc kubenswrapper[4492]: I1126 06:54:46.344990 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-4z9tm" podStartSLOduration=4.735812152 podStartE2EDuration="8.344967872s" podCreationTimestamp="2025-11-26 06:54:38 +0000 UTC" firstStartedPulling="2025-11-26 06:54:39.940472399 +0000 UTC m=+375.824360697" lastFinishedPulling="2025-11-26 06:54:43.54962812 +0000 UTC m=+379.433516417" observedRunningTime="2025-11-26 06:54:44.032265263 +0000 UTC m=+379.916153561" watchObservedRunningTime="2025-11-26 06:54:46.344967872 +0000 UTC m=+382.228856170"
Nov 26 06:54:47 crc kubenswrapper[4492]: I1126 06:54:47.039813 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-7f8ql"
Nov 26 06:54:47 crc kubenswrapper[4492]: I1126 06:54:47.275554 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-grmnn"
Nov 26 06:54:47 crc kubenswrapper[4492]: I1126 06:54:47.275876 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-grmnn"
Nov 26 06:54:47 crc kubenswrapper[4492]: I1126 06:54:47.307828 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-grmnn"
Nov 26 06:54:48 crc kubenswrapper[4492]: I1126 06:54:48.055763 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-grmnn"
Nov 26 06:54:48 crc kubenswrapper[4492]: I1126 06:54:48.689795 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-4z9tm"
Nov 26 06:54:48 crc kubenswrapper[4492]: I1126 06:54:48.689859 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-4z9tm"
Nov 26 06:54:48 crc kubenswrapper[4492]: I1126 06:54:48.728223 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-4z9tm"
Nov 26 06:54:49 crc kubenswrapper[4492]: I1126 06:54:49.050342 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-4z9tm"
Nov 26 06:54:49 crc kubenswrapper[4492]: I1126 06:54:49.441771 4492 patch_prober.go:28] interesting pod/machine-config-daemon-6blv7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 26 06:54:49 crc kubenswrapper[4492]: I1126 06:54:49.441839 4492 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 26 06:54:49 crc kubenswrapper[4492]: I1126 06:54:49.441887 4492 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-6blv7"
Nov 26 06:54:49 crc kubenswrapper[4492]: I1126 06:54:49.442273 4492 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"ebb7c14f054bf1a3e0275de2bfc80f082c3d5f1d3a6a0ca4e02d8ce5ee7ee01b"} pod="openshift-machine-config-operator/machine-config-daemon-6blv7" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 26 06:54:49 crc kubenswrapper[4492]: I1126 06:54:49.442343 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" containerID="cri-o://ebb7c14f054bf1a3e0275de2bfc80f082c3d5f1d3a6a0ca4e02d8ce5ee7ee01b" gracePeriod=600
Nov 26 06:54:49 crc kubenswrapper[4492]: I1126 06:54:49.696193 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-sc8jj"
Nov 26 06:54:49 crc kubenswrapper[4492]: I1126 06:54:49.696269 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-sc8jj"
Nov 26 06:54:49 crc kubenswrapper[4492]: I1126 06:54:49.732118 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-sc8jj"
Nov 26 06:54:50 crc kubenswrapper[4492]: I1126 06:54:50.026487 4492 generic.go:334] "Generic (PLEG): container finished" podID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerID="ebb7c14f054bf1a3e0275de2bfc80f082c3d5f1d3a6a0ca4e02d8ce5ee7ee01b" exitCode=0
Nov 26 06:54:50 crc kubenswrapper[4492]: I1126 06:54:50.026634 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" event={"ID":"04bf18ad-d2a1-4b30-a3fa-2b6247363c82","Type":"ContainerDied","Data":"ebb7c14f054bf1a3e0275de2bfc80f082c3d5f1d3a6a0ca4e02d8ce5ee7ee01b"}
Nov 26 06:54:50 crc kubenswrapper[4492]: I1126 06:54:50.026933 4492 scope.go:117] "RemoveContainer" containerID="695ce8a08afa726c47c6aa48ddd828cbc420a9740de6cf165351e5bd68174a89"
containerID="695ce8a08afa726c47c6aa48ddd828cbc420a9740de6cf165351e5bd68174a89" Nov 26 06:54:50 crc kubenswrapper[4492]: I1126 06:54:50.060233 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-sc8jj" Nov 26 06:54:51 crc kubenswrapper[4492]: I1126 06:54:51.036375 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" event={"ID":"04bf18ad-d2a1-4b30-a3fa-2b6247363c82","Type":"ContainerStarted","Data":"d4fa1ef97b74bcf353b427d83c9b4b11261068cbcdfde33a49de78b55d802254"} Nov 26 06:54:54 crc kubenswrapper[4492]: I1126 06:54:54.683918 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-66df7c8f76-9j8px" Nov 26 06:54:54 crc kubenswrapper[4492]: I1126 06:54:54.777325 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-54mhl"] Nov 26 06:55:19 crc kubenswrapper[4492]: I1126 06:55:19.826744 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-image-registry/image-registry-697d97f7c8-54mhl" podUID="2b02d314-392f-44f3-a88c-57d8852fbcf9" containerName="registry" containerID="cri-o://3f9af8c9d91a796d84df803f461901a2a386515abce2b0866d100c06c289daf3" gracePeriod=30 Nov 26 06:55:20 crc kubenswrapper[4492]: I1126 06:55:20.159388 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-54mhl" Nov 26 06:55:20 crc kubenswrapper[4492]: I1126 06:55:20.209243 4492 generic.go:334] "Generic (PLEG): container finished" podID="2b02d314-392f-44f3-a88c-57d8852fbcf9" containerID="3f9af8c9d91a796d84df803f461901a2a386515abce2b0866d100c06c289daf3" exitCode=0 Nov 26 06:55:20 crc kubenswrapper[4492]: I1126 06:55:20.209294 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-54mhl" event={"ID":"2b02d314-392f-44f3-a88c-57d8852fbcf9","Type":"ContainerDied","Data":"3f9af8c9d91a796d84df803f461901a2a386515abce2b0866d100c06c289daf3"} Nov 26 06:55:20 crc kubenswrapper[4492]: I1126 06:55:20.209348 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-54mhl" event={"ID":"2b02d314-392f-44f3-a88c-57d8852fbcf9","Type":"ContainerDied","Data":"c60b9310417f8d93196e36ee8e19450f7df7c5499d7ad5d69169eafac2b527b5"} Nov 26 06:55:20 crc kubenswrapper[4492]: I1126 06:55:20.209351 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-54mhl" Nov 26 06:55:20 crc kubenswrapper[4492]: I1126 06:55:20.209369 4492 scope.go:117] "RemoveContainer" containerID="3f9af8c9d91a796d84df803f461901a2a386515abce2b0866d100c06c289daf3" Nov 26 06:55:20 crc kubenswrapper[4492]: I1126 06:55:20.224474 4492 scope.go:117] "RemoveContainer" containerID="3f9af8c9d91a796d84df803f461901a2a386515abce2b0866d100c06c289daf3" Nov 26 06:55:20 crc kubenswrapper[4492]: E1126 06:55:20.224769 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3f9af8c9d91a796d84df803f461901a2a386515abce2b0866d100c06c289daf3\": container with ID starting with 3f9af8c9d91a796d84df803f461901a2a386515abce2b0866d100c06c289daf3 not found: ID does not exist" containerID="3f9af8c9d91a796d84df803f461901a2a386515abce2b0866d100c06c289daf3" Nov 26 06:55:20 crc kubenswrapper[4492]: I1126 06:55:20.224825 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3f9af8c9d91a796d84df803f461901a2a386515abce2b0866d100c06c289daf3"} err="failed to get container status \"3f9af8c9d91a796d84df803f461901a2a386515abce2b0866d100c06c289daf3\": rpc error: code = NotFound desc = could not find container \"3f9af8c9d91a796d84df803f461901a2a386515abce2b0866d100c06c289daf3\": container with ID starting with 3f9af8c9d91a796d84df803f461901a2a386515abce2b0866d100c06c289daf3 not found: ID does not exist" Nov 26 06:55:20 crc kubenswrapper[4492]: I1126 06:55:20.315801 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/2b02d314-392f-44f3-a88c-57d8852fbcf9-bound-sa-token\") pod \"2b02d314-392f-44f3-a88c-57d8852fbcf9\" (UID: \"2b02d314-392f-44f3-a88c-57d8852fbcf9\") " Nov 26 06:55:20 crc kubenswrapper[4492]: I1126 06:55:20.315848 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/2b02d314-392f-44f3-a88c-57d8852fbcf9-installation-pull-secrets\") pod \"2b02d314-392f-44f3-a88c-57d8852fbcf9\" (UID: \"2b02d314-392f-44f3-a88c-57d8852fbcf9\") " Nov 26 06:55:20 crc kubenswrapper[4492]: I1126 06:55:20.315880 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/2b02d314-392f-44f3-a88c-57d8852fbcf9-ca-trust-extracted\") pod \"2b02d314-392f-44f3-a88c-57d8852fbcf9\" (UID: \"2b02d314-392f-44f3-a88c-57d8852fbcf9\") " Nov 26 06:55:20 crc kubenswrapper[4492]: I1126 06:55:20.315930 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/2b02d314-392f-44f3-a88c-57d8852fbcf9-trusted-ca\") pod \"2b02d314-392f-44f3-a88c-57d8852fbcf9\" (UID: \"2b02d314-392f-44f3-a88c-57d8852fbcf9\") " Nov 26 06:55:20 crc kubenswrapper[4492]: I1126 06:55:20.315958 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q2fh4\" (UniqueName: \"kubernetes.io/projected/2b02d314-392f-44f3-a88c-57d8852fbcf9-kube-api-access-q2fh4\") pod \"2b02d314-392f-44f3-a88c-57d8852fbcf9\" (UID: \"2b02d314-392f-44f3-a88c-57d8852fbcf9\") " Nov 26 06:55:20 crc kubenswrapper[4492]: I1126 06:55:20.316010 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: 
\"kubernetes.io/projected/2b02d314-392f-44f3-a88c-57d8852fbcf9-registry-tls\") pod \"2b02d314-392f-44f3-a88c-57d8852fbcf9\" (UID: \"2b02d314-392f-44f3-a88c-57d8852fbcf9\") " Nov 26 06:55:20 crc kubenswrapper[4492]: I1126 06:55:20.316571 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2b02d314-392f-44f3-a88c-57d8852fbcf9-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "2b02d314-392f-44f3-a88c-57d8852fbcf9" (UID: "2b02d314-392f-44f3-a88c-57d8852fbcf9"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:55:20 crc kubenswrapper[4492]: I1126 06:55:20.316878 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-storage\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"2b02d314-392f-44f3-a88c-57d8852fbcf9\" (UID: \"2b02d314-392f-44f3-a88c-57d8852fbcf9\") " Nov 26 06:55:20 crc kubenswrapper[4492]: I1126 06:55:20.316919 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/2b02d314-392f-44f3-a88c-57d8852fbcf9-registry-certificates\") pod \"2b02d314-392f-44f3-a88c-57d8852fbcf9\" (UID: \"2b02d314-392f-44f3-a88c-57d8852fbcf9\") " Nov 26 06:55:20 crc kubenswrapper[4492]: I1126 06:55:20.317093 4492 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/2b02d314-392f-44f3-a88c-57d8852fbcf9-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:20 crc kubenswrapper[4492]: I1126 06:55:20.317621 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2b02d314-392f-44f3-a88c-57d8852fbcf9-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "2b02d314-392f-44f3-a88c-57d8852fbcf9" (UID: "2b02d314-392f-44f3-a88c-57d8852fbcf9"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:55:20 crc kubenswrapper[4492]: I1126 06:55:20.321729 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2b02d314-392f-44f3-a88c-57d8852fbcf9-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "2b02d314-392f-44f3-a88c-57d8852fbcf9" (UID: "2b02d314-392f-44f3-a88c-57d8852fbcf9"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:55:20 crc kubenswrapper[4492]: I1126 06:55:20.322067 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2b02d314-392f-44f3-a88c-57d8852fbcf9-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "2b02d314-392f-44f3-a88c-57d8852fbcf9" (UID: "2b02d314-392f-44f3-a88c-57d8852fbcf9"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:55:20 crc kubenswrapper[4492]: I1126 06:55:20.322217 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2b02d314-392f-44f3-a88c-57d8852fbcf9-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "2b02d314-392f-44f3-a88c-57d8852fbcf9" (UID: "2b02d314-392f-44f3-a88c-57d8852fbcf9"). InnerVolumeSpecName "bound-sa-token". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:55:20 crc kubenswrapper[4492]: I1126 06:55:20.323268 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2b02d314-392f-44f3-a88c-57d8852fbcf9-kube-api-access-q2fh4" (OuterVolumeSpecName: "kube-api-access-q2fh4") pod "2b02d314-392f-44f3-a88c-57d8852fbcf9" (UID: "2b02d314-392f-44f3-a88c-57d8852fbcf9"). InnerVolumeSpecName "kube-api-access-q2fh4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:55:20 crc kubenswrapper[4492]: I1126 06:55:20.325969 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "registry-storage") pod "2b02d314-392f-44f3-a88c-57d8852fbcf9" (UID: "2b02d314-392f-44f3-a88c-57d8852fbcf9"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue "" Nov 26 06:55:20 crc kubenswrapper[4492]: I1126 06:55:20.330100 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2b02d314-392f-44f3-a88c-57d8852fbcf9-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "2b02d314-392f-44f3-a88c-57d8852fbcf9" (UID: "2b02d314-392f-44f3-a88c-57d8852fbcf9"). InnerVolumeSpecName "ca-trust-extracted". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 06:55:20 crc kubenswrapper[4492]: I1126 06:55:20.418130 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q2fh4\" (UniqueName: \"kubernetes.io/projected/2b02d314-392f-44f3-a88c-57d8852fbcf9-kube-api-access-q2fh4\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:20 crc kubenswrapper[4492]: I1126 06:55:20.418162 4492 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/2b02d314-392f-44f3-a88c-57d8852fbcf9-registry-tls\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:20 crc kubenswrapper[4492]: I1126 06:55:20.418187 4492 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/2b02d314-392f-44f3-a88c-57d8852fbcf9-registry-certificates\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:20 crc kubenswrapper[4492]: I1126 06:55:20.418196 4492 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/2b02d314-392f-44f3-a88c-57d8852fbcf9-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:20 crc kubenswrapper[4492]: I1126 06:55:20.418204 4492 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/2b02d314-392f-44f3-a88c-57d8852fbcf9-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:20 crc kubenswrapper[4492]: I1126 06:55:20.418216 4492 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/2b02d314-392f-44f3-a88c-57d8852fbcf9-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Nov 26 06:55:20 crc kubenswrapper[4492]: I1126 06:55:20.538586 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-54mhl"] Nov 26 06:55:20 crc kubenswrapper[4492]: I1126 06:55:20.540524 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-54mhl"] Nov 26 06:55:22 crc kubenswrapper[4492]: I1126 06:55:22.443978 4492 
Nov 26 06:55:22 crc kubenswrapper[4492]: I1126 06:55:22.443978 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2b02d314-392f-44f3-a88c-57d8852fbcf9" path="/var/lib/kubelet/pods/2b02d314-392f-44f3-a88c-57d8852fbcf9/volumes"
Nov 26 06:57:19 crc kubenswrapper[4492]: I1126 06:57:19.441940 4492 patch_prober.go:28] interesting pod/machine-config-daemon-6blv7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 26 06:57:19 crc kubenswrapper[4492]: I1126 06:57:19.442443 4492 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 26 06:57:49 crc kubenswrapper[4492]: I1126 06:57:49.441602 4492 patch_prober.go:28] interesting pod/machine-config-daemon-6blv7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 26 06:57:49 crc kubenswrapper[4492]: I1126 06:57:49.442054 4492 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 26 06:58:19 crc kubenswrapper[4492]: I1126 06:58:19.441099 4492 patch_prober.go:28] interesting pod/machine-config-daemon-6blv7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 26 06:58:19 crc kubenswrapper[4492]: I1126 06:58:19.441676 4492 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 26 06:58:19 crc kubenswrapper[4492]: I1126 06:58:19.441720 4492 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-6blv7"
Nov 26 06:58:19 crc kubenswrapper[4492]: I1126 06:58:19.442087 4492 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"d4fa1ef97b74bcf353b427d83c9b4b11261068cbcdfde33a49de78b55d802254"} pod="openshift-machine-config-operator/machine-config-daemon-6blv7" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 26 06:58:19 crc kubenswrapper[4492]: I1126 06:58:19.442138 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" containerID="cri-o://d4fa1ef97b74bcf353b427d83c9b4b11261068cbcdfde33a49de78b55d802254" gracePeriod=600
Nov 26 06:58:20 crc kubenswrapper[4492]: I1126 06:58:20.076203 4492 generic.go:334] "Generic (PLEG): container finished" podID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerID="d4fa1ef97b74bcf353b427d83c9b4b11261068cbcdfde33a49de78b55d802254" exitCode=0
Nov 26 06:58:20 crc kubenswrapper[4492]: I1126 06:58:20.076240 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" event={"ID":"04bf18ad-d2a1-4b30-a3fa-2b6247363c82","Type":"ContainerDied","Data":"d4fa1ef97b74bcf353b427d83c9b4b11261068cbcdfde33a49de78b55d802254"}
Nov 26 06:58:20 crc kubenswrapper[4492]: I1126 06:58:20.076741 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" event={"ID":"04bf18ad-d2a1-4b30-a3fa-2b6247363c82","Type":"ContainerStarted","Data":"3a3d7f8ad7361b1a0985dafaf6a7904b1bcd29d5ae978e67890841e77797b9ac"}
Nov 26 06:58:20 crc kubenswrapper[4492]: I1126 06:58:20.076764 4492 scope.go:117] "RemoveContainer" containerID="ebb7c14f054bf1a3e0275de2bfc80f082c3d5f1d3a6a0ca4e02d8ce5ee7ee01b"
Nov 26 06:59:02 crc kubenswrapper[4492]: I1126 06:59:02.961821 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-fv2df"]
Nov 26 06:59:02 crc kubenswrapper[4492]: E1126 06:59:02.962930 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2b02d314-392f-44f3-a88c-57d8852fbcf9" containerName="registry"
Nov 26 06:59:02 crc kubenswrapper[4492]: I1126 06:59:02.962952 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="2b02d314-392f-44f3-a88c-57d8852fbcf9" containerName="registry"
Nov 26 06:59:02 crc kubenswrapper[4492]: I1126 06:59:02.963109 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="2b02d314-392f-44f3-a88c-57d8852fbcf9" containerName="registry"
Nov 26 06:59:02 crc kubenswrapper[4492]: I1126 06:59:02.963717 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-7f985d654d-fv2df"
Nov 26 06:59:02 crc kubenswrapper[4492]: I1126 06:59:02.969575 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"kube-root-ca.crt"
Nov 26 06:59:02 crc kubenswrapper[4492]: I1126 06:59:02.969580 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"openshift-service-ca.crt"
Nov 26 06:59:02 crc kubenswrapper[4492]: I1126 06:59:02.969905 4492 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-cainjector-dockercfg-gz4bv"
Nov 26 06:59:02 crc kubenswrapper[4492]: I1126 06:59:02.974769 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-5b446d88c5-882wr"]
Nov 26 06:59:02 crc kubenswrapper[4492]: I1126 06:59:02.975622 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-5b446d88c5-882wr"
Nov 26 06:59:02 crc kubenswrapper[4492]: I1126 06:59:02.977575 4492 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-dockercfg-df9nw"
Nov 26 06:59:02 crc kubenswrapper[4492]: I1126 06:59:02.987773 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-fv2df"]
Nov 26 06:59:02 crc kubenswrapper[4492]: I1126 06:59:02.999679 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-gtpdm"]
Need to start a new one" pod="cert-manager/cert-manager-webhook-5655c58dd6-gtpdm" Nov 26 06:59:03 crc kubenswrapper[4492]: I1126 06:59:03.002534 4492 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-webhook-dockercfg-lsq8b" Nov 26 06:59:03 crc kubenswrapper[4492]: I1126 06:59:03.003220 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-5b446d88c5-882wr"] Nov 26 06:59:03 crc kubenswrapper[4492]: I1126 06:59:03.008616 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-gtpdm"] Nov 26 06:59:03 crc kubenswrapper[4492]: I1126 06:59:03.012605 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t5k5t\" (UniqueName: \"kubernetes.io/projected/d1a65554-d633-42e2-9869-119dc5f457e4-kube-api-access-t5k5t\") pod \"cert-manager-webhook-5655c58dd6-gtpdm\" (UID: \"d1a65554-d633-42e2-9869-119dc5f457e4\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-gtpdm" Nov 26 06:59:03 crc kubenswrapper[4492]: I1126 06:59:03.012654 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4x7nr\" (UniqueName: \"kubernetes.io/projected/3ed71dbf-8ddb-4b55-bb36-509490ac3e1b-kube-api-access-4x7nr\") pod \"cert-manager-5b446d88c5-882wr\" (UID: \"3ed71dbf-8ddb-4b55-bb36-509490ac3e1b\") " pod="cert-manager/cert-manager-5b446d88c5-882wr" Nov 26 06:59:03 crc kubenswrapper[4492]: I1126 06:59:03.012775 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c7zwk\" (UniqueName: \"kubernetes.io/projected/cb547269-1d6d-4e01-8374-f3327dd441b5-kube-api-access-c7zwk\") pod \"cert-manager-cainjector-7f985d654d-fv2df\" (UID: \"cb547269-1d6d-4e01-8374-f3327dd441b5\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-fv2df" Nov 26 06:59:03 crc kubenswrapper[4492]: I1126 06:59:03.113709 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c7zwk\" (UniqueName: \"kubernetes.io/projected/cb547269-1d6d-4e01-8374-f3327dd441b5-kube-api-access-c7zwk\") pod \"cert-manager-cainjector-7f985d654d-fv2df\" (UID: \"cb547269-1d6d-4e01-8374-f3327dd441b5\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-fv2df" Nov 26 06:59:03 crc kubenswrapper[4492]: I1126 06:59:03.113766 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t5k5t\" (UniqueName: \"kubernetes.io/projected/d1a65554-d633-42e2-9869-119dc5f457e4-kube-api-access-t5k5t\") pod \"cert-manager-webhook-5655c58dd6-gtpdm\" (UID: \"d1a65554-d633-42e2-9869-119dc5f457e4\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-gtpdm" Nov 26 06:59:03 crc kubenswrapper[4492]: I1126 06:59:03.113794 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4x7nr\" (UniqueName: \"kubernetes.io/projected/3ed71dbf-8ddb-4b55-bb36-509490ac3e1b-kube-api-access-4x7nr\") pod \"cert-manager-5b446d88c5-882wr\" (UID: \"3ed71dbf-8ddb-4b55-bb36-509490ac3e1b\") " pod="cert-manager/cert-manager-5b446d88c5-882wr" Nov 26 06:59:03 crc kubenswrapper[4492]: I1126 06:59:03.131117 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4x7nr\" (UniqueName: \"kubernetes.io/projected/3ed71dbf-8ddb-4b55-bb36-509490ac3e1b-kube-api-access-4x7nr\") pod \"cert-manager-5b446d88c5-882wr\" (UID: \"3ed71dbf-8ddb-4b55-bb36-509490ac3e1b\") " 
pod="cert-manager/cert-manager-5b446d88c5-882wr" Nov 26 06:59:03 crc kubenswrapper[4492]: I1126 06:59:03.131117 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t5k5t\" (UniqueName: \"kubernetes.io/projected/d1a65554-d633-42e2-9869-119dc5f457e4-kube-api-access-t5k5t\") pod \"cert-manager-webhook-5655c58dd6-gtpdm\" (UID: \"d1a65554-d633-42e2-9869-119dc5f457e4\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-gtpdm" Nov 26 06:59:03 crc kubenswrapper[4492]: I1126 06:59:03.131698 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c7zwk\" (UniqueName: \"kubernetes.io/projected/cb547269-1d6d-4e01-8374-f3327dd441b5-kube-api-access-c7zwk\") pod \"cert-manager-cainjector-7f985d654d-fv2df\" (UID: \"cb547269-1d6d-4e01-8374-f3327dd441b5\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-fv2df" Nov 26 06:59:03 crc kubenswrapper[4492]: I1126 06:59:03.293909 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-7f985d654d-fv2df" Nov 26 06:59:03 crc kubenswrapper[4492]: I1126 06:59:03.298717 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-5b446d88c5-882wr" Nov 26 06:59:03 crc kubenswrapper[4492]: I1126 06:59:03.313638 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-5655c58dd6-gtpdm" Nov 26 06:59:03 crc kubenswrapper[4492]: I1126 06:59:03.485204 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-5b446d88c5-882wr"] Nov 26 06:59:03 crc kubenswrapper[4492]: I1126 06:59:03.499403 4492 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 26 06:59:03 crc kubenswrapper[4492]: I1126 06:59:03.527697 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-fv2df"] Nov 26 06:59:03 crc kubenswrapper[4492]: I1126 06:59:03.757270 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-gtpdm"] Nov 26 06:59:03 crc kubenswrapper[4492]: W1126 06:59:03.763357 4492 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd1a65554_d633_42e2_9869_119dc5f457e4.slice/crio-6cf2cae547fd559f7a43d85731dc4ee6f2c07d9e3c6bc824c74c8fb2603a437f WatchSource:0}: Error finding container 6cf2cae547fd559f7a43d85731dc4ee6f2c07d9e3c6bc824c74c8fb2603a437f: Status 404 returned error can't find the container with id 6cf2cae547fd559f7a43d85731dc4ee6f2c07d9e3c6bc824c74c8fb2603a437f Nov 26 06:59:04 crc kubenswrapper[4492]: I1126 06:59:04.315381 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-5655c58dd6-gtpdm" event={"ID":"d1a65554-d633-42e2-9869-119dc5f457e4","Type":"ContainerStarted","Data":"6cf2cae547fd559f7a43d85731dc4ee6f2c07d9e3c6bc824c74c8fb2603a437f"} Nov 26 06:59:04 crc kubenswrapper[4492]: I1126 06:59:04.316223 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-5b446d88c5-882wr" event={"ID":"3ed71dbf-8ddb-4b55-bb36-509490ac3e1b","Type":"ContainerStarted","Data":"e579990a6e3e7f1b62951967bc8aac5367a02b6be3a006775116af227415d19f"} Nov 26 06:59:04 crc kubenswrapper[4492]: I1126 06:59:04.317109 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-7f985d654d-fv2df" 
event={"ID":"cb547269-1d6d-4e01-8374-f3327dd441b5","Type":"ContainerStarted","Data":"9edda3cc02d8a73397f75a210f9a92ebf03108898cecae3c9253ba9be5f87c82"} Nov 26 06:59:07 crc kubenswrapper[4492]: I1126 06:59:07.336069 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-5b446d88c5-882wr" event={"ID":"3ed71dbf-8ddb-4b55-bb36-509490ac3e1b","Type":"ContainerStarted","Data":"69e72a9488d84f8c22190550b8af63bf15519b0631c380279e4914ac360c0dca"} Nov 26 06:59:07 crc kubenswrapper[4492]: I1126 06:59:07.337925 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-7f985d654d-fv2df" event={"ID":"cb547269-1d6d-4e01-8374-f3327dd441b5","Type":"ContainerStarted","Data":"fa2ad99f8b84eebdd156654cbb9d9ba9ed15e7ba0d92db78039e652df0b9b770"} Nov 26 06:59:07 crc kubenswrapper[4492]: I1126 06:59:07.340780 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-5655c58dd6-gtpdm" event={"ID":"d1a65554-d633-42e2-9869-119dc5f457e4","Type":"ContainerStarted","Data":"3e3153ec091ddb929e7b6613464a81af5521293f9b99d2ef67521edc473f65d9"} Nov 26 06:59:07 crc kubenswrapper[4492]: I1126 06:59:07.341684 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="cert-manager/cert-manager-webhook-5655c58dd6-gtpdm" Nov 26 06:59:07 crc kubenswrapper[4492]: I1126 06:59:07.354444 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-5b446d88c5-882wr" podStartSLOduration=2.575001101 podStartE2EDuration="5.354427306s" podCreationTimestamp="2025-11-26 06:59:02 +0000 UTC" firstStartedPulling="2025-11-26 06:59:03.498900814 +0000 UTC m=+639.382789112" lastFinishedPulling="2025-11-26 06:59:06.278327019 +0000 UTC m=+642.162215317" observedRunningTime="2025-11-26 06:59:07.348276823 +0000 UTC m=+643.232165112" watchObservedRunningTime="2025-11-26 06:59:07.354427306 +0000 UTC m=+643.238315604" Nov 26 06:59:07 crc kubenswrapper[4492]: I1126 06:59:07.366280 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-cainjector-7f985d654d-fv2df" podStartSLOduration=2.646608589 podStartE2EDuration="5.366254613s" podCreationTimestamp="2025-11-26 06:59:02 +0000 UTC" firstStartedPulling="2025-11-26 06:59:03.535322548 +0000 UTC m=+639.419210847" lastFinishedPulling="2025-11-26 06:59:06.254968574 +0000 UTC m=+642.138856871" observedRunningTime="2025-11-26 06:59:07.362982631 +0000 UTC m=+643.246870929" watchObservedRunningTime="2025-11-26 06:59:07.366254613 +0000 UTC m=+643.250142911" Nov 26 06:59:07 crc kubenswrapper[4492]: I1126 06:59:07.388696 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-webhook-5655c58dd6-gtpdm" podStartSLOduration=2.875071384 podStartE2EDuration="5.388676658s" podCreationTimestamp="2025-11-26 06:59:02 +0000 UTC" firstStartedPulling="2025-11-26 06:59:03.765624693 +0000 UTC m=+639.649512991" lastFinishedPulling="2025-11-26 06:59:06.279229967 +0000 UTC m=+642.163118265" observedRunningTime="2025-11-26 06:59:07.38392308 +0000 UTC m=+643.267811379" watchObservedRunningTime="2025-11-26 06:59:07.388676658 +0000 UTC m=+643.272564956" Nov 26 06:59:13 crc kubenswrapper[4492]: I1126 06:59:13.316224 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="cert-manager/cert-manager-webhook-5655c58dd6-gtpdm" Nov 26 06:59:14 crc kubenswrapper[4492]: I1126 06:59:14.651710 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openshift-ovn-kubernetes/ovnkube-node-lghgp"] Nov 26 06:59:14 crc kubenswrapper[4492]: I1126 06:59:14.652035 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" podUID="9b104695-0850-4fb3-b2f8-f764435f8694" containerName="ovn-controller" containerID="cri-o://1519223368c78fc663c8f6674135a7c574425dccbd0151d6663836b4715e9f9e" gracePeriod=30 Nov 26 06:59:14 crc kubenswrapper[4492]: I1126 06:59:14.652150 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" podUID="9b104695-0850-4fb3-b2f8-f764435f8694" containerName="kube-rbac-proxy-node" containerID="cri-o://5de69ea04f427c1f151433ee53b119dfa7e3cd603f924ebca340c5050ff7d097" gracePeriod=30 Nov 26 06:59:14 crc kubenswrapper[4492]: I1126 06:59:14.652148 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" podUID="9b104695-0850-4fb3-b2f8-f764435f8694" containerName="sbdb" containerID="cri-o://dcc2ea28d7ab1193d0836b56704a623efefea918b62fde4a41c3d5466a531a39" gracePeriod=30 Nov 26 06:59:14 crc kubenswrapper[4492]: I1126 06:59:14.652195 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" podUID="9b104695-0850-4fb3-b2f8-f764435f8694" containerName="northd" containerID="cri-o://501133ebc0e93cbba7bea713d813e6f41caa28286ed59ae8d328b6f9050a1e71" gracePeriod=30 Nov 26 06:59:14 crc kubenswrapper[4492]: I1126 06:59:14.652203 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" podUID="9b104695-0850-4fb3-b2f8-f764435f8694" containerName="kube-rbac-proxy-ovn-metrics" containerID="cri-o://738cf2a56bd431ea3dcd29c8a6e66881e618cf6592af8ce1852be9776c41f803" gracePeriod=30 Nov 26 06:59:14 crc kubenswrapper[4492]: I1126 06:59:14.652226 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" podUID="9b104695-0850-4fb3-b2f8-f764435f8694" containerName="ovn-acl-logging" containerID="cri-o://0301a329531b99f93a3e91f672edc4ce17ae03cfd5562f5802362423f11670fc" gracePeriod=30 Nov 26 06:59:14 crc kubenswrapper[4492]: I1126 06:59:14.652109 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" podUID="9b104695-0850-4fb3-b2f8-f764435f8694" containerName="nbdb" containerID="cri-o://dba16d525497282969e2ead66e1d4886793234d0f4d2ef143a8246329131b046" gracePeriod=30 Nov 26 06:59:14 crc kubenswrapper[4492]: I1126 06:59:14.688944 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" podUID="9b104695-0850-4fb3-b2f8-f764435f8694" containerName="ovnkube-controller" containerID="cri-o://4ea0c5b21e02525e24489f438a88ad9cea344b6d4d46a7887baf8dc07c82a334" gracePeriod=30 Nov 26 06:59:14 crc kubenswrapper[4492]: I1126 06:59:14.931265 4492 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-lghgp_9b104695-0850-4fb3-b2f8-f764435f8694/ovnkube-controller/3.log" Nov 26 06:59:14 crc kubenswrapper[4492]: I1126 06:59:14.934085 4492 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-lghgp_9b104695-0850-4fb3-b2f8-f764435f8694/ovn-acl-logging/0.log" Nov 26 06:59:14 crc kubenswrapper[4492]: I1126 06:59:14.934562 4492 log.go:25] "Finished parsing log 
file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-lghgp_9b104695-0850-4fb3-b2f8-f764435f8694/ovn-controller/0.log" Nov 26 06:59:14 crc kubenswrapper[4492]: I1126 06:59:14.934935 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" Nov 26 06:59:14 crc kubenswrapper[4492]: I1126 06:59:14.976070 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-w2769"] Nov 26 06:59:14 crc kubenswrapper[4492]: E1126 06:59:14.976279 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9b104695-0850-4fb3-b2f8-f764435f8694" containerName="nbdb" Nov 26 06:59:14 crc kubenswrapper[4492]: I1126 06:59:14.976293 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="9b104695-0850-4fb3-b2f8-f764435f8694" containerName="nbdb" Nov 26 06:59:14 crc kubenswrapper[4492]: E1126 06:59:14.976303 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9b104695-0850-4fb3-b2f8-f764435f8694" containerName="kubecfg-setup" Nov 26 06:59:14 crc kubenswrapper[4492]: I1126 06:59:14.976309 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="9b104695-0850-4fb3-b2f8-f764435f8694" containerName="kubecfg-setup" Nov 26 06:59:14 crc kubenswrapper[4492]: E1126 06:59:14.976315 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9b104695-0850-4fb3-b2f8-f764435f8694" containerName="ovn-controller" Nov 26 06:59:14 crc kubenswrapper[4492]: I1126 06:59:14.976321 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="9b104695-0850-4fb3-b2f8-f764435f8694" containerName="ovn-controller" Nov 26 06:59:14 crc kubenswrapper[4492]: E1126 06:59:14.976328 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9b104695-0850-4fb3-b2f8-f764435f8694" containerName="ovnkube-controller" Nov 26 06:59:14 crc kubenswrapper[4492]: I1126 06:59:14.976334 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="9b104695-0850-4fb3-b2f8-f764435f8694" containerName="ovnkube-controller" Nov 26 06:59:14 crc kubenswrapper[4492]: E1126 06:59:14.976341 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9b104695-0850-4fb3-b2f8-f764435f8694" containerName="ovn-acl-logging" Nov 26 06:59:14 crc kubenswrapper[4492]: I1126 06:59:14.976346 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="9b104695-0850-4fb3-b2f8-f764435f8694" containerName="ovn-acl-logging" Nov 26 06:59:14 crc kubenswrapper[4492]: E1126 06:59:14.976362 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9b104695-0850-4fb3-b2f8-f764435f8694" containerName="kube-rbac-proxy-node" Nov 26 06:59:14 crc kubenswrapper[4492]: I1126 06:59:14.976367 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="9b104695-0850-4fb3-b2f8-f764435f8694" containerName="kube-rbac-proxy-node" Nov 26 06:59:14 crc kubenswrapper[4492]: E1126 06:59:14.976375 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9b104695-0850-4fb3-b2f8-f764435f8694" containerName="northd" Nov 26 06:59:14 crc kubenswrapper[4492]: I1126 06:59:14.976381 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="9b104695-0850-4fb3-b2f8-f764435f8694" containerName="northd" Nov 26 06:59:14 crc kubenswrapper[4492]: E1126 06:59:14.976391 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9b104695-0850-4fb3-b2f8-f764435f8694" containerName="ovnkube-controller" Nov 26 06:59:14 crc kubenswrapper[4492]: I1126 06:59:14.976396 4492 state_mem.go:107] "Deleted CPUSet 
assignment" podUID="9b104695-0850-4fb3-b2f8-f764435f8694" containerName="ovnkube-controller" Nov 26 06:59:14 crc kubenswrapper[4492]: E1126 06:59:14.976403 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9b104695-0850-4fb3-b2f8-f764435f8694" containerName="ovnkube-controller" Nov 26 06:59:14 crc kubenswrapper[4492]: I1126 06:59:14.976408 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="9b104695-0850-4fb3-b2f8-f764435f8694" containerName="ovnkube-controller" Nov 26 06:59:14 crc kubenswrapper[4492]: E1126 06:59:14.976415 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9b104695-0850-4fb3-b2f8-f764435f8694" containerName="kube-rbac-proxy-ovn-metrics" Nov 26 06:59:14 crc kubenswrapper[4492]: I1126 06:59:14.976420 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="9b104695-0850-4fb3-b2f8-f764435f8694" containerName="kube-rbac-proxy-ovn-metrics" Nov 26 06:59:14 crc kubenswrapper[4492]: E1126 06:59:14.976428 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9b104695-0850-4fb3-b2f8-f764435f8694" containerName="sbdb" Nov 26 06:59:14 crc kubenswrapper[4492]: I1126 06:59:14.976440 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="9b104695-0850-4fb3-b2f8-f764435f8694" containerName="sbdb" Nov 26 06:59:14 crc kubenswrapper[4492]: E1126 06:59:14.976447 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9b104695-0850-4fb3-b2f8-f764435f8694" containerName="ovnkube-controller" Nov 26 06:59:14 crc kubenswrapper[4492]: I1126 06:59:14.976452 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="9b104695-0850-4fb3-b2f8-f764435f8694" containerName="ovnkube-controller" Nov 26 06:59:14 crc kubenswrapper[4492]: E1126 06:59:14.976459 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9b104695-0850-4fb3-b2f8-f764435f8694" containerName="ovnkube-controller" Nov 26 06:59:14 crc kubenswrapper[4492]: I1126 06:59:14.976464 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="9b104695-0850-4fb3-b2f8-f764435f8694" containerName="ovnkube-controller" Nov 26 06:59:14 crc kubenswrapper[4492]: I1126 06:59:14.976556 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="9b104695-0850-4fb3-b2f8-f764435f8694" containerName="ovnkube-controller" Nov 26 06:59:14 crc kubenswrapper[4492]: I1126 06:59:14.976563 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="9b104695-0850-4fb3-b2f8-f764435f8694" containerName="kube-rbac-proxy-ovn-metrics" Nov 26 06:59:14 crc kubenswrapper[4492]: I1126 06:59:14.976571 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="9b104695-0850-4fb3-b2f8-f764435f8694" containerName="ovn-controller" Nov 26 06:59:14 crc kubenswrapper[4492]: I1126 06:59:14.976575 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="9b104695-0850-4fb3-b2f8-f764435f8694" containerName="northd" Nov 26 06:59:14 crc kubenswrapper[4492]: I1126 06:59:14.976583 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="9b104695-0850-4fb3-b2f8-f764435f8694" containerName="ovnkube-controller" Nov 26 06:59:14 crc kubenswrapper[4492]: I1126 06:59:14.976588 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="9b104695-0850-4fb3-b2f8-f764435f8694" containerName="ovnkube-controller" Nov 26 06:59:14 crc kubenswrapper[4492]: I1126 06:59:14.976598 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="9b104695-0850-4fb3-b2f8-f764435f8694" containerName="nbdb" Nov 26 06:59:14 crc 
Nov 26 06:59:14 crc kubenswrapper[4492]: I1126 06:59:14.976606 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="9b104695-0850-4fb3-b2f8-f764435f8694" containerName="ovnkube-controller"
Nov 26 06:59:14 crc kubenswrapper[4492]: I1126 06:59:14.976613 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="9b104695-0850-4fb3-b2f8-f764435f8694" containerName="ovn-acl-logging"
Nov 26 06:59:14 crc kubenswrapper[4492]: I1126 06:59:14.976618 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="9b104695-0850-4fb3-b2f8-f764435f8694" containerName="kube-rbac-proxy-node"
Nov 26 06:59:14 crc kubenswrapper[4492]: I1126 06:59:14.976625 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="9b104695-0850-4fb3-b2f8-f764435f8694" containerName="sbdb"
Nov 26 06:59:14 crc kubenswrapper[4492]: I1126 06:59:14.976791 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="9b104695-0850-4fb3-b2f8-f764435f8694" containerName="ovnkube-controller"
Nov 26 06:59:14 crc kubenswrapper[4492]: I1126 06:59:14.978082 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-w2769"
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.045979 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/9b104695-0850-4fb3-b2f8-f764435f8694-host-var-lib-cni-networks-ovn-kubernetes\") pod \"9b104695-0850-4fb3-b2f8-f764435f8694\" (UID: \"9b104695-0850-4fb3-b2f8-f764435f8694\") "
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.046030 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/9b104695-0850-4fb3-b2f8-f764435f8694-run-openvswitch\") pod \"9b104695-0850-4fb3-b2f8-f764435f8694\" (UID: \"9b104695-0850-4fb3-b2f8-f764435f8694\") "
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.046055 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/9b104695-0850-4fb3-b2f8-f764435f8694-ovnkube-script-lib\") pod \"9b104695-0850-4fb3-b2f8-f764435f8694\" (UID: \"9b104695-0850-4fb3-b2f8-f764435f8694\") "
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.046104 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/9b104695-0850-4fb3-b2f8-f764435f8694-run-openvswitch" (OuterVolumeSpecName: "run-openvswitch") pod "9b104695-0850-4fb3-b2f8-f764435f8694" (UID: "9b104695-0850-4fb3-b2f8-f764435f8694"). InnerVolumeSpecName "run-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.046132 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/9b104695-0850-4fb3-b2f8-f764435f8694-host-cni-bin\") pod \"9b104695-0850-4fb3-b2f8-f764435f8694\" (UID: \"9b104695-0850-4fb3-b2f8-f764435f8694\") "
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.046127 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/9b104695-0850-4fb3-b2f8-f764435f8694-host-var-lib-cni-networks-ovn-kubernetes" (OuterVolumeSpecName: "host-var-lib-cni-networks-ovn-kubernetes") pod "9b104695-0850-4fb3-b2f8-f764435f8694" (UID: "9b104695-0850-4fb3-b2f8-f764435f8694"). InnerVolumeSpecName "host-var-lib-cni-networks-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.046158 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/9b104695-0850-4fb3-b2f8-f764435f8694-host-cni-bin" (OuterVolumeSpecName: "host-cni-bin") pod "9b104695-0850-4fb3-b2f8-f764435f8694" (UID: "9b104695-0850-4fb3-b2f8-f764435f8694"). InnerVolumeSpecName "host-cni-bin". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.046229 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/9b104695-0850-4fb3-b2f8-f764435f8694-host-slash\") pod \"9b104695-0850-4fb3-b2f8-f764435f8694\" (UID: \"9b104695-0850-4fb3-b2f8-f764435f8694\") "
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.046253 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/9b104695-0850-4fb3-b2f8-f764435f8694-host-slash" (OuterVolumeSpecName: "host-slash") pod "9b104695-0850-4fb3-b2f8-f764435f8694" (UID: "9b104695-0850-4fb3-b2f8-f764435f8694"). InnerVolumeSpecName "host-slash". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.046348 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/9b104695-0850-4fb3-b2f8-f764435f8694-host-cni-netd\") pod \"9b104695-0850-4fb3-b2f8-f764435f8694\" (UID: \"9b104695-0850-4fb3-b2f8-f764435f8694\") "
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.046377 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/9b104695-0850-4fb3-b2f8-f764435f8694-host-cni-netd" (OuterVolumeSpecName: "host-cni-netd") pod "9b104695-0850-4fb3-b2f8-f764435f8694" (UID: "9b104695-0850-4fb3-b2f8-f764435f8694"). InnerVolumeSpecName "host-cni-netd". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.046454 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9b104695-0850-4fb3-b2f8-f764435f8694-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "9b104695-0850-4fb3-b2f8-f764435f8694" (UID: "9b104695-0850-4fb3-b2f8-f764435f8694"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.046474 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/9b104695-0850-4fb3-b2f8-f764435f8694-ovn-node-metrics-cert\") pod \"9b104695-0850-4fb3-b2f8-f764435f8694\" (UID: \"9b104695-0850-4fb3-b2f8-f764435f8694\") "
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.046492 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/9b104695-0850-4fb3-b2f8-f764435f8694-run-systemd\") pod \"9b104695-0850-4fb3-b2f8-f764435f8694\" (UID: \"9b104695-0850-4fb3-b2f8-f764435f8694\") "
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.046510 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/9b104695-0850-4fb3-b2f8-f764435f8694-env-overrides\") pod \"9b104695-0850-4fb3-b2f8-f764435f8694\" (UID: \"9b104695-0850-4fb3-b2f8-f764435f8694\") "
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.046523 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/9b104695-0850-4fb3-b2f8-f764435f8694-systemd-units\") pod \"9b104695-0850-4fb3-b2f8-f764435f8694\" (UID: \"9b104695-0850-4fb3-b2f8-f764435f8694\") "
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.046548 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/9b104695-0850-4fb3-b2f8-f764435f8694-host-kubelet\") pod \"9b104695-0850-4fb3-b2f8-f764435f8694\" (UID: \"9b104695-0850-4fb3-b2f8-f764435f8694\") "
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.046576 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/9b104695-0850-4fb3-b2f8-f764435f8694-node-log\") pod \"9b104695-0850-4fb3-b2f8-f764435f8694\" (UID: \"9b104695-0850-4fb3-b2f8-f764435f8694\") "
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.046609 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mc5q6\" (UniqueName: \"kubernetes.io/projected/9b104695-0850-4fb3-b2f8-f764435f8694-kube-api-access-mc5q6\") pod \"9b104695-0850-4fb3-b2f8-f764435f8694\" (UID: \"9b104695-0850-4fb3-b2f8-f764435f8694\") "
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.046624 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/9b104695-0850-4fb3-b2f8-f764435f8694-ovnkube-config\") pod \"9b104695-0850-4fb3-b2f8-f764435f8694\" (UID: \"9b104695-0850-4fb3-b2f8-f764435f8694\") "
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.046640 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/9b104695-0850-4fb3-b2f8-f764435f8694-host-run-ovn-kubernetes\") pod \"9b104695-0850-4fb3-b2f8-f764435f8694\" (UID: \"9b104695-0850-4fb3-b2f8-f764435f8694\") "
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.046658 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/9b104695-0850-4fb3-b2f8-f764435f8694-var-lib-openvswitch\") pod \"9b104695-0850-4fb3-b2f8-f764435f8694\" (UID: \"9b104695-0850-4fb3-b2f8-f764435f8694\") "
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.046699 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/9b104695-0850-4fb3-b2f8-f764435f8694-node-log" (OuterVolumeSpecName: "node-log") pod "9b104695-0850-4fb3-b2f8-f764435f8694" (UID: "9b104695-0850-4fb3-b2f8-f764435f8694"). InnerVolumeSpecName "node-log". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.046705 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/9b104695-0850-4fb3-b2f8-f764435f8694-systemd-units" (OuterVolumeSpecName: "systemd-units") pod "9b104695-0850-4fb3-b2f8-f764435f8694" (UID: "9b104695-0850-4fb3-b2f8-f764435f8694"). InnerVolumeSpecName "systemd-units". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.046725 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/9b104695-0850-4fb3-b2f8-f764435f8694-host-kubelet" (OuterVolumeSpecName: "host-kubelet") pod "9b104695-0850-4fb3-b2f8-f764435f8694" (UID: "9b104695-0850-4fb3-b2f8-f764435f8694"). InnerVolumeSpecName "host-kubelet". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.046749 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/9b104695-0850-4fb3-b2f8-f764435f8694-host-run-ovn-kubernetes" (OuterVolumeSpecName: "host-run-ovn-kubernetes") pod "9b104695-0850-4fb3-b2f8-f764435f8694" (UID: "9b104695-0850-4fb3-b2f8-f764435f8694"). InnerVolumeSpecName "host-run-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.046785 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9b104695-0850-4fb3-b2f8-f764435f8694-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "9b104695-0850-4fb3-b2f8-f764435f8694" (UID: "9b104695-0850-4fb3-b2f8-f764435f8694"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.046957 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/9b104695-0850-4fb3-b2f8-f764435f8694-var-lib-openvswitch" (OuterVolumeSpecName: "var-lib-openvswitch") pod "9b104695-0850-4fb3-b2f8-f764435f8694" (UID: "9b104695-0850-4fb3-b2f8-f764435f8694"). InnerVolumeSpecName "var-lib-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.047003 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9b104695-0850-4fb3-b2f8-f764435f8694-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "9b104695-0850-4fb3-b2f8-f764435f8694" (UID: "9b104695-0850-4fb3-b2f8-f764435f8694"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.047042 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/9b104695-0850-4fb3-b2f8-f764435f8694-etc-openvswitch\") pod \"9b104695-0850-4fb3-b2f8-f764435f8694\" (UID: \"9b104695-0850-4fb3-b2f8-f764435f8694\") "
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.047069 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/9b104695-0850-4fb3-b2f8-f764435f8694-run-ovn\") pod \"9b104695-0850-4fb3-b2f8-f764435f8694\" (UID: \"9b104695-0850-4fb3-b2f8-f764435f8694\") "
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.047086 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/9b104695-0850-4fb3-b2f8-f764435f8694-log-socket\") pod \"9b104695-0850-4fb3-b2f8-f764435f8694\" (UID: \"9b104695-0850-4fb3-b2f8-f764435f8694\") "
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.047115 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/9b104695-0850-4fb3-b2f8-f764435f8694-host-run-netns\") pod \"9b104695-0850-4fb3-b2f8-f764435f8694\" (UID: \"9b104695-0850-4fb3-b2f8-f764435f8694\") "
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.047154 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/9b104695-0850-4fb3-b2f8-f764435f8694-run-ovn" (OuterVolumeSpecName: "run-ovn") pod "9b104695-0850-4fb3-b2f8-f764435f8694" (UID: "9b104695-0850-4fb3-b2f8-f764435f8694"). InnerVolumeSpecName "run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.047206 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/9b104695-0850-4fb3-b2f8-f764435f8694-log-socket" (OuterVolumeSpecName: "log-socket") pod "9b104695-0850-4fb3-b2f8-f764435f8694" (UID: "9b104695-0850-4fb3-b2f8-f764435f8694"). InnerVolumeSpecName "log-socket". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.047166 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/9b104695-0850-4fb3-b2f8-f764435f8694-etc-openvswitch" (OuterVolumeSpecName: "etc-openvswitch") pod "9b104695-0850-4fb3-b2f8-f764435f8694" (UID: "9b104695-0850-4fb3-b2f8-f764435f8694"). InnerVolumeSpecName "etc-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.047300 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/9b104695-0850-4fb3-b2f8-f764435f8694-host-run-netns" (OuterVolumeSpecName: "host-run-netns") pod "9b104695-0850-4fb3-b2f8-f764435f8694" (UID: "9b104695-0850-4fb3-b2f8-f764435f8694"). InnerVolumeSpecName "host-run-netns". PluginName "kubernetes.io/host-path", VolumeGidValue ""
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.047359 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/d591ad07-c69d-458c-9b18-b09c467be552-var-lib-openvswitch\") pod \"ovnkube-node-w2769\" (UID: \"d591ad07-c69d-458c-9b18-b09c467be552\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2769" Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.047389 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/d591ad07-c69d-458c-9b18-b09c467be552-host-run-netns\") pod \"ovnkube-node-w2769\" (UID: \"d591ad07-c69d-458c-9b18-b09c467be552\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2769" Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.047411 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/d591ad07-c69d-458c-9b18-b09c467be552-run-systemd\") pod \"ovnkube-node-w2769\" (UID: \"d591ad07-c69d-458c-9b18-b09c467be552\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2769" Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.047424 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/d591ad07-c69d-458c-9b18-b09c467be552-ovnkube-script-lib\") pod \"ovnkube-node-w2769\" (UID: \"d591ad07-c69d-458c-9b18-b09c467be552\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2769" Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.047440 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/d591ad07-c69d-458c-9b18-b09c467be552-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-w2769\" (UID: \"d591ad07-c69d-458c-9b18-b09c467be552\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2769" Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.047477 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/d591ad07-c69d-458c-9b18-b09c467be552-systemd-units\") pod \"ovnkube-node-w2769\" (UID: \"d591ad07-c69d-458c-9b18-b09c467be552\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2769" Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.047490 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/d591ad07-c69d-458c-9b18-b09c467be552-etc-openvswitch\") pod \"ovnkube-node-w2769\" (UID: \"d591ad07-c69d-458c-9b18-b09c467be552\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2769" Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.047506 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4fmtr\" (UniqueName: \"kubernetes.io/projected/d591ad07-c69d-458c-9b18-b09c467be552-kube-api-access-4fmtr\") pod \"ovnkube-node-w2769\" (UID: \"d591ad07-c69d-458c-9b18-b09c467be552\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2769" Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.047523 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: 
\"kubernetes.io/host-path/d591ad07-c69d-458c-9b18-b09c467be552-host-kubelet\") pod \"ovnkube-node-w2769\" (UID: \"d591ad07-c69d-458c-9b18-b09c467be552\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2769" Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.047536 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/d591ad07-c69d-458c-9b18-b09c467be552-host-cni-netd\") pod \"ovnkube-node-w2769\" (UID: \"d591ad07-c69d-458c-9b18-b09c467be552\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2769" Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.047603 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/d591ad07-c69d-458c-9b18-b09c467be552-node-log\") pod \"ovnkube-node-w2769\" (UID: \"d591ad07-c69d-458c-9b18-b09c467be552\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2769" Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.047656 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/d591ad07-c69d-458c-9b18-b09c467be552-host-run-ovn-kubernetes\") pod \"ovnkube-node-w2769\" (UID: \"d591ad07-c69d-458c-9b18-b09c467be552\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2769" Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.047682 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/d591ad07-c69d-458c-9b18-b09c467be552-ovn-node-metrics-cert\") pod \"ovnkube-node-w2769\" (UID: \"d591ad07-c69d-458c-9b18-b09c467be552\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2769" Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.047710 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/d591ad07-c69d-458c-9b18-b09c467be552-run-openvswitch\") pod \"ovnkube-node-w2769\" (UID: \"d591ad07-c69d-458c-9b18-b09c467be552\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2769" Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.047727 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/d591ad07-c69d-458c-9b18-b09c467be552-run-ovn\") pod \"ovnkube-node-w2769\" (UID: \"d591ad07-c69d-458c-9b18-b09c467be552\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2769" Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.047743 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/d591ad07-c69d-458c-9b18-b09c467be552-env-overrides\") pod \"ovnkube-node-w2769\" (UID: \"d591ad07-c69d-458c-9b18-b09c467be552\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2769" Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.047769 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/d591ad07-c69d-458c-9b18-b09c467be552-log-socket\") pod \"ovnkube-node-w2769\" (UID: \"d591ad07-c69d-458c-9b18-b09c467be552\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2769" Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.047782 4492 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d591ad07-c69d-458c-9b18-b09c467be552-host-slash\") pod \"ovnkube-node-w2769\" (UID: \"d591ad07-c69d-458c-9b18-b09c467be552\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2769" Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.047801 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/d591ad07-c69d-458c-9b18-b09c467be552-ovnkube-config\") pod \"ovnkube-node-w2769\" (UID: \"d591ad07-c69d-458c-9b18-b09c467be552\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2769" Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.047907 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/d591ad07-c69d-458c-9b18-b09c467be552-host-cni-bin\") pod \"ovnkube-node-w2769\" (UID: \"d591ad07-c69d-458c-9b18-b09c467be552\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2769" Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.047962 4492 reconciler_common.go:293] "Volume detached for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/9b104695-0850-4fb3-b2f8-f764435f8694-systemd-units\") on node \"crc\" DevicePath \"\"" Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.047978 4492 reconciler_common.go:293] "Volume detached for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/9b104695-0850-4fb3-b2f8-f764435f8694-host-kubelet\") on node \"crc\" DevicePath \"\"" Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.047987 4492 reconciler_common.go:293] "Volume detached for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/9b104695-0850-4fb3-b2f8-f764435f8694-node-log\") on node \"crc\" DevicePath \"\"" Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.047994 4492 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/9b104695-0850-4fb3-b2f8-f764435f8694-ovnkube-config\") on node \"crc\" DevicePath \"\"" Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.048002 4492 reconciler_common.go:293] "Volume detached for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/9b104695-0850-4fb3-b2f8-f764435f8694-host-run-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.048010 4492 reconciler_common.go:293] "Volume detached for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/9b104695-0850-4fb3-b2f8-f764435f8694-var-lib-openvswitch\") on node \"crc\" DevicePath \"\"" Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.048018 4492 reconciler_common.go:293] "Volume detached for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/9b104695-0850-4fb3-b2f8-f764435f8694-etc-openvswitch\") on node \"crc\" DevicePath \"\"" Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.048026 4492 reconciler_common.go:293] "Volume detached for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/9b104695-0850-4fb3-b2f8-f764435f8694-run-ovn\") on node \"crc\" DevicePath \"\"" Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.048033 4492 reconciler_common.go:293] "Volume detached for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/9b104695-0850-4fb3-b2f8-f764435f8694-log-socket\") on node \"crc\" DevicePath \"\"" Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 
06:59:15.048040 4492 reconciler_common.go:293] "Volume detached for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/9b104695-0850-4fb3-b2f8-f764435f8694-host-run-netns\") on node \"crc\" DevicePath \"\"" Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.048047 4492 reconciler_common.go:293] "Volume detached for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/9b104695-0850-4fb3-b2f8-f764435f8694-host-var-lib-cni-networks-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.048055 4492 reconciler_common.go:293] "Volume detached for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/9b104695-0850-4fb3-b2f8-f764435f8694-run-openvswitch\") on node \"crc\" DevicePath \"\"" Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.048062 4492 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/9b104695-0850-4fb3-b2f8-f764435f8694-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.048069 4492 reconciler_common.go:293] "Volume detached for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/9b104695-0850-4fb3-b2f8-f764435f8694-host-cni-bin\") on node \"crc\" DevicePath \"\"" Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.048076 4492 reconciler_common.go:293] "Volume detached for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/9b104695-0850-4fb3-b2f8-f764435f8694-host-slash\") on node \"crc\" DevicePath \"\"" Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.048083 4492 reconciler_common.go:293] "Volume detached for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/9b104695-0850-4fb3-b2f8-f764435f8694-host-cni-netd\") on node \"crc\" DevicePath \"\"" Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.048089 4492 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/9b104695-0850-4fb3-b2f8-f764435f8694-env-overrides\") on node \"crc\" DevicePath \"\"" Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.050573 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9b104695-0850-4fb3-b2f8-f764435f8694-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "9b104695-0850-4fb3-b2f8-f764435f8694" (UID: "9b104695-0850-4fb3-b2f8-f764435f8694"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.051697 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9b104695-0850-4fb3-b2f8-f764435f8694-kube-api-access-mc5q6" (OuterVolumeSpecName: "kube-api-access-mc5q6") pod "9b104695-0850-4fb3-b2f8-f764435f8694" (UID: "9b104695-0850-4fb3-b2f8-f764435f8694"). InnerVolumeSpecName "kube-api-access-mc5q6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.058604 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/9b104695-0850-4fb3-b2f8-f764435f8694-run-systemd" (OuterVolumeSpecName: "run-systemd") pod "9b104695-0850-4fb3-b2f8-f764435f8694" (UID: "9b104695-0850-4fb3-b2f8-f764435f8694"). InnerVolumeSpecName "run-systemd". 
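	
The "Volume detached for volume ... DevicePath \"\"" entries above record the final bookkeeping step: each volume's entry is cleared from the actual state of the world, and because hostPath, configmap, secret, and projected volumes have no block device behind them, the reported DevicePath is empty. A toy version of that cleanup follows; the map layout is a hypothetical stand-in, not the kubelet's data structure.

    // Illustrative only: clear an actual-state-of-world entry on detach.
    package main

    import "fmt"

    type mountKey struct{ volume, node string }

    // value is the device path; empty for hostPath-style volumes.
    var actualState = map[mountKey]string{
        {"systemd-units", "crc"}: "",
    }

    func markDetached(volume, node string) {
        k := mountKey{volume, node}
        fmt.Printf("Volume detached for volume %q on node %q DevicePath %q\n",
            volume, node, actualState[k])
        delete(actualState, k)
    }

    func main() { markDetached("systemd-units", "crc") }
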
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.149702 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/d591ad07-c69d-458c-9b18-b09c467be552-var-lib-openvswitch\") pod \"ovnkube-node-w2769\" (UID: \"d591ad07-c69d-458c-9b18-b09c467be552\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2769" Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.149757 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/d591ad07-c69d-458c-9b18-b09c467be552-host-run-netns\") pod \"ovnkube-node-w2769\" (UID: \"d591ad07-c69d-458c-9b18-b09c467be552\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2769" Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.149782 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/d591ad07-c69d-458c-9b18-b09c467be552-ovnkube-script-lib\") pod \"ovnkube-node-w2769\" (UID: \"d591ad07-c69d-458c-9b18-b09c467be552\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2769" Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.149798 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/d591ad07-c69d-458c-9b18-b09c467be552-run-systemd\") pod \"ovnkube-node-w2769\" (UID: \"d591ad07-c69d-458c-9b18-b09c467be552\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2769" Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.149816 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/d591ad07-c69d-458c-9b18-b09c467be552-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-w2769\" (UID: \"d591ad07-c69d-458c-9b18-b09c467be552\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2769" Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.149852 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/d591ad07-c69d-458c-9b18-b09c467be552-systemd-units\") pod \"ovnkube-node-w2769\" (UID: \"d591ad07-c69d-458c-9b18-b09c467be552\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2769" Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.149867 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/d591ad07-c69d-458c-9b18-b09c467be552-etc-openvswitch\") pod \"ovnkube-node-w2769\" (UID: \"d591ad07-c69d-458c-9b18-b09c467be552\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2769" Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.149889 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/d591ad07-c69d-458c-9b18-b09c467be552-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-w2769\" (UID: \"d591ad07-c69d-458c-9b18-b09c467be552\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2769" Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.149904 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/d591ad07-c69d-458c-9b18-b09c467be552-run-systemd\") pod \"ovnkube-node-w2769\" (UID: \"d591ad07-c69d-458c-9b18-b09c467be552\") 
" pod="openshift-ovn-kubernetes/ovnkube-node-w2769" Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.149927 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/d591ad07-c69d-458c-9b18-b09c467be552-etc-openvswitch\") pod \"ovnkube-node-w2769\" (UID: \"d591ad07-c69d-458c-9b18-b09c467be552\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2769" Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.149846 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/d591ad07-c69d-458c-9b18-b09c467be552-var-lib-openvswitch\") pod \"ovnkube-node-w2769\" (UID: \"d591ad07-c69d-458c-9b18-b09c467be552\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2769" Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.149938 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/d591ad07-c69d-458c-9b18-b09c467be552-systemd-units\") pod \"ovnkube-node-w2769\" (UID: \"d591ad07-c69d-458c-9b18-b09c467be552\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2769" Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.149902 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/d591ad07-c69d-458c-9b18-b09c467be552-host-run-netns\") pod \"ovnkube-node-w2769\" (UID: \"d591ad07-c69d-458c-9b18-b09c467be552\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2769" Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.149897 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4fmtr\" (UniqueName: \"kubernetes.io/projected/d591ad07-c69d-458c-9b18-b09c467be552-kube-api-access-4fmtr\") pod \"ovnkube-node-w2769\" (UID: \"d591ad07-c69d-458c-9b18-b09c467be552\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2769" Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.150127 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/d591ad07-c69d-458c-9b18-b09c467be552-host-cni-netd\") pod \"ovnkube-node-w2769\" (UID: \"d591ad07-c69d-458c-9b18-b09c467be552\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2769" Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.150216 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/d591ad07-c69d-458c-9b18-b09c467be552-host-cni-netd\") pod \"ovnkube-node-w2769\" (UID: \"d591ad07-c69d-458c-9b18-b09c467be552\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2769" Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.150158 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/d591ad07-c69d-458c-9b18-b09c467be552-host-kubelet\") pod \"ovnkube-node-w2769\" (UID: \"d591ad07-c69d-458c-9b18-b09c467be552\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2769" Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.150301 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/d591ad07-c69d-458c-9b18-b09c467be552-host-kubelet\") pod \"ovnkube-node-w2769\" (UID: \"d591ad07-c69d-458c-9b18-b09c467be552\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2769" Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.150323 4492 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/d591ad07-c69d-458c-9b18-b09c467be552-node-log\") pod \"ovnkube-node-w2769\" (UID: \"d591ad07-c69d-458c-9b18-b09c467be552\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2769" Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.150392 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/d591ad07-c69d-458c-9b18-b09c467be552-node-log\") pod \"ovnkube-node-w2769\" (UID: \"d591ad07-c69d-458c-9b18-b09c467be552\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2769" Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.150361 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/d591ad07-c69d-458c-9b18-b09c467be552-host-run-ovn-kubernetes\") pod \"ovnkube-node-w2769\" (UID: \"d591ad07-c69d-458c-9b18-b09c467be552\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2769" Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.150436 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/d591ad07-c69d-458c-9b18-b09c467be552-ovn-node-metrics-cert\") pod \"ovnkube-node-w2769\" (UID: \"d591ad07-c69d-458c-9b18-b09c467be552\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2769" Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.150480 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/d591ad07-c69d-458c-9b18-b09c467be552-run-openvswitch\") pod \"ovnkube-node-w2769\" (UID: \"d591ad07-c69d-458c-9b18-b09c467be552\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2769" Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.150487 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/d591ad07-c69d-458c-9b18-b09c467be552-host-run-ovn-kubernetes\") pod \"ovnkube-node-w2769\" (UID: \"d591ad07-c69d-458c-9b18-b09c467be552\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2769" Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.150501 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/d591ad07-c69d-458c-9b18-b09c467be552-run-ovn\") pod \"ovnkube-node-w2769\" (UID: \"d591ad07-c69d-458c-9b18-b09c467be552\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2769" Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.150551 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/d591ad07-c69d-458c-9b18-b09c467be552-run-openvswitch\") pod \"ovnkube-node-w2769\" (UID: \"d591ad07-c69d-458c-9b18-b09c467be552\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2769" Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.150571 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/d591ad07-c69d-458c-9b18-b09c467be552-env-overrides\") pod \"ovnkube-node-w2769\" (UID: \"d591ad07-c69d-458c-9b18-b09c467be552\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2769" Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.150584 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: 
\"kubernetes.io/host-path/d591ad07-c69d-458c-9b18-b09c467be552-run-ovn\") pod \"ovnkube-node-w2769\" (UID: \"d591ad07-c69d-458c-9b18-b09c467be552\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2769" Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.150604 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/d591ad07-c69d-458c-9b18-b09c467be552-log-socket\") pod \"ovnkube-node-w2769\" (UID: \"d591ad07-c69d-458c-9b18-b09c467be552\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2769" Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.150639 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d591ad07-c69d-458c-9b18-b09c467be552-host-slash\") pod \"ovnkube-node-w2769\" (UID: \"d591ad07-c69d-458c-9b18-b09c467be552\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2769" Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.150664 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/d591ad07-c69d-458c-9b18-b09c467be552-ovnkube-config\") pod \"ovnkube-node-w2769\" (UID: \"d591ad07-c69d-458c-9b18-b09c467be552\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2769" Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.150680 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/d591ad07-c69d-458c-9b18-b09c467be552-log-socket\") pod \"ovnkube-node-w2769\" (UID: \"d591ad07-c69d-458c-9b18-b09c467be552\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2769" Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.150707 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d591ad07-c69d-458c-9b18-b09c467be552-host-slash\") pod \"ovnkube-node-w2769\" (UID: \"d591ad07-c69d-458c-9b18-b09c467be552\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2769" Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.150752 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/d591ad07-c69d-458c-9b18-b09c467be552-host-cni-bin\") pod \"ovnkube-node-w2769\" (UID: \"d591ad07-c69d-458c-9b18-b09c467be552\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2769" Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.150850 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/d591ad07-c69d-458c-9b18-b09c467be552-host-cni-bin\") pod \"ovnkube-node-w2769\" (UID: \"d591ad07-c69d-458c-9b18-b09c467be552\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2769" Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.150822 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mc5q6\" (UniqueName: \"kubernetes.io/projected/9b104695-0850-4fb3-b2f8-f764435f8694-kube-api-access-mc5q6\") on node \"crc\" DevicePath \"\"" Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.150916 4492 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/9b104695-0850-4fb3-b2f8-f764435f8694-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.150940 4492 reconciler_common.go:293] "Volume detached for volume \"run-systemd\" (UniqueName: 
\"kubernetes.io/host-path/9b104695-0850-4fb3-b2f8-f764435f8694-run-systemd\") on node \"crc\" DevicePath \"\"" Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.150982 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/d591ad07-c69d-458c-9b18-b09c467be552-env-overrides\") pod \"ovnkube-node-w2769\" (UID: \"d591ad07-c69d-458c-9b18-b09c467be552\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2769" Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.151214 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/d591ad07-c69d-458c-9b18-b09c467be552-ovnkube-config\") pod \"ovnkube-node-w2769\" (UID: \"d591ad07-c69d-458c-9b18-b09c467be552\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2769" Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.151491 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/d591ad07-c69d-458c-9b18-b09c467be552-ovnkube-script-lib\") pod \"ovnkube-node-w2769\" (UID: \"d591ad07-c69d-458c-9b18-b09c467be552\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2769" Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.153863 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/d591ad07-c69d-458c-9b18-b09c467be552-ovn-node-metrics-cert\") pod \"ovnkube-node-w2769\" (UID: \"d591ad07-c69d-458c-9b18-b09c467be552\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2769" Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.164474 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4fmtr\" (UniqueName: \"kubernetes.io/projected/d591ad07-c69d-458c-9b18-b09c467be552-kube-api-access-4fmtr\") pod \"ovnkube-node-w2769\" (UID: \"d591ad07-c69d-458c-9b18-b09c467be552\") " pod="openshift-ovn-kubernetes/ovnkube-node-w2769" Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.291056 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-w2769" Nov 26 06:59:15 crc kubenswrapper[4492]: W1126 06:59:15.307974 4492 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd591ad07_c69d_458c_9b18_b09c467be552.slice/crio-b67f98535ade04362f8c22a772f8f11ab6f49c626457d8d8b72d04ae3a352501 WatchSource:0}: Error finding container b67f98535ade04362f8c22a772f8f11ab6f49c626457d8d8b72d04ae3a352501: Status 404 returned error can't find the container with id b67f98535ade04362f8c22a772f8f11ab6f49c626457d8d8b72d04ae3a352501 Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.379966 4492 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-lghgp_9b104695-0850-4fb3-b2f8-f764435f8694/ovnkube-controller/3.log" Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.381403 4492 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-lghgp_9b104695-0850-4fb3-b2f8-f764435f8694/ovn-acl-logging/0.log" Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.381687 4492 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-lghgp_9b104695-0850-4fb3-b2f8-f764435f8694/ovn-controller/0.log" Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.381926 4492 generic.go:334] "Generic (PLEG): container finished" podID="9b104695-0850-4fb3-b2f8-f764435f8694" containerID="4ea0c5b21e02525e24489f438a88ad9cea344b6d4d46a7887baf8dc07c82a334" exitCode=0 Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.381950 4492 generic.go:334] "Generic (PLEG): container finished" podID="9b104695-0850-4fb3-b2f8-f764435f8694" containerID="dcc2ea28d7ab1193d0836b56704a623efefea918b62fde4a41c3d5466a531a39" exitCode=0 Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.381958 4492 generic.go:334] "Generic (PLEG): container finished" podID="9b104695-0850-4fb3-b2f8-f764435f8694" containerID="dba16d525497282969e2ead66e1d4886793234d0f4d2ef143a8246329131b046" exitCode=0 Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.381964 4492 generic.go:334] "Generic (PLEG): container finished" podID="9b104695-0850-4fb3-b2f8-f764435f8694" containerID="501133ebc0e93cbba7bea713d813e6f41caa28286ed59ae8d328b6f9050a1e71" exitCode=0 Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.381972 4492 generic.go:334] "Generic (PLEG): container finished" podID="9b104695-0850-4fb3-b2f8-f764435f8694" containerID="738cf2a56bd431ea3dcd29c8a6e66881e618cf6592af8ce1852be9776c41f803" exitCode=0 Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.381978 4492 generic.go:334] "Generic (PLEG): container finished" podID="9b104695-0850-4fb3-b2f8-f764435f8694" containerID="5de69ea04f427c1f151433ee53b119dfa7e3cd603f924ebca340c5050ff7d097" exitCode=0 Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.381984 4492 generic.go:334] "Generic (PLEG): container finished" podID="9b104695-0850-4fb3-b2f8-f764435f8694" containerID="0301a329531b99f93a3e91f672edc4ce17ae03cfd5562f5802362423f11670fc" exitCode=143 Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.381990 4492 generic.go:334] "Generic (PLEG): container finished" podID="9b104695-0850-4fb3-b2f8-f764435f8694" containerID="1519223368c78fc663c8f6674135a7c574425dccbd0151d6663836b4715e9f9e" exitCode=143 Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.382024 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" 
event={"ID":"9b104695-0850-4fb3-b2f8-f764435f8694","Type":"ContainerDied","Data":"4ea0c5b21e02525e24489f438a88ad9cea344b6d4d46a7887baf8dc07c82a334"} Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.382048 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" event={"ID":"9b104695-0850-4fb3-b2f8-f764435f8694","Type":"ContainerDied","Data":"dcc2ea28d7ab1193d0836b56704a623efefea918b62fde4a41c3d5466a531a39"} Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.382059 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" event={"ID":"9b104695-0850-4fb3-b2f8-f764435f8694","Type":"ContainerDied","Data":"dba16d525497282969e2ead66e1d4886793234d0f4d2ef143a8246329131b046"} Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.382067 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" event={"ID":"9b104695-0850-4fb3-b2f8-f764435f8694","Type":"ContainerDied","Data":"501133ebc0e93cbba7bea713d813e6f41caa28286ed59ae8d328b6f9050a1e71"} Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.382076 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" event={"ID":"9b104695-0850-4fb3-b2f8-f764435f8694","Type":"ContainerDied","Data":"738cf2a56bd431ea3dcd29c8a6e66881e618cf6592af8ce1852be9776c41f803"} Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.382086 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" event={"ID":"9b104695-0850-4fb3-b2f8-f764435f8694","Type":"ContainerDied","Data":"5de69ea04f427c1f151433ee53b119dfa7e3cd603f924ebca340c5050ff7d097"} Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.382095 4492 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c0888a57c383ad8c0a363ee29bc31841ce4175cb3aeb1b825da7efd383d4265c"} Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.382105 4492 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"dcc2ea28d7ab1193d0836b56704a623efefea918b62fde4a41c3d5466a531a39"} Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.382109 4492 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"dba16d525497282969e2ead66e1d4886793234d0f4d2ef143a8246329131b046"} Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.382114 4492 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"501133ebc0e93cbba7bea713d813e6f41caa28286ed59ae8d328b6f9050a1e71"} Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.382119 4492 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"738cf2a56bd431ea3dcd29c8a6e66881e618cf6592af8ce1852be9776c41f803"} Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.382123 4492 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"5de69ea04f427c1f151433ee53b119dfa7e3cd603f924ebca340c5050ff7d097"} Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.382127 4492 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"0301a329531b99f93a3e91f672edc4ce17ae03cfd5562f5802362423f11670fc"} Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.382131 4492 
pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"1519223368c78fc663c8f6674135a7c574425dccbd0151d6663836b4715e9f9e"} Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.382135 4492 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"4001ee998f435fc6249ef42a3a6a9a8cf4a5031eb8999158ba3ba6bfa41a4a43"} Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.382141 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" event={"ID":"9b104695-0850-4fb3-b2f8-f764435f8694","Type":"ContainerDied","Data":"0301a329531b99f93a3e91f672edc4ce17ae03cfd5562f5802362423f11670fc"} Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.382148 4492 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"4ea0c5b21e02525e24489f438a88ad9cea344b6d4d46a7887baf8dc07c82a334"} Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.382153 4492 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c0888a57c383ad8c0a363ee29bc31841ce4175cb3aeb1b825da7efd383d4265c"} Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.382157 4492 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"dcc2ea28d7ab1193d0836b56704a623efefea918b62fde4a41c3d5466a531a39"} Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.382162 4492 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"dba16d525497282969e2ead66e1d4886793234d0f4d2ef143a8246329131b046"} Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.382166 4492 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"501133ebc0e93cbba7bea713d813e6f41caa28286ed59ae8d328b6f9050a1e71"} Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.382189 4492 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"738cf2a56bd431ea3dcd29c8a6e66881e618cf6592af8ce1852be9776c41f803"} Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.382196 4492 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"5de69ea04f427c1f151433ee53b119dfa7e3cd603f924ebca340c5050ff7d097"} Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.382200 4492 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"0301a329531b99f93a3e91f672edc4ce17ae03cfd5562f5802362423f11670fc"} Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.382205 4492 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"1519223368c78fc663c8f6674135a7c574425dccbd0151d6663836b4715e9f9e"} Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.382209 4492 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"4001ee998f435fc6249ef42a3a6a9a8cf4a5031eb8999158ba3ba6bfa41a4a43"} Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.382216 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" event={"ID":"9b104695-0850-4fb3-b2f8-f764435f8694","Type":"ContainerDied","Data":"1519223368c78fc663c8f6674135a7c574425dccbd0151d6663836b4715e9f9e"} 
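
The burst above is the PLEG (pod lifecycle event generator) relisting the runtime after the teardown: generic.go reports each container's final state ("container finished", exitCode 0 or 143), kubelet.go turns each into a ContainerDied event, and pod_container_deletor logs "Failed to issue the request to remove container" for IDs that are already gone; the per-relist repetition is noisy but benign, as is the earlier cAdvisor 404 for the brand-new sandbox cgroup that disappeared between listing and inspection. A minimal sketch of the relist diff that produces ContainerDied events follows, with truncated stand-in IDs.

    // Illustrative only: diff two runtime snapshots and emit ContainerDied.
    package main

    import "fmt"

    type state int

    const (
        running state = iota
        exited
    )

    func relist(prev, cur map[string]state) {
        for id, s := range cur {
            // IDs missing from prev read as the zero value (running) here;
            // the real PLEG tracks richer per-container records.
            if s == exited && prev[id] == running {
                fmt.Printf("SyncLoop (PLEG): ContainerDied %s\n", id)
            }
        }
    }

    func main() {
        prev := map[string]state{"4ea0c5b2": running, "15192233": running}
        cur := map[string]state{"4ea0c5b2": exited, "15192233": exited}
        relist(prev, cur)
    }
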
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.382224    4492 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"4ea0c5b21e02525e24489f438a88ad9cea344b6d4d46a7887baf8dc07c82a334"}
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.382229    4492 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c0888a57c383ad8c0a363ee29bc31841ce4175cb3aeb1b825da7efd383d4265c"}
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.382234    4492 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"dcc2ea28d7ab1193d0836b56704a623efefea918b62fde4a41c3d5466a531a39"}
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.382238    4492 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"dba16d525497282969e2ead66e1d4886793234d0f4d2ef143a8246329131b046"}
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.382243    4492 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"501133ebc0e93cbba7bea713d813e6f41caa28286ed59ae8d328b6f9050a1e71"}
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.382247    4492 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"738cf2a56bd431ea3dcd29c8a6e66881e618cf6592af8ce1852be9776c41f803"}
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.382252    4492 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"5de69ea04f427c1f151433ee53b119dfa7e3cd603f924ebca340c5050ff7d097"}
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.382256    4492 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"0301a329531b99f93a3e91f672edc4ce17ae03cfd5562f5802362423f11670fc"}
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.382261    4492 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"1519223368c78fc663c8f6674135a7c574425dccbd0151d6663836b4715e9f9e"}
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.382265    4492 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"4001ee998f435fc6249ef42a3a6a9a8cf4a5031eb8999158ba3ba6bfa41a4a43"}
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.382271    4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-lghgp" event={"ID":"9b104695-0850-4fb3-b2f8-f764435f8694","Type":"ContainerDied","Data":"af14b9a6ebbe92710db288ededc518253b4cd26ee8e5926763078dc46ce49b07"}
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.382277    4492 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"4ea0c5b21e02525e24489f438a88ad9cea344b6d4d46a7887baf8dc07c82a334"}
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.382283    4492 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c0888a57c383ad8c0a363ee29bc31841ce4175cb3aeb1b825da7efd383d4265c"}
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.382288    4492 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"dcc2ea28d7ab1193d0836b56704a623efefea918b62fde4a41c3d5466a531a39"}
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.382293    4492 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"dba16d525497282969e2ead66e1d4886793234d0f4d2ef143a8246329131b046"}
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.382297    4492 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"501133ebc0e93cbba7bea713d813e6f41caa28286ed59ae8d328b6f9050a1e71"}
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.382302    4492 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"738cf2a56bd431ea3dcd29c8a6e66881e618cf6592af8ce1852be9776c41f803"}
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.382306    4492 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"5de69ea04f427c1f151433ee53b119dfa7e3cd603f924ebca340c5050ff7d097"}
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.382310    4492 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"0301a329531b99f93a3e91f672edc4ce17ae03cfd5562f5802362423f11670fc"}
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.382315    4492 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"1519223368c78fc663c8f6674135a7c574425dccbd0151d6663836b4715e9f9e"}
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.382319    4492 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"4001ee998f435fc6249ef42a3a6a9a8cf4a5031eb8999158ba3ba6bfa41a4a43"}
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.382331    4492 scope.go:117] "RemoveContainer" containerID="4ea0c5b21e02525e24489f438a88ad9cea344b6d4d46a7887baf8dc07c82a334"
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.382436    4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-lghgp"
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.385390    4492 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-5bshd_a471ac3f-0ac0-4110-94bb-194c0de0af26/kube-multus/1.log"
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.385748    4492 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-5bshd_a471ac3f-0ac0-4110-94bb-194c0de0af26/kube-multus/0.log"
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.385779    4492 generic.go:334] "Generic (PLEG): container finished" podID="a471ac3f-0ac0-4110-94bb-194c0de0af26" containerID="d93d67e2acbb2cff41dd4d5bc98ffe7cd68f7fd393e4fab2fc974ae4de103ca3" exitCode=2
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.385817    4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-5bshd" event={"ID":"a471ac3f-0ac0-4110-94bb-194c0de0af26","Type":"ContainerDied","Data":"d93d67e2acbb2cff41dd4d5bc98ffe7cd68f7fd393e4fab2fc974ae4de103ca3"}
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.385832    4492 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"9a91fba8751c53be54b0060bfc75906ab11b521770ca44425d8910fa13027c9d"}
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.386232    4492 scope.go:117] "RemoveContainer" containerID="d93d67e2acbb2cff41dd4d5bc98ffe7cd68f7fd393e4fab2fc974ae4de103ca3"
Nov 26 06:59:15 crc kubenswrapper[4492]: E1126 06:59:15.386549    4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-multus pod=multus-5bshd_openshift-multus(a471ac3f-0ac0-4110-94bb-194c0de0af26)\"" pod="openshift-multus/multus-5bshd" podUID="a471ac3f-0ac0-4110-94bb-194c0de0af26"
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.388831    4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w2769" event={"ID":"d591ad07-c69d-458c-9b18-b09c467be552","Type":"ContainerStarted","Data":"9b3f23dc8275f2e69507ec12a3d68526a2d62f1c08f5a3fb19e7063522b87c9c"}
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.388856    4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w2769" event={"ID":"d591ad07-c69d-458c-9b18-b09c467be552","Type":"ContainerStarted","Data":"b67f98535ade04362f8c22a772f8f11ab6f49c626457d8d8b72d04ae3a352501"}
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.426544    4492 scope.go:117] "RemoveContainer" containerID="c0888a57c383ad8c0a363ee29bc31841ce4175cb3aeb1b825da7efd383d4265c"
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.441425    4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-lghgp"]
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.446630    4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-lghgp"]
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.452553    4492 scope.go:117] "RemoveContainer" containerID="dcc2ea28d7ab1193d0836b56704a623efefea918b62fde4a41c3d5466a531a39"
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.463891    4492 scope.go:117] "RemoveContainer" containerID="dba16d525497282969e2ead66e1d4886793234d0f4d2ef143a8246329131b046"
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.473383    4492 scope.go:117] "RemoveContainer" containerID="501133ebc0e93cbba7bea713d813e6f41caa28286ed59ae8d328b6f9050a1e71"
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.482336    4492 scope.go:117] "RemoveContainer" containerID="738cf2a56bd431ea3dcd29c8a6e66881e618cf6592af8ce1852be9776c41f803"
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.499988    4492 scope.go:117] "RemoveContainer" containerID="5de69ea04f427c1f151433ee53b119dfa7e3cd603f924ebca340c5050ff7d097"
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.509818    4492 scope.go:117] "RemoveContainer" containerID="0301a329531b99f93a3e91f672edc4ce17ae03cfd5562f5802362423f11670fc"
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.522836    4492 scope.go:117] "RemoveContainer" containerID="1519223368c78fc663c8f6674135a7c574425dccbd0151d6663836b4715e9f9e"
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.540616    4492 scope.go:117] "RemoveContainer" containerID="4001ee998f435fc6249ef42a3a6a9a8cf4a5031eb8999158ba3ba6bfa41a4a43"
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.553018    4492 scope.go:117] "RemoveContainer" containerID="4ea0c5b21e02525e24489f438a88ad9cea344b6d4d46a7887baf8dc07c82a334"
Nov 26 06:59:15 crc kubenswrapper[4492]: E1126 06:59:15.553306    4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4ea0c5b21e02525e24489f438a88ad9cea344b6d4d46a7887baf8dc07c82a334\": container with ID starting with 4ea0c5b21e02525e24489f438a88ad9cea344b6d4d46a7887baf8dc07c82a334 not found: ID does not exist" containerID="4ea0c5b21e02525e24489f438a88ad9cea344b6d4d46a7887baf8dc07c82a334"
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.553335    4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4ea0c5b21e02525e24489f438a88ad9cea344b6d4d46a7887baf8dc07c82a334"} err="failed to get container status \"4ea0c5b21e02525e24489f438a88ad9cea344b6d4d46a7887baf8dc07c82a334\": rpc error: code = NotFound desc = could not find container \"4ea0c5b21e02525e24489f438a88ad9cea344b6d4d46a7887baf8dc07c82a334\": container with ID starting with 4ea0c5b21e02525e24489f438a88ad9cea344b6d4d46a7887baf8dc07c82a334 not found: ID does not exist"
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.553354    4492 scope.go:117] "RemoveContainer" containerID="c0888a57c383ad8c0a363ee29bc31841ce4175cb3aeb1b825da7efd383d4265c"
Nov 26 06:59:15 crc kubenswrapper[4492]: E1126 06:59:15.553581    4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c0888a57c383ad8c0a363ee29bc31841ce4175cb3aeb1b825da7efd383d4265c\": container with ID starting with c0888a57c383ad8c0a363ee29bc31841ce4175cb3aeb1b825da7efd383d4265c not found: ID does not exist" containerID="c0888a57c383ad8c0a363ee29bc31841ce4175cb3aeb1b825da7efd383d4265c"
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.553620    4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c0888a57c383ad8c0a363ee29bc31841ce4175cb3aeb1b825da7efd383d4265c"} err="failed to get container status \"c0888a57c383ad8c0a363ee29bc31841ce4175cb3aeb1b825da7efd383d4265c\": rpc error: code = NotFound desc = could not find container \"c0888a57c383ad8c0a363ee29bc31841ce4175cb3aeb1b825da7efd383d4265c\": container with ID starting with c0888a57c383ad8c0a363ee29bc31841ce4175cb3aeb1b825da7efd383d4265c not found: ID does not exist"
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.553635    4492 scope.go:117] "RemoveContainer" containerID="dcc2ea28d7ab1193d0836b56704a623efefea918b62fde4a41c3d5466a531a39"
Nov 26 06:59:15 crc kubenswrapper[4492]: E1126 06:59:15.553806    4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dcc2ea28d7ab1193d0836b56704a623efefea918b62fde4a41c3d5466a531a39\": container with ID starting with dcc2ea28d7ab1193d0836b56704a623efefea918b62fde4a41c3d5466a531a39 not found: ID does not exist" containerID="dcc2ea28d7ab1193d0836b56704a623efefea918b62fde4a41c3d5466a531a39"
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.553826    4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dcc2ea28d7ab1193d0836b56704a623efefea918b62fde4a41c3d5466a531a39"} err="failed to get container status \"dcc2ea28d7ab1193d0836b56704a623efefea918b62fde4a41c3d5466a531a39\": rpc error: code = NotFound desc = could not find container \"dcc2ea28d7ab1193d0836b56704a623efefea918b62fde4a41c3d5466a531a39\": container with ID starting with dcc2ea28d7ab1193d0836b56704a623efefea918b62fde4a41c3d5466a531a39 not found: ID does not exist"
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.553839    4492 scope.go:117] "RemoveContainer" containerID="dba16d525497282969e2ead66e1d4886793234d0f4d2ef143a8246329131b046"
Nov 26 06:59:15 crc kubenswrapper[4492]: E1126 06:59:15.554012    4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dba16d525497282969e2ead66e1d4886793234d0f4d2ef143a8246329131b046\": container with ID starting with dba16d525497282969e2ead66e1d4886793234d0f4d2ef143a8246329131b046 not found: ID does not exist" containerID="dba16d525497282969e2ead66e1d4886793234d0f4d2ef143a8246329131b046"
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.554030    4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dba16d525497282969e2ead66e1d4886793234d0f4d2ef143a8246329131b046"} err="failed to get container status \"dba16d525497282969e2ead66e1d4886793234d0f4d2ef143a8246329131b046\": rpc error: code = NotFound desc = could not find container \"dba16d525497282969e2ead66e1d4886793234d0f4d2ef143a8246329131b046\": container with ID starting with dba16d525497282969e2ead66e1d4886793234d0f4d2ef143a8246329131b046 not found: ID does not exist"
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.554041    4492 scope.go:117] "RemoveContainer" containerID="501133ebc0e93cbba7bea713d813e6f41caa28286ed59ae8d328b6f9050a1e71"
Nov 26 06:59:15 crc kubenswrapper[4492]: E1126 06:59:15.554272    4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"501133ebc0e93cbba7bea713d813e6f41caa28286ed59ae8d328b6f9050a1e71\": container with ID starting with 501133ebc0e93cbba7bea713d813e6f41caa28286ed59ae8d328b6f9050a1e71 not found: ID does not exist" containerID="501133ebc0e93cbba7bea713d813e6f41caa28286ed59ae8d328b6f9050a1e71"
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.554292    4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"501133ebc0e93cbba7bea713d813e6f41caa28286ed59ae8d328b6f9050a1e71"} err="failed to get container status \"501133ebc0e93cbba7bea713d813e6f41caa28286ed59ae8d328b6f9050a1e71\": rpc error: code = NotFound desc = could not find container \"501133ebc0e93cbba7bea713d813e6f41caa28286ed59ae8d328b6f9050a1e71\": container with ID starting with 501133ebc0e93cbba7bea713d813e6f41caa28286ed59ae8d328b6f9050a1e71 not found: ID does not exist"
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.554303    4492 scope.go:117] "RemoveContainer" containerID="738cf2a56bd431ea3dcd29c8a6e66881e618cf6592af8ce1852be9776c41f803"
Nov 26 06:59:15 crc kubenswrapper[4492]: E1126 06:59:15.554544    4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"738cf2a56bd431ea3dcd29c8a6e66881e618cf6592af8ce1852be9776c41f803\": container with ID starting with 738cf2a56bd431ea3dcd29c8a6e66881e618cf6592af8ce1852be9776c41f803 not found: ID does not exist" containerID="738cf2a56bd431ea3dcd29c8a6e66881e618cf6592af8ce1852be9776c41f803"
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.554576    4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"738cf2a56bd431ea3dcd29c8a6e66881e618cf6592af8ce1852be9776c41f803"} err="failed to get container status \"738cf2a56bd431ea3dcd29c8a6e66881e618cf6592af8ce1852be9776c41f803\": rpc error: code = NotFound desc = could not find container \"738cf2a56bd431ea3dcd29c8a6e66881e618cf6592af8ce1852be9776c41f803\": container with ID starting with 738cf2a56bd431ea3dcd29c8a6e66881e618cf6592af8ce1852be9776c41f803 not found: ID does not exist"
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.554589    4492 scope.go:117] "RemoveContainer" containerID="5de69ea04f427c1f151433ee53b119dfa7e3cd603f924ebca340c5050ff7d097"
Nov 26 06:59:15 crc kubenswrapper[4492]: E1126 06:59:15.555738    4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5de69ea04f427c1f151433ee53b119dfa7e3cd603f924ebca340c5050ff7d097\": container with ID starting with 5de69ea04f427c1f151433ee53b119dfa7e3cd603f924ebca340c5050ff7d097 not found: ID does not exist" containerID="5de69ea04f427c1f151433ee53b119dfa7e3cd603f924ebca340c5050ff7d097"
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.555782    4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5de69ea04f427c1f151433ee53b119dfa7e3cd603f924ebca340c5050ff7d097"} err="failed to get container status \"5de69ea04f427c1f151433ee53b119dfa7e3cd603f924ebca340c5050ff7d097\": rpc error: code = NotFound desc = could not find container \"5de69ea04f427c1f151433ee53b119dfa7e3cd603f924ebca340c5050ff7d097\": container with ID starting with 5de69ea04f427c1f151433ee53b119dfa7e3cd603f924ebca340c5050ff7d097 not found: ID does not exist"
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.555812    4492 scope.go:117] "RemoveContainer" containerID="0301a329531b99f93a3e91f672edc4ce17ae03cfd5562f5802362423f11670fc"
Nov 26 06:59:15 crc kubenswrapper[4492]: E1126 06:59:15.556084    4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0301a329531b99f93a3e91f672edc4ce17ae03cfd5562f5802362423f11670fc\": container with ID starting with 0301a329531b99f93a3e91f672edc4ce17ae03cfd5562f5802362423f11670fc not found: ID does not exist" containerID="0301a329531b99f93a3e91f672edc4ce17ae03cfd5562f5802362423f11670fc"
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.556105    4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0301a329531b99f93a3e91f672edc4ce17ae03cfd5562f5802362423f11670fc"} err="failed to get container status \"0301a329531b99f93a3e91f672edc4ce17ae03cfd5562f5802362423f11670fc\": rpc error: code = NotFound desc = could not find container \"0301a329531b99f93a3e91f672edc4ce17ae03cfd5562f5802362423f11670fc\": container with ID starting with 0301a329531b99f93a3e91f672edc4ce17ae03cfd5562f5802362423f11670fc not found: ID does not exist"
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.556118    4492 scope.go:117] "RemoveContainer" containerID="1519223368c78fc663c8f6674135a7c574425dccbd0151d6663836b4715e9f9e"
Nov 26 06:59:15 crc kubenswrapper[4492]: E1126 06:59:15.556368    4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1519223368c78fc663c8f6674135a7c574425dccbd0151d6663836b4715e9f9e\": container with ID starting with 1519223368c78fc663c8f6674135a7c574425dccbd0151d6663836b4715e9f9e not found: ID does not exist" containerID="1519223368c78fc663c8f6674135a7c574425dccbd0151d6663836b4715e9f9e"
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.556388    4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1519223368c78fc663c8f6674135a7c574425dccbd0151d6663836b4715e9f9e"} err="failed to get container status \"1519223368c78fc663c8f6674135a7c574425dccbd0151d6663836b4715e9f9e\": rpc error: code = NotFound desc = could not find container \"1519223368c78fc663c8f6674135a7c574425dccbd0151d6663836b4715e9f9e\": container with ID starting with 1519223368c78fc663c8f6674135a7c574425dccbd0151d6663836b4715e9f9e not found: ID does not exist"
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.556401    4492 scope.go:117] "RemoveContainer" containerID="4001ee998f435fc6249ef42a3a6a9a8cf4a5031eb8999158ba3ba6bfa41a4a43"
Nov 26 06:59:15 crc kubenswrapper[4492]: E1126 06:59:15.556574    4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4001ee998f435fc6249ef42a3a6a9a8cf4a5031eb8999158ba3ba6bfa41a4a43\": container with ID starting with 4001ee998f435fc6249ef42a3a6a9a8cf4a5031eb8999158ba3ba6bfa41a4a43 not found: ID does not exist" containerID="4001ee998f435fc6249ef42a3a6a9a8cf4a5031eb8999158ba3ba6bfa41a4a43"
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.556591    4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4001ee998f435fc6249ef42a3a6a9a8cf4a5031eb8999158ba3ba6bfa41a4a43"} err="failed to get container status \"4001ee998f435fc6249ef42a3a6a9a8cf4a5031eb8999158ba3ba6bfa41a4a43\": rpc error: code = NotFound desc = could not find container \"4001ee998f435fc6249ef42a3a6a9a8cf4a5031eb8999158ba3ba6bfa41a4a43\": container with ID starting with 4001ee998f435fc6249ef42a3a6a9a8cf4a5031eb8999158ba3ba6bfa41a4a43 not found: ID does not exist"
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.556603    4492 scope.go:117] "RemoveContainer" containerID="4ea0c5b21e02525e24489f438a88ad9cea344b6d4d46a7887baf8dc07c82a334"
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.556768    4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4ea0c5b21e02525e24489f438a88ad9cea344b6d4d46a7887baf8dc07c82a334"} err="failed to get container status \"4ea0c5b21e02525e24489f438a88ad9cea344b6d4d46a7887baf8dc07c82a334\": rpc error: code = NotFound desc = could not find container \"4ea0c5b21e02525e24489f438a88ad9cea344b6d4d46a7887baf8dc07c82a334\": container with ID starting with 4ea0c5b21e02525e24489f438a88ad9cea344b6d4d46a7887baf8dc07c82a334 not found: ID does not exist"
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.556786    4492 scope.go:117] "RemoveContainer" containerID="c0888a57c383ad8c0a363ee29bc31841ce4175cb3aeb1b825da7efd383d4265c"
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.557320    4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c0888a57c383ad8c0a363ee29bc31841ce4175cb3aeb1b825da7efd383d4265c"} err="failed to get container status \"c0888a57c383ad8c0a363ee29bc31841ce4175cb3aeb1b825da7efd383d4265c\": rpc error: code = NotFound desc = could not find container \"c0888a57c383ad8c0a363ee29bc31841ce4175cb3aeb1b825da7efd383d4265c\": container with ID starting with c0888a57c383ad8c0a363ee29bc31841ce4175cb3aeb1b825da7efd383d4265c not found: ID does not exist"
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.557354    4492 scope.go:117] "RemoveContainer" containerID="dcc2ea28d7ab1193d0836b56704a623efefea918b62fde4a41c3d5466a531a39"
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.557584    4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dcc2ea28d7ab1193d0836b56704a623efefea918b62fde4a41c3d5466a531a39"} err="failed to get container status \"dcc2ea28d7ab1193d0836b56704a623efefea918b62fde4a41c3d5466a531a39\": rpc error: code = NotFound desc = could not find container \"dcc2ea28d7ab1193d0836b56704a623efefea918b62fde4a41c3d5466a531a39\": container with ID starting with dcc2ea28d7ab1193d0836b56704a623efefea918b62fde4a41c3d5466a531a39 not found: ID does not exist"
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.557622    4492 scope.go:117] "RemoveContainer" containerID="dba16d525497282969e2ead66e1d4886793234d0f4d2ef143a8246329131b046"
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.557844    4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dba16d525497282969e2ead66e1d4886793234d0f4d2ef143a8246329131b046"} err="failed to get container status \"dba16d525497282969e2ead66e1d4886793234d0f4d2ef143a8246329131b046\": rpc error: code = NotFound desc = could not find container \"dba16d525497282969e2ead66e1d4886793234d0f4d2ef143a8246329131b046\": container with ID starting with dba16d525497282969e2ead66e1d4886793234d0f4d2ef143a8246329131b046 not found: ID does not exist"
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.557860    4492 scope.go:117] "RemoveContainer" containerID="501133ebc0e93cbba7bea713d813e6f41caa28286ed59ae8d328b6f9050a1e71"
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.558086    4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"501133ebc0e93cbba7bea713d813e6f41caa28286ed59ae8d328b6f9050a1e71"} err="failed to get container status \"501133ebc0e93cbba7bea713d813e6f41caa28286ed59ae8d328b6f9050a1e71\": rpc error: code = NotFound desc = could not find container \"501133ebc0e93cbba7bea713d813e6f41caa28286ed59ae8d328b6f9050a1e71\": container with ID starting with 501133ebc0e93cbba7bea713d813e6f41caa28286ed59ae8d328b6f9050a1e71 not found: ID does not exist"
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.558103    4492 scope.go:117] "RemoveContainer" containerID="738cf2a56bd431ea3dcd29c8a6e66881e618cf6592af8ce1852be9776c41f803"
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.558411    4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"738cf2a56bd431ea3dcd29c8a6e66881e618cf6592af8ce1852be9776c41f803"} err="failed to get container status \"738cf2a56bd431ea3dcd29c8a6e66881e618cf6592af8ce1852be9776c41f803\": rpc error: code = NotFound desc = could not find container \"738cf2a56bd431ea3dcd29c8a6e66881e618cf6592af8ce1852be9776c41f803\": container with ID starting with 738cf2a56bd431ea3dcd29c8a6e66881e618cf6592af8ce1852be9776c41f803 not found: ID does not exist"
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.558449    4492 scope.go:117] "RemoveContainer" containerID="5de69ea04f427c1f151433ee53b119dfa7e3cd603f924ebca340c5050ff7d097"
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.558621    4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5de69ea04f427c1f151433ee53b119dfa7e3cd603f924ebca340c5050ff7d097"} err="failed to get container status \"5de69ea04f427c1f151433ee53b119dfa7e3cd603f924ebca340c5050ff7d097\": rpc error: code = NotFound desc = could not find container \"5de69ea04f427c1f151433ee53b119dfa7e3cd603f924ebca340c5050ff7d097\": container with ID starting with 5de69ea04f427c1f151433ee53b119dfa7e3cd603f924ebca340c5050ff7d097 not found: ID does not exist"
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.558641    4492 scope.go:117] "RemoveContainer" containerID="0301a329531b99f93a3e91f672edc4ce17ae03cfd5562f5802362423f11670fc"
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.559279    4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0301a329531b99f93a3e91f672edc4ce17ae03cfd5562f5802362423f11670fc"} err="failed to get container status \"0301a329531b99f93a3e91f672edc4ce17ae03cfd5562f5802362423f11670fc\": rpc error: code = NotFound desc = could not find container \"0301a329531b99f93a3e91f672edc4ce17ae03cfd5562f5802362423f11670fc\": container with ID starting with 0301a329531b99f93a3e91f672edc4ce17ae03cfd5562f5802362423f11670fc not found: ID does not exist"
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.559300    4492 scope.go:117] "RemoveContainer" containerID="1519223368c78fc663c8f6674135a7c574425dccbd0151d6663836b4715e9f9e"
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.560608    4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1519223368c78fc663c8f6674135a7c574425dccbd0151d6663836b4715e9f9e"} err="failed to get container status \"1519223368c78fc663c8f6674135a7c574425dccbd0151d6663836b4715e9f9e\": rpc error: code = NotFound desc = could not find container \"1519223368c78fc663c8f6674135a7c574425dccbd0151d6663836b4715e9f9e\": container with ID starting with 1519223368c78fc663c8f6674135a7c574425dccbd0151d6663836b4715e9f9e not found: ID does not exist"
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.560652    4492 scope.go:117] "RemoveContainer" containerID="4001ee998f435fc6249ef42a3a6a9a8cf4a5031eb8999158ba3ba6bfa41a4a43"
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.560869    4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4001ee998f435fc6249ef42a3a6a9a8cf4a5031eb8999158ba3ba6bfa41a4a43"} err="failed to get container status \"4001ee998f435fc6249ef42a3a6a9a8cf4a5031eb8999158ba3ba6bfa41a4a43\": rpc error: code = NotFound desc = could not find container \"4001ee998f435fc6249ef42a3a6a9a8cf4a5031eb8999158ba3ba6bfa41a4a43\": container with ID starting with 4001ee998f435fc6249ef42a3a6a9a8cf4a5031eb8999158ba3ba6bfa41a4a43 not found: ID does not exist"
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.560912    4492 scope.go:117] "RemoveContainer" containerID="4ea0c5b21e02525e24489f438a88ad9cea344b6d4d46a7887baf8dc07c82a334"
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.561214    4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4ea0c5b21e02525e24489f438a88ad9cea344b6d4d46a7887baf8dc07c82a334"} err="failed to get container status \"4ea0c5b21e02525e24489f438a88ad9cea344b6d4d46a7887baf8dc07c82a334\": rpc error: code = NotFound desc = could not find container \"4ea0c5b21e02525e24489f438a88ad9cea344b6d4d46a7887baf8dc07c82a334\": container with ID starting with 4ea0c5b21e02525e24489f438a88ad9cea344b6d4d46a7887baf8dc07c82a334 not found: ID does not exist"
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.561234    4492 scope.go:117] "RemoveContainer" containerID="c0888a57c383ad8c0a363ee29bc31841ce4175cb3aeb1b825da7efd383d4265c"
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.561528    4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c0888a57c383ad8c0a363ee29bc31841ce4175cb3aeb1b825da7efd383d4265c"} err="failed to get container status \"c0888a57c383ad8c0a363ee29bc31841ce4175cb3aeb1b825da7efd383d4265c\": rpc error: code = NotFound desc = could not find container \"c0888a57c383ad8c0a363ee29bc31841ce4175cb3aeb1b825da7efd383d4265c\": container with ID starting with c0888a57c383ad8c0a363ee29bc31841ce4175cb3aeb1b825da7efd383d4265c not found: ID does not exist"
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.561554    4492 scope.go:117] "RemoveContainer" containerID="dcc2ea28d7ab1193d0836b56704a623efefea918b62fde4a41c3d5466a531a39"
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.561775    4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dcc2ea28d7ab1193d0836b56704a623efefea918b62fde4a41c3d5466a531a39"} err="failed to get container status \"dcc2ea28d7ab1193d0836b56704a623efefea918b62fde4a41c3d5466a531a39\": rpc error: code = NotFound desc = could not find container \"dcc2ea28d7ab1193d0836b56704a623efefea918b62fde4a41c3d5466a531a39\": container with ID starting with dcc2ea28d7ab1193d0836b56704a623efefea918b62fde4a41c3d5466a531a39 not found: ID does not exist"
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.561797    4492 scope.go:117] "RemoveContainer" containerID="dba16d525497282969e2ead66e1d4886793234d0f4d2ef143a8246329131b046"
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.561981    4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dba16d525497282969e2ead66e1d4886793234d0f4d2ef143a8246329131b046"} err="failed to get container status \"dba16d525497282969e2ead66e1d4886793234d0f4d2ef143a8246329131b046\": rpc error: code = NotFound desc = could not find container \"dba16d525497282969e2ead66e1d4886793234d0f4d2ef143a8246329131b046\": container with ID starting with dba16d525497282969e2ead66e1d4886793234d0f4d2ef143a8246329131b046 not found: ID does not exist"
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.562062    4492 scope.go:117] "RemoveContainer" containerID="501133ebc0e93cbba7bea713d813e6f41caa28286ed59ae8d328b6f9050a1e71"
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.562428    4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"501133ebc0e93cbba7bea713d813e6f41caa28286ed59ae8d328b6f9050a1e71"} err="failed to get container status \"501133ebc0e93cbba7bea713d813e6f41caa28286ed59ae8d328b6f9050a1e71\": rpc error: code = NotFound desc = could not find container \"501133ebc0e93cbba7bea713d813e6f41caa28286ed59ae8d328b6f9050a1e71\": container with ID starting with 501133ebc0e93cbba7bea713d813e6f41caa28286ed59ae8d328b6f9050a1e71 not found: ID does not exist"
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.562513    4492 scope.go:117] "RemoveContainer" containerID="738cf2a56bd431ea3dcd29c8a6e66881e618cf6592af8ce1852be9776c41f803"
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.562861    4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"738cf2a56bd431ea3dcd29c8a6e66881e618cf6592af8ce1852be9776c41f803"} err="failed to get container status \"738cf2a56bd431ea3dcd29c8a6e66881e618cf6592af8ce1852be9776c41f803\": rpc error: code = NotFound desc = could not find container \"738cf2a56bd431ea3dcd29c8a6e66881e618cf6592af8ce1852be9776c41f803\": container with ID starting with 738cf2a56bd431ea3dcd29c8a6e66881e618cf6592af8ce1852be9776c41f803 not found: ID does not exist"
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.562888    4492 scope.go:117] "RemoveContainer" containerID="5de69ea04f427c1f151433ee53b119dfa7e3cd603f924ebca340c5050ff7d097"
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.563125    4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5de69ea04f427c1f151433ee53b119dfa7e3cd603f924ebca340c5050ff7d097"} err="failed to get container status \"5de69ea04f427c1f151433ee53b119dfa7e3cd603f924ebca340c5050ff7d097\": rpc error: code = NotFound desc = could not find container \"5de69ea04f427c1f151433ee53b119dfa7e3cd603f924ebca340c5050ff7d097\": container with ID starting with 5de69ea04f427c1f151433ee53b119dfa7e3cd603f924ebca340c5050ff7d097 not found: ID does not exist"
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.563270    4492 scope.go:117] "RemoveContainer" containerID="0301a329531b99f93a3e91f672edc4ce17ae03cfd5562f5802362423f11670fc"
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.564592    4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0301a329531b99f93a3e91f672edc4ce17ae03cfd5562f5802362423f11670fc"} err="failed to get container status \"0301a329531b99f93a3e91f672edc4ce17ae03cfd5562f5802362423f11670fc\": rpc error: code = NotFound desc = could not find container \"0301a329531b99f93a3e91f672edc4ce17ae03cfd5562f5802362423f11670fc\": container with ID starting with 0301a329531b99f93a3e91f672edc4ce17ae03cfd5562f5802362423f11670fc not found: ID does not exist"
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.564637    4492 scope.go:117] "RemoveContainer" containerID="1519223368c78fc663c8f6674135a7c574425dccbd0151d6663836b4715e9f9e"
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.565272    4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1519223368c78fc663c8f6674135a7c574425dccbd0151d6663836b4715e9f9e"} err="failed to get container status \"1519223368c78fc663c8f6674135a7c574425dccbd0151d6663836b4715e9f9e\": rpc error: code = NotFound desc = could not find container \"1519223368c78fc663c8f6674135a7c574425dccbd0151d6663836b4715e9f9e\": container with ID starting with 1519223368c78fc663c8f6674135a7c574425dccbd0151d6663836b4715e9f9e not found: ID does not exist"
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.565355    4492 scope.go:117] "RemoveContainer" containerID="4001ee998f435fc6249ef42a3a6a9a8cf4a5031eb8999158ba3ba6bfa41a4a43"
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.565834    4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4001ee998f435fc6249ef42a3a6a9a8cf4a5031eb8999158ba3ba6bfa41a4a43"} err="failed to get container status \"4001ee998f435fc6249ef42a3a6a9a8cf4a5031eb8999158ba3ba6bfa41a4a43\": rpc error: code = NotFound desc = could not find container \"4001ee998f435fc6249ef42a3a6a9a8cf4a5031eb8999158ba3ba6bfa41a4a43\": container with ID starting with 4001ee998f435fc6249ef42a3a6a9a8cf4a5031eb8999158ba3ba6bfa41a4a43 not found: ID does not exist"
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.565854    4492 scope.go:117] "RemoveContainer" containerID="4ea0c5b21e02525e24489f438a88ad9cea344b6d4d46a7887baf8dc07c82a334"
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.566086    4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4ea0c5b21e02525e24489f438a88ad9cea344b6d4d46a7887baf8dc07c82a334"} err="failed to get container status \"4ea0c5b21e02525e24489f438a88ad9cea344b6d4d46a7887baf8dc07c82a334\": rpc error: code = NotFound desc = could not find container \"4ea0c5b21e02525e24489f438a88ad9cea344b6d4d46a7887baf8dc07c82a334\": container with ID starting with 4ea0c5b21e02525e24489f438a88ad9cea344b6d4d46a7887baf8dc07c82a334 not found: ID does not exist"
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.566103    4492 scope.go:117] "RemoveContainer" containerID="c0888a57c383ad8c0a363ee29bc31841ce4175cb3aeb1b825da7efd383d4265c"
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.566320    4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c0888a57c383ad8c0a363ee29bc31841ce4175cb3aeb1b825da7efd383d4265c"} err="failed to get container status \"c0888a57c383ad8c0a363ee29bc31841ce4175cb3aeb1b825da7efd383d4265c\": rpc error: code = NotFound desc = could not find container \"c0888a57c383ad8c0a363ee29bc31841ce4175cb3aeb1b825da7efd383d4265c\": container with ID starting with c0888a57c383ad8c0a363ee29bc31841ce4175cb3aeb1b825da7efd383d4265c not found: ID does not exist"
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.566405    4492 scope.go:117] "RemoveContainer" containerID="dcc2ea28d7ab1193d0836b56704a623efefea918b62fde4a41c3d5466a531a39"
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.566658    4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dcc2ea28d7ab1193d0836b56704a623efefea918b62fde4a41c3d5466a531a39"} err="failed to get container status \"dcc2ea28d7ab1193d0836b56704a623efefea918b62fde4a41c3d5466a531a39\": rpc error: code = NotFound desc = could not find container \"dcc2ea28d7ab1193d0836b56704a623efefea918b62fde4a41c3d5466a531a39\": container with ID starting with dcc2ea28d7ab1193d0836b56704a623efefea918b62fde4a41c3d5466a531a39 not found: ID does not exist"
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.566703    4492 scope.go:117] "RemoveContainer" containerID="dba16d525497282969e2ead66e1d4886793234d0f4d2ef143a8246329131b046"
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.566926    4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dba16d525497282969e2ead66e1d4886793234d0f4d2ef143a8246329131b046"} err="failed to get container status \"dba16d525497282969e2ead66e1d4886793234d0f4d2ef143a8246329131b046\": rpc error: code = NotFound desc = could not find container \"dba16d525497282969e2ead66e1d4886793234d0f4d2ef143a8246329131b046\": container with ID starting with dba16d525497282969e2ead66e1d4886793234d0f4d2ef143a8246329131b046 not found: ID does not exist"
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.566945    4492 scope.go:117] "RemoveContainer" containerID="501133ebc0e93cbba7bea713d813e6f41caa28286ed59ae8d328b6f9050a1e71"
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.567147    4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"501133ebc0e93cbba7bea713d813e6f41caa28286ed59ae8d328b6f9050a1e71"} err="failed to get container status \"501133ebc0e93cbba7bea713d813e6f41caa28286ed59ae8d328b6f9050a1e71\": rpc error: code = NotFound desc = could not find container \"501133ebc0e93cbba7bea713d813e6f41caa28286ed59ae8d328b6f9050a1e71\": container with ID starting with 501133ebc0e93cbba7bea713d813e6f41caa28286ed59ae8d328b6f9050a1e71 not found: ID does not exist"
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.567179    4492 scope.go:117] "RemoveContainer" containerID="738cf2a56bd431ea3dcd29c8a6e66881e618cf6592af8ce1852be9776c41f803"
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.567343    4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"738cf2a56bd431ea3dcd29c8a6e66881e618cf6592af8ce1852be9776c41f803"} err="failed to get container status \"738cf2a56bd431ea3dcd29c8a6e66881e618cf6592af8ce1852be9776c41f803\": rpc error: code = NotFound desc = could not find container \"738cf2a56bd431ea3dcd29c8a6e66881e618cf6592af8ce1852be9776c41f803\": container with ID starting with 738cf2a56bd431ea3dcd29c8a6e66881e618cf6592af8ce1852be9776c41f803 not found: ID does not exist"
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.567361    4492 scope.go:117] "RemoveContainer" containerID="5de69ea04f427c1f151433ee53b119dfa7e3cd603f924ebca340c5050ff7d097"
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.567572    4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5de69ea04f427c1f151433ee53b119dfa7e3cd603f924ebca340c5050ff7d097"} err="failed to get container status \"5de69ea04f427c1f151433ee53b119dfa7e3cd603f924ebca340c5050ff7d097\": rpc error: code = NotFound desc = could not find container \"5de69ea04f427c1f151433ee53b119dfa7e3cd603f924ebca340c5050ff7d097\": container with ID starting with 5de69ea04f427c1f151433ee53b119dfa7e3cd603f924ebca340c5050ff7d097 not found: ID does not exist"
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.567591    4492 scope.go:117] "RemoveContainer" containerID="0301a329531b99f93a3e91f672edc4ce17ae03cfd5562f5802362423f11670fc"
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.567837    4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0301a329531b99f93a3e91f672edc4ce17ae03cfd5562f5802362423f11670fc"} err="failed to get container status \"0301a329531b99f93a3e91f672edc4ce17ae03cfd5562f5802362423f11670fc\": rpc error: code = NotFound desc = could not find container \"0301a329531b99f93a3e91f672edc4ce17ae03cfd5562f5802362423f11670fc\": container with ID starting with 0301a329531b99f93a3e91f672edc4ce17ae03cfd5562f5802362423f11670fc not found: ID does not exist"
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.567857    4492 scope.go:117] "RemoveContainer" containerID="1519223368c78fc663c8f6674135a7c574425dccbd0151d6663836b4715e9f9e"
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.568063    4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1519223368c78fc663c8f6674135a7c574425dccbd0151d6663836b4715e9f9e"} err="failed to get container status \"1519223368c78fc663c8f6674135a7c574425dccbd0151d6663836b4715e9f9e\": rpc error: code = NotFound desc = could not find container \"1519223368c78fc663c8f6674135a7c574425dccbd0151d6663836b4715e9f9e\": container with ID starting with 1519223368c78fc663c8f6674135a7c574425dccbd0151d6663836b4715e9f9e not found: ID does not exist"
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.568082    4492 scope.go:117] "RemoveContainer" containerID="4001ee998f435fc6249ef42a3a6a9a8cf4a5031eb8999158ba3ba6bfa41a4a43"
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.568375    4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4001ee998f435fc6249ef42a3a6a9a8cf4a5031eb8999158ba3ba6bfa41a4a43"} err="failed to get container status \"4001ee998f435fc6249ef42a3a6a9a8cf4a5031eb8999158ba3ba6bfa41a4a43\": rpc error: code = NotFound desc = could not find container \"4001ee998f435fc6249ef42a3a6a9a8cf4a5031eb8999158ba3ba6bfa41a4a43\": container with ID starting with 4001ee998f435fc6249ef42a3a6a9a8cf4a5031eb8999158ba3ba6bfa41a4a43 not found: ID does not exist"
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.568396    4492 scope.go:117] "RemoveContainer" containerID="4ea0c5b21e02525e24489f438a88ad9cea344b6d4d46a7887baf8dc07c82a334"
Nov 26 06:59:15 crc kubenswrapper[4492]: I1126 06:59:15.568597    4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4ea0c5b21e02525e24489f438a88ad9cea344b6d4d46a7887baf8dc07c82a334"} err="failed to get container status \"4ea0c5b21e02525e24489f438a88ad9cea344b6d4d46a7887baf8dc07c82a334\": rpc error: code = NotFound desc = could not find container \"4ea0c5b21e02525e24489f438a88ad9cea344b6d4d46a7887baf8dc07c82a334\": container with ID starting with 4ea0c5b21e02525e24489f438a88ad9cea344b6d4d46a7887baf8dc07c82a334 not found: ID does not exist"
Nov 26 06:59:16 crc kubenswrapper[4492]: I1126 06:59:16.406639    4492 generic.go:334] "Generic (PLEG): container finished" podID="d591ad07-c69d-458c-9b18-b09c467be552" containerID="9b3f23dc8275f2e69507ec12a3d68526a2d62f1c08f5a3fb19e7063522b87c9c" exitCode=0
Nov 26 06:59:16 crc kubenswrapper[4492]: I1126 06:59:16.406842    4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w2769" event={"ID":"d591ad07-c69d-458c-9b18-b09c467be552","Type":"ContainerDied","Data":"9b3f23dc8275f2e69507ec12a3d68526a2d62f1c08f5a3fb19e7063522b87c9c"}
Nov 26 06:59:16 crc kubenswrapper[4492]: I1126 06:59:16.406903    4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w2769" event={"ID":"d591ad07-c69d-458c-9b18-b09c467be552","Type":"ContainerStarted","Data":"d1e9ca0a19b0254277cd61a45519f691b4252a82780ebce14f735ce9c95ef2d3"}
Nov 26 06:59:16 crc kubenswrapper[4492]: I1126 06:59:16.406917    4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w2769" event={"ID":"d591ad07-c69d-458c-9b18-b09c467be552","Type":"ContainerStarted","Data":"34b7ea64388b2b0a255f45347479805f6074c4510453e15d9e50fa08692c89ce"}
Nov 26 06:59:16 crc kubenswrapper[4492]: I1126 06:59:16.406927    4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w2769" event={"ID":"d591ad07-c69d-458c-9b18-b09c467be552","Type":"ContainerStarted","Data":"45f1f1fa9f9d6b3718455d17a6a39ff82ba834f9604df1b59258ca84a80fb940"}
Nov 26 06:59:16 crc kubenswrapper[4492]: I1126 06:59:16.406935    4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w2769" event={"ID":"d591ad07-c69d-458c-9b18-b09c467be552","Type":"ContainerStarted","Data":"a37c9b591ae7b072577573c54ad90dc6018ff08f5eed7ece36b207357da7fd63"}
Nov 26 06:59:16 crc kubenswrapper[4492]: I1126 06:59:16.406942    4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w2769" event={"ID":"d591ad07-c69d-458c-9b18-b09c467be552","Type":"ContainerStarted","Data":"3fa05c95059699366597cedafe3b92e783cc8d83af242abb2977b89ecffd0146"}
Nov 26 06:59:16 crc kubenswrapper[4492]: I1126 06:59:16.406950    4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w2769" event={"ID":"d591ad07-c69d-458c-9b18-b09c467be552","Type":"ContainerStarted","Data":"aab619c787da83481e1fe4e00c832ce39ddda972ad5cf9a21f6bb7f4999d76fa"}
Nov 26 06:59:16 crc kubenswrapper[4492]: I1126 06:59:16.443129    4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9b104695-0850-4fb3-b2f8-f764435f8694" path="/var/lib/kubelet/pods/9b104695-0850-4fb3-b2f8-f764435f8694/volumes"
Nov 26 06:59:18 crc kubenswrapper[4492]: I1126 06:59:18.419137    4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w2769" event={"ID":"d591ad07-c69d-458c-9b18-b09c467be552","Type":"ContainerStarted","Data":"03d294fbce670864fc83fd50bb37fad4a29f63fb166c56c1fbd06bc9afa3fe57"}
Nov 26 06:59:20 crc kubenswrapper[4492]: I1126 06:59:20.430315    4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-w2769" event={"ID":"d591ad07-c69d-458c-9b18-b09c467be552","Type":"ContainerStarted","Data":"58d7a8cdbe029e763d7b57e33424f23fa2fa7a9426927f7d0f8d2d8c51ffb226"}
Nov 26 06:59:20 crc kubenswrapper[4492]: I1126 06:59:20.430653    4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-w2769"
Nov 26 06:59:20 crc kubenswrapper[4492]: I1126 06:59:20.430664    4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-w2769"
Nov 26 06:59:20 crc kubenswrapper[4492]: I1126 06:59:20.430672    4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-w2769"
Nov 26 06:59:20 crc kubenswrapper[4492]: I1126 06:59:20.450367    4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-w2769"
Nov 26 06:59:20 crc kubenswrapper[4492]: I1126 06:59:20.459235    4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-w2769" podStartSLOduration=6.459225729 podStartE2EDuration="6.459225729s" podCreationTimestamp="2025-11-26 06:59:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:59:20.455626251 +0000 UTC m=+656.339514549" watchObservedRunningTime="2025-11-26 06:59:20.459225729 +0000 UTC m=+656.343114026"
Nov 26 06:59:20 crc kubenswrapper[4492]: I1126 06:59:20.462801    4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-w2769"
Nov 26 06:59:24 crc kubenswrapper[4492]: I1126 06:59:24.781807    4492 scope.go:117] "RemoveContainer" containerID="9a91fba8751c53be54b0060bfc75906ab11b521770ca44425d8910fa13027c9d"
Nov 26 06:59:25 crc kubenswrapper[4492]: I1126 06:59:25.463732    4492 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-5bshd_a471ac3f-0ac0-4110-94bb-194c0de0af26/kube-multus/1.log"
Nov 26 06:59:27 crc kubenswrapper[4492]: I1126 06:59:27.438588    4492 scope.go:117] "RemoveContainer" containerID="d93d67e2acbb2cff41dd4d5bc98ffe7cd68f7fd393e4fab2fc974ae4de103ca3"
Nov 26 06:59:28 crc kubenswrapper[4492]: I1126 06:59:28.475896    4492 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-5bshd_a471ac3f-0ac0-4110-94bb-194c0de0af26/kube-multus/1.log"
Nov 26 06:59:28 crc kubenswrapper[4492]: I1126 06:59:28.476112    4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-5bshd" event={"ID":"a471ac3f-0ac0-4110-94bb-194c0de0af26","Type":"ContainerStarted","Data":"0c0a031afbeac09bbc17f45ebf7307c12ebd07dc693c7f7bbb155d09a555d07e"}
Nov 26 06:59:41 crc kubenswrapper[4492]: I1126 06:59:41.814788    4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772el2zdn"]
Nov 26 06:59:41 crc kubenswrapper[4492]: I1126 06:59:41.817491    4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772el2zdn"
Nov 26 06:59:41 crc kubenswrapper[4492]: I1126 06:59:41.819360    4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc"
Nov 26 06:59:41 crc kubenswrapper[4492]: I1126 06:59:41.825399    4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772el2zdn"]
Nov 26 06:59:41 crc kubenswrapper[4492]: I1126 06:59:41.909662    4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/7ea3b65a-f267-4701-9e64-bb189ed62061-util\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772el2zdn\" (UID: \"7ea3b65a-f267-4701-9e64-bb189ed62061\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772el2zdn"
Nov 26 06:59:41 crc kubenswrapper[4492]: I1126 06:59:41.909813    4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/7ea3b65a-f267-4701-9e64-bb189ed62061-bundle\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772el2zdn\" (UID: \"7ea3b65a-f267-4701-9e64-bb189ed62061\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772el2zdn"
Nov 26 06:59:41 crc kubenswrapper[4492]: I1126 06:59:41.909850    4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vdw86\" (UniqueName: \"kubernetes.io/projected/7ea3b65a-f267-4701-9e64-bb189ed62061-kube-api-access-vdw86\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772el2zdn\" (UID: \"7ea3b65a-f267-4701-9e64-bb189ed62061\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772el2zdn"
Nov 26 06:59:42 crc kubenswrapper[4492]: I1126 06:59:42.010566    4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/7ea3b65a-f267-4701-9e64-bb189ed62061-bundle\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772el2zdn\" (UID: \"7ea3b65a-f267-4701-9e64-bb189ed62061\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772el2zdn"
Nov 26 06:59:42 crc kubenswrapper[4492]: I1126 06:59:42.010631    4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vdw86\" (UniqueName: \"kubernetes.io/projected/7ea3b65a-f267-4701-9e64-bb189ed62061-kube-api-access-vdw86\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772el2zdn\" (UID: \"7ea3b65a-f267-4701-9e64-bb189ed62061\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772el2zdn"
Nov 26 06:59:42 crc kubenswrapper[4492]: I1126 06:59:42.010700    4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/7ea3b65a-f267-4701-9e64-bb189ed62061-util\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772el2zdn\" (UID: \"7ea3b65a-f267-4701-9e64-bb189ed62061\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772el2zdn"
Nov 26 06:59:42 crc kubenswrapper[4492]: I1126 06:59:42.010999    4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/7ea3b65a-f267-4701-9e64-bb189ed62061-bundle\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772el2zdn\" (UID: \"7ea3b65a-f267-4701-9e64-bb189ed62061\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772el2zdn"
Nov 26 06:59:42 crc kubenswrapper[4492]: I1126 06:59:42.011160    4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/7ea3b65a-f267-4701-9e64-bb189ed62061-util\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772el2zdn\" (UID: \"7ea3b65a-f267-4701-9e64-bb189ed62061\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772el2zdn"
Nov 26 06:59:42 crc kubenswrapper[4492]: I1126 06:59:42.029893    4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vdw86\" (UniqueName: \"kubernetes.io/projected/7ea3b65a-f267-4701-9e64-bb189ed62061-kube-api-access-vdw86\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772el2zdn\" (UID: \"7ea3b65a-f267-4701-9e64-bb189ed62061\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772el2zdn"
Nov 26 06:59:42 crc kubenswrapper[4492]: I1126 06:59:42.134244    4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772el2zdn"
Nov 26 06:59:42 crc kubenswrapper[4492]: I1126 06:59:42.513739    4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772el2zdn"]
Nov 26 06:59:42 crc kubenswrapper[4492]: I1126 06:59:42.534554    4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772el2zdn" event={"ID":"7ea3b65a-f267-4701-9e64-bb189ed62061","Type":"ContainerStarted","Data":"443d8252b4335a26759415cbe54434c369f53ba7cb354ec20c4f726c7c6dd548"}
Nov 26 06:59:43 crc kubenswrapper[4492]: I1126 06:59:43.541678    4492 generic.go:334] "Generic (PLEG): container finished" podID="7ea3b65a-f267-4701-9e64-bb189ed62061" containerID="2e3adf8f24391834c65884d61dc9e76f67fd4c5b7afc7a716a07dbddb13d1cb3" exitCode=0
Nov 26 06:59:43 crc kubenswrapper[4492]: I1126 06:59:43.541724    4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772el2zdn" event={"ID":"7ea3b65a-f267-4701-9e64-bb189ed62061","Type":"ContainerDied","Data":"2e3adf8f24391834c65884d61dc9e76f67fd4c5b7afc7a716a07dbddb13d1cb3"}
Nov 26 06:59:45 crc kubenswrapper[4492]: I1126 06:59:45.308387    4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-w2769"
Nov 26 06:59:45 crc kubenswrapper[4492]: I1126 06:59:45.554506    4492 generic.go:334] "Generic (PLEG): container finished" podID="7ea3b65a-f267-4701-9e64-bb189ed62061" containerID="ac9d0117985e03a75c6c98b8e32f1b867766df293368ae4bb3aa7b1572baa514" exitCode=0
Nov 26 06:59:45 crc kubenswrapper[4492]: I1126 06:59:45.554550    4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772el2zdn" event={"ID":"7ea3b65a-f267-4701-9e64-bb189ed62061","Type":"ContainerDied","Data":"ac9d0117985e03a75c6c98b8e32f1b867766df293368ae4bb3aa7b1572baa514"}
Nov 26 06:59:46 crc kubenswrapper[4492]: I1126 06:59:46.561599    4492 generic.go:334] "Generic (PLEG): container finished" podID="7ea3b65a-f267-4701-9e64-bb189ed62061" containerID="dcd952badeecc2f35f033d78134c6d11e00d531f3e509835c77f5dbb2a2b3173" exitCode=0
Nov 26 06:59:46 crc kubenswrapper[4492]: I1126 06:59:46.561650    4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772el2zdn" event={"ID":"7ea3b65a-f267-4701-9e64-bb189ed62061","Type":"ContainerDied","Data":"dcd952badeecc2f35f033d78134c6d11e00d531f3e509835c77f5dbb2a2b3173"}
Nov 26 06:59:47 crc kubenswrapper[4492]: I1126 06:59:47.755291    4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772el2zdn"
Nov 26 06:59:47 crc kubenswrapper[4492]: I1126 06:59:47.783432    4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/7ea3b65a-f267-4701-9e64-bb189ed62061-bundle\") pod \"7ea3b65a-f267-4701-9e64-bb189ed62061\" (UID: \"7ea3b65a-f267-4701-9e64-bb189ed62061\") "
Nov 26 06:59:47 crc kubenswrapper[4492]: I1126 06:59:47.783529    4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vdw86\" (UniqueName: \"kubernetes.io/projected/7ea3b65a-f267-4701-9e64-bb189ed62061-kube-api-access-vdw86\") pod \"7ea3b65a-f267-4701-9e64-bb189ed62061\" (UID: \"7ea3b65a-f267-4701-9e64-bb189ed62061\") "
Nov 26 06:59:47 crc kubenswrapper[4492]: I1126 06:59:47.783667    4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/7ea3b65a-f267-4701-9e64-bb189ed62061-util\") pod \"7ea3b65a-f267-4701-9e64-bb189ed62061\" (UID: \"7ea3b65a-f267-4701-9e64-bb189ed62061\") "
Nov 26 06:59:47 crc kubenswrapper[4492]: I1126 06:59:47.784008    4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7ea3b65a-f267-4701-9e64-bb189ed62061-bundle" (OuterVolumeSpecName: "bundle") pod "7ea3b65a-f267-4701-9e64-bb189ed62061" (UID: "7ea3b65a-f267-4701-9e64-bb189ed62061"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 06:59:47 crc kubenswrapper[4492]: I1126 06:59:47.784420    4492 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/7ea3b65a-f267-4701-9e64-bb189ed62061-bundle\") on node \"crc\" DevicePath \"\""
Nov 26 06:59:47 crc kubenswrapper[4492]: I1126 06:59:47.788667    4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7ea3b65a-f267-4701-9e64-bb189ed62061-kube-api-access-vdw86" (OuterVolumeSpecName: "kube-api-access-vdw86") pod "7ea3b65a-f267-4701-9e64-bb189ed62061" (UID: "7ea3b65a-f267-4701-9e64-bb189ed62061"). InnerVolumeSpecName "kube-api-access-vdw86". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 06:59:47 crc kubenswrapper[4492]: I1126 06:59:47.793135    4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7ea3b65a-f267-4701-9e64-bb189ed62061-util" (OuterVolumeSpecName: "util") pod "7ea3b65a-f267-4701-9e64-bb189ed62061" (UID: "7ea3b65a-f267-4701-9e64-bb189ed62061"). InnerVolumeSpecName "util".
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 06:59:47 crc kubenswrapper[4492]: I1126 06:59:47.885268 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vdw86\" (UniqueName: \"kubernetes.io/projected/7ea3b65a-f267-4701-9e64-bb189ed62061-kube-api-access-vdw86\") on node \"crc\" DevicePath \"\"" Nov 26 06:59:47 crc kubenswrapper[4492]: I1126 06:59:47.885303 4492 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/7ea3b65a-f267-4701-9e64-bb189ed62061-util\") on node \"crc\" DevicePath \"\"" Nov 26 06:59:48 crc kubenswrapper[4492]: I1126 06:59:48.572690 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772el2zdn" event={"ID":"7ea3b65a-f267-4701-9e64-bb189ed62061","Type":"ContainerDied","Data":"443d8252b4335a26759415cbe54434c369f53ba7cb354ec20c4f726c7c6dd548"} Nov 26 06:59:48 crc kubenswrapper[4492]: I1126 06:59:48.572740 4492 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="443d8252b4335a26759415cbe54434c369f53ba7cb354ec20c4f726c7c6dd548" Nov 26 06:59:48 crc kubenswrapper[4492]: I1126 06:59:48.572772 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772el2zdn" Nov 26 06:59:49 crc kubenswrapper[4492]: I1126 06:59:49.810706 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-operator-557fdffb88-5qf2s"] Nov 26 06:59:49 crc kubenswrapper[4492]: E1126 06:59:49.811308 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7ea3b65a-f267-4701-9e64-bb189ed62061" containerName="pull" Nov 26 06:59:49 crc kubenswrapper[4492]: I1126 06:59:49.811320 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="7ea3b65a-f267-4701-9e64-bb189ed62061" containerName="pull" Nov 26 06:59:49 crc kubenswrapper[4492]: E1126 06:59:49.811329 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7ea3b65a-f267-4701-9e64-bb189ed62061" containerName="extract" Nov 26 06:59:49 crc kubenswrapper[4492]: I1126 06:59:49.811335 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="7ea3b65a-f267-4701-9e64-bb189ed62061" containerName="extract" Nov 26 06:59:49 crc kubenswrapper[4492]: E1126 06:59:49.811350 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7ea3b65a-f267-4701-9e64-bb189ed62061" containerName="util" Nov 26 06:59:49 crc kubenswrapper[4492]: I1126 06:59:49.811355 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="7ea3b65a-f267-4701-9e64-bb189ed62061" containerName="util" Nov 26 06:59:49 crc kubenswrapper[4492]: I1126 06:59:49.811447 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="7ea3b65a-f267-4701-9e64-bb189ed62061" containerName="extract" Nov 26 06:59:49 crc kubenswrapper[4492]: I1126 06:59:49.811820 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-operator-557fdffb88-5qf2s" Nov 26 06:59:49 crc kubenswrapper[4492]: I1126 06:59:49.814156 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"kube-root-ca.crt" Nov 26 06:59:49 crc kubenswrapper[4492]: I1126 06:59:49.815610 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-operator-dockercfg-mvxpz" Nov 26 06:59:49 crc kubenswrapper[4492]: I1126 06:59:49.815920 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"openshift-service-ca.crt" Nov 26 06:59:49 crc kubenswrapper[4492]: I1126 06:59:49.826368 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-557fdffb88-5qf2s"] Nov 26 06:59:49 crc kubenswrapper[4492]: I1126 06:59:49.905616 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2j56z\" (UniqueName: \"kubernetes.io/projected/dbed63fb-3c6e-4473-899a-74a47a79d3ee-kube-api-access-2j56z\") pod \"nmstate-operator-557fdffb88-5qf2s\" (UID: \"dbed63fb-3c6e-4473-899a-74a47a79d3ee\") " pod="openshift-nmstate/nmstate-operator-557fdffb88-5qf2s" Nov 26 06:59:50 crc kubenswrapper[4492]: I1126 06:59:50.006634 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2j56z\" (UniqueName: \"kubernetes.io/projected/dbed63fb-3c6e-4473-899a-74a47a79d3ee-kube-api-access-2j56z\") pod \"nmstate-operator-557fdffb88-5qf2s\" (UID: \"dbed63fb-3c6e-4473-899a-74a47a79d3ee\") " pod="openshift-nmstate/nmstate-operator-557fdffb88-5qf2s" Nov 26 06:59:50 crc kubenswrapper[4492]: I1126 06:59:50.031744 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2j56z\" (UniqueName: \"kubernetes.io/projected/dbed63fb-3c6e-4473-899a-74a47a79d3ee-kube-api-access-2j56z\") pod \"nmstate-operator-557fdffb88-5qf2s\" (UID: \"dbed63fb-3c6e-4473-899a-74a47a79d3ee\") " pod="openshift-nmstate/nmstate-operator-557fdffb88-5qf2s" Nov 26 06:59:50 crc kubenswrapper[4492]: I1126 06:59:50.125567 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-operator-557fdffb88-5qf2s" Nov 26 06:59:50 crc kubenswrapper[4492]: I1126 06:59:50.292376 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-557fdffb88-5qf2s"] Nov 26 06:59:50 crc kubenswrapper[4492]: W1126 06:59:50.296112 4492 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poddbed63fb_3c6e_4473_899a_74a47a79d3ee.slice/crio-9ba14111c4afb24ad799c15b5ba457d84c8c283eb5a9d36daac6dfda29228860 WatchSource:0}: Error finding container 9ba14111c4afb24ad799c15b5ba457d84c8c283eb5a9d36daac6dfda29228860: Status 404 returned error can't find the container with id 9ba14111c4afb24ad799c15b5ba457d84c8c283eb5a9d36daac6dfda29228860 Nov 26 06:59:50 crc kubenswrapper[4492]: I1126 06:59:50.582616 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-557fdffb88-5qf2s" event={"ID":"dbed63fb-3c6e-4473-899a-74a47a79d3ee","Type":"ContainerStarted","Data":"9ba14111c4afb24ad799c15b5ba457d84c8c283eb5a9d36daac6dfda29228860"} Nov 26 06:59:53 crc kubenswrapper[4492]: I1126 06:59:53.602104 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-557fdffb88-5qf2s" event={"ID":"dbed63fb-3c6e-4473-899a-74a47a79d3ee","Type":"ContainerStarted","Data":"c21f6e233ab7eab0605b4ead5ce3c766478d53014bb13c4ab79653696767c803"} Nov 26 06:59:53 crc kubenswrapper[4492]: I1126 06:59:53.618858 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-operator-557fdffb88-5qf2s" podStartSLOduration=2.327482998 podStartE2EDuration="4.61883914s" podCreationTimestamp="2025-11-26 06:59:49 +0000 UTC" firstStartedPulling="2025-11-26 06:59:50.300458059 +0000 UTC m=+686.184346357" lastFinishedPulling="2025-11-26 06:59:52.591814201 +0000 UTC m=+688.475702499" observedRunningTime="2025-11-26 06:59:53.616226188 +0000 UTC m=+689.500114486" watchObservedRunningTime="2025-11-26 06:59:53.61883914 +0000 UTC m=+689.502727439" Nov 26 06:59:54 crc kubenswrapper[4492]: I1126 06:59:54.381087 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-metrics-5dcf9c57c5-5swnx"] Nov 26 06:59:54 crc kubenswrapper[4492]: I1126 06:59:54.381922 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-5swnx" Nov 26 06:59:54 crc kubenswrapper[4492]: I1126 06:59:54.392643 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-handler-dockercfg-tbb4c" Nov 26 06:59:54 crc kubenswrapper[4492]: I1126 06:59:54.394357 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-5dcf9c57c5-5swnx"] Nov 26 06:59:54 crc kubenswrapper[4492]: I1126 06:59:54.408066 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-handler-74hxm"] Nov 26 06:59:54 crc kubenswrapper[4492]: I1126 06:59:54.408805 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-handler-74hxm" Nov 26 06:59:54 crc kubenswrapper[4492]: I1126 06:59:54.415682 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-webhook-6b89b748d8-c6rbj"] Nov 26 06:59:54 crc kubenswrapper[4492]: I1126 06:59:54.416835 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-c6rbj" Nov 26 06:59:54 crc kubenswrapper[4492]: I1126 06:59:54.418604 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"openshift-nmstate-webhook" Nov 26 06:59:54 crc kubenswrapper[4492]: I1126 06:59:54.449924 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-6b89b748d8-c6rbj"] Nov 26 06:59:54 crc kubenswrapper[4492]: I1126 06:59:54.531540 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-console-plugin-5874bd7bc5-qtzxm"] Nov 26 06:59:54 crc kubenswrapper[4492]: I1126 06:59:54.538405 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-qtzxm" Nov 26 06:59:54 crc kubenswrapper[4492]: I1126 06:59:54.541129 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"plugin-serving-cert" Nov 26 06:59:54 crc kubenswrapper[4492]: I1126 06:59:54.541829 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"default-dockercfg-z6llm" Nov 26 06:59:54 crc kubenswrapper[4492]: I1126 06:59:54.542151 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"nginx-conf" Nov 26 06:59:54 crc kubenswrapper[4492]: I1126 06:59:54.558930 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/3b91e88b-adf1-49da-bf66-cdf3e2cd5ca4-ovs-socket\") pod \"nmstate-handler-74hxm\" (UID: \"3b91e88b-adf1-49da-bf66-cdf3e2cd5ca4\") " pod="openshift-nmstate/nmstate-handler-74hxm" Nov 26 06:59:54 crc kubenswrapper[4492]: I1126 06:59:54.559147 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/8a2f012a-c6b0-4ac6-afd4-0d78f516810f-tls-key-pair\") pod \"nmstate-webhook-6b89b748d8-c6rbj\" (UID: \"8a2f012a-c6b0-4ac6-afd4-0d78f516810f\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-c6rbj" Nov 26 06:59:54 crc kubenswrapper[4492]: I1126 06:59:54.560753 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bnj79\" (UniqueName: \"kubernetes.io/projected/3b91e88b-adf1-49da-bf66-cdf3e2cd5ca4-kube-api-access-bnj79\") pod \"nmstate-handler-74hxm\" (UID: \"3b91e88b-adf1-49da-bf66-cdf3e2cd5ca4\") " pod="openshift-nmstate/nmstate-handler-74hxm" Nov 26 06:59:54 crc kubenswrapper[4492]: I1126 06:59:54.561031 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/3b91e88b-adf1-49da-bf66-cdf3e2cd5ca4-dbus-socket\") pod \"nmstate-handler-74hxm\" (UID: \"3b91e88b-adf1-49da-bf66-cdf3e2cd5ca4\") " pod="openshift-nmstate/nmstate-handler-74hxm" Nov 26 06:59:54 crc kubenswrapper[4492]: I1126 06:59:54.561202 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rs52s\" (UniqueName: \"kubernetes.io/projected/8a2f012a-c6b0-4ac6-afd4-0d78f516810f-kube-api-access-rs52s\") pod \"nmstate-webhook-6b89b748d8-c6rbj\" (UID: \"8a2f012a-c6b0-4ac6-afd4-0d78f516810f\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-c6rbj" Nov 26 06:59:54 crc kubenswrapper[4492]: I1126 06:59:54.561303 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/3b91e88b-adf1-49da-bf66-cdf3e2cd5ca4-nmstate-lock\") pod \"nmstate-handler-74hxm\" (UID: \"3b91e88b-adf1-49da-bf66-cdf3e2cd5ca4\") " pod="openshift-nmstate/nmstate-handler-74hxm" Nov 26 06:59:54 crc kubenswrapper[4492]: I1126 06:59:54.561336 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vbrlf\" (UniqueName: \"kubernetes.io/projected/af4d39de-b0b8-473b-89e4-e1b7d7db0a81-kube-api-access-vbrlf\") pod \"nmstate-metrics-5dcf9c57c5-5swnx\" (UID: \"af4d39de-b0b8-473b-89e4-e1b7d7db0a81\") " pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-5swnx" Nov 26 06:59:54 crc kubenswrapper[4492]: I1126 06:59:54.562033 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-5874bd7bc5-qtzxm"] Nov 26 06:59:54 crc kubenswrapper[4492]: I1126 06:59:54.663324 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/3b91e88b-adf1-49da-bf66-cdf3e2cd5ca4-ovs-socket\") pod \"nmstate-handler-74hxm\" (UID: \"3b91e88b-adf1-49da-bf66-cdf3e2cd5ca4\") " pod="openshift-nmstate/nmstate-handler-74hxm" Nov 26 06:59:54 crc kubenswrapper[4492]: I1126 06:59:54.663428 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/8a2f012a-c6b0-4ac6-afd4-0d78f516810f-tls-key-pair\") pod \"nmstate-webhook-6b89b748d8-c6rbj\" (UID: \"8a2f012a-c6b0-4ac6-afd4-0d78f516810f\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-c6rbj" Nov 26 06:59:54 crc kubenswrapper[4492]: I1126 06:59:54.663430 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/3b91e88b-adf1-49da-bf66-cdf3e2cd5ca4-ovs-socket\") pod \"nmstate-handler-74hxm\" (UID: \"3b91e88b-adf1-49da-bf66-cdf3e2cd5ca4\") " pod="openshift-nmstate/nmstate-handler-74hxm" Nov 26 06:59:54 crc kubenswrapper[4492]: I1126 06:59:54.663478 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/b1b1d575-ebe5-44c6-b7eb-088510696be0-plugin-serving-cert\") pod \"nmstate-console-plugin-5874bd7bc5-qtzxm\" (UID: \"b1b1d575-ebe5-44c6-b7eb-088510696be0\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-qtzxm" Nov 26 06:59:54 crc kubenswrapper[4492]: I1126 06:59:54.663535 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/b1b1d575-ebe5-44c6-b7eb-088510696be0-nginx-conf\") pod \"nmstate-console-plugin-5874bd7bc5-qtzxm\" (UID: \"b1b1d575-ebe5-44c6-b7eb-088510696be0\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-qtzxm" Nov 26 06:59:54 crc kubenswrapper[4492]: I1126 06:59:54.663576 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2pb9c\" (UniqueName: \"kubernetes.io/projected/b1b1d575-ebe5-44c6-b7eb-088510696be0-kube-api-access-2pb9c\") pod \"nmstate-console-plugin-5874bd7bc5-qtzxm\" (UID: \"b1b1d575-ebe5-44c6-b7eb-088510696be0\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-qtzxm" Nov 26 06:59:54 crc kubenswrapper[4492]: I1126 06:59:54.663745 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bnj79\" (UniqueName: 
\"kubernetes.io/projected/3b91e88b-adf1-49da-bf66-cdf3e2cd5ca4-kube-api-access-bnj79\") pod \"nmstate-handler-74hxm\" (UID: \"3b91e88b-adf1-49da-bf66-cdf3e2cd5ca4\") " pod="openshift-nmstate/nmstate-handler-74hxm" Nov 26 06:59:54 crc kubenswrapper[4492]: I1126 06:59:54.663847 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/3b91e88b-adf1-49da-bf66-cdf3e2cd5ca4-dbus-socket\") pod \"nmstate-handler-74hxm\" (UID: \"3b91e88b-adf1-49da-bf66-cdf3e2cd5ca4\") " pod="openshift-nmstate/nmstate-handler-74hxm" Nov 26 06:59:54 crc kubenswrapper[4492]: I1126 06:59:54.663969 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rs52s\" (UniqueName: \"kubernetes.io/projected/8a2f012a-c6b0-4ac6-afd4-0d78f516810f-kube-api-access-rs52s\") pod \"nmstate-webhook-6b89b748d8-c6rbj\" (UID: \"8a2f012a-c6b0-4ac6-afd4-0d78f516810f\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-c6rbj" Nov 26 06:59:54 crc kubenswrapper[4492]: I1126 06:59:54.664085 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/3b91e88b-adf1-49da-bf66-cdf3e2cd5ca4-nmstate-lock\") pod \"nmstate-handler-74hxm\" (UID: \"3b91e88b-adf1-49da-bf66-cdf3e2cd5ca4\") " pod="openshift-nmstate/nmstate-handler-74hxm" Nov 26 06:59:54 crc kubenswrapper[4492]: I1126 06:59:54.664151 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vbrlf\" (UniqueName: \"kubernetes.io/projected/af4d39de-b0b8-473b-89e4-e1b7d7db0a81-kube-api-access-vbrlf\") pod \"nmstate-metrics-5dcf9c57c5-5swnx\" (UID: \"af4d39de-b0b8-473b-89e4-e1b7d7db0a81\") " pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-5swnx" Nov 26 06:59:54 crc kubenswrapper[4492]: I1126 06:59:54.664264 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/3b91e88b-adf1-49da-bf66-cdf3e2cd5ca4-dbus-socket\") pod \"nmstate-handler-74hxm\" (UID: \"3b91e88b-adf1-49da-bf66-cdf3e2cd5ca4\") " pod="openshift-nmstate/nmstate-handler-74hxm" Nov 26 06:59:54 crc kubenswrapper[4492]: I1126 06:59:54.664284 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/3b91e88b-adf1-49da-bf66-cdf3e2cd5ca4-nmstate-lock\") pod \"nmstate-handler-74hxm\" (UID: \"3b91e88b-adf1-49da-bf66-cdf3e2cd5ca4\") " pod="openshift-nmstate/nmstate-handler-74hxm" Nov 26 06:59:54 crc kubenswrapper[4492]: I1126 06:59:54.672602 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/8a2f012a-c6b0-4ac6-afd4-0d78f516810f-tls-key-pair\") pod \"nmstate-webhook-6b89b748d8-c6rbj\" (UID: \"8a2f012a-c6b0-4ac6-afd4-0d78f516810f\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-c6rbj" Nov 26 06:59:54 crc kubenswrapper[4492]: I1126 06:59:54.692283 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rs52s\" (UniqueName: \"kubernetes.io/projected/8a2f012a-c6b0-4ac6-afd4-0d78f516810f-kube-api-access-rs52s\") pod \"nmstate-webhook-6b89b748d8-c6rbj\" (UID: \"8a2f012a-c6b0-4ac6-afd4-0d78f516810f\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-c6rbj" Nov 26 06:59:54 crc kubenswrapper[4492]: I1126 06:59:54.697073 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bnj79\" (UniqueName: 
\"kubernetes.io/projected/3b91e88b-adf1-49da-bf66-cdf3e2cd5ca4-kube-api-access-bnj79\") pod \"nmstate-handler-74hxm\" (UID: \"3b91e88b-adf1-49da-bf66-cdf3e2cd5ca4\") " pod="openshift-nmstate/nmstate-handler-74hxm" Nov 26 06:59:54 crc kubenswrapper[4492]: I1126 06:59:54.711364 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vbrlf\" (UniqueName: \"kubernetes.io/projected/af4d39de-b0b8-473b-89e4-e1b7d7db0a81-kube-api-access-vbrlf\") pod \"nmstate-metrics-5dcf9c57c5-5swnx\" (UID: \"af4d39de-b0b8-473b-89e4-e1b7d7db0a81\") " pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-5swnx" Nov 26 06:59:54 crc kubenswrapper[4492]: I1126 06:59:54.727163 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-handler-74hxm" Nov 26 06:59:54 crc kubenswrapper[4492]: I1126 06:59:54.733071 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-c6rbj" Nov 26 06:59:54 crc kubenswrapper[4492]: I1126 06:59:54.766680 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/b1b1d575-ebe5-44c6-b7eb-088510696be0-plugin-serving-cert\") pod \"nmstate-console-plugin-5874bd7bc5-qtzxm\" (UID: \"b1b1d575-ebe5-44c6-b7eb-088510696be0\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-qtzxm" Nov 26 06:59:54 crc kubenswrapper[4492]: I1126 06:59:54.766712 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/b1b1d575-ebe5-44c6-b7eb-088510696be0-nginx-conf\") pod \"nmstate-console-plugin-5874bd7bc5-qtzxm\" (UID: \"b1b1d575-ebe5-44c6-b7eb-088510696be0\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-qtzxm" Nov 26 06:59:54 crc kubenswrapper[4492]: I1126 06:59:54.766736 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2pb9c\" (UniqueName: \"kubernetes.io/projected/b1b1d575-ebe5-44c6-b7eb-088510696be0-kube-api-access-2pb9c\") pod \"nmstate-console-plugin-5874bd7bc5-qtzxm\" (UID: \"b1b1d575-ebe5-44c6-b7eb-088510696be0\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-qtzxm" Nov 26 06:59:54 crc kubenswrapper[4492]: I1126 06:59:54.767676 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/b1b1d575-ebe5-44c6-b7eb-088510696be0-nginx-conf\") pod \"nmstate-console-plugin-5874bd7bc5-qtzxm\" (UID: \"b1b1d575-ebe5-44c6-b7eb-088510696be0\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-qtzxm" Nov 26 06:59:54 crc kubenswrapper[4492]: I1126 06:59:54.775951 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/b1b1d575-ebe5-44c6-b7eb-088510696be0-plugin-serving-cert\") pod \"nmstate-console-plugin-5874bd7bc5-qtzxm\" (UID: \"b1b1d575-ebe5-44c6-b7eb-088510696be0\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-qtzxm" Nov 26 06:59:54 crc kubenswrapper[4492]: I1126 06:59:54.796155 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2pb9c\" (UniqueName: \"kubernetes.io/projected/b1b1d575-ebe5-44c6-b7eb-088510696be0-kube-api-access-2pb9c\") pod \"nmstate-console-plugin-5874bd7bc5-qtzxm\" (UID: \"b1b1d575-ebe5-44c6-b7eb-088510696be0\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-qtzxm" Nov 26 06:59:54 crc 
Nov 26 06:59:54 crc kubenswrapper[4492]: I1126 06:59:54.806980 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-6b447cd8-tgxfq"]
Nov 26 06:59:54 crc kubenswrapper[4492]: I1126 06:59:54.807707 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-6b447cd8-tgxfq"
Nov 26 06:59:54 crc kubenswrapper[4492]: I1126 06:59:54.839693 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-6b447cd8-tgxfq"]
Nov 26 06:59:54 crc kubenswrapper[4492]: I1126 06:59:54.855370 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-qtzxm"
Nov 26 06:59:54 crc kubenswrapper[4492]: I1126 06:59:54.871459 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/3c256d9a-c01a-4c79-b774-43fafad51266-trusted-ca-bundle\") pod \"console-6b447cd8-tgxfq\" (UID: \"3c256d9a-c01a-4c79-b774-43fafad51266\") " pod="openshift-console/console-6b447cd8-tgxfq"
Nov 26 06:59:54 crc kubenswrapper[4492]: I1126 06:59:54.871530 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/3c256d9a-c01a-4c79-b774-43fafad51266-oauth-serving-cert\") pod \"console-6b447cd8-tgxfq\" (UID: \"3c256d9a-c01a-4c79-b774-43fafad51266\") " pod="openshift-console/console-6b447cd8-tgxfq"
Nov 26 06:59:54 crc kubenswrapper[4492]: I1126 06:59:54.871562 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/3c256d9a-c01a-4c79-b774-43fafad51266-service-ca\") pod \"console-6b447cd8-tgxfq\" (UID: \"3c256d9a-c01a-4c79-b774-43fafad51266\") " pod="openshift-console/console-6b447cd8-tgxfq"
Nov 26 06:59:54 crc kubenswrapper[4492]: I1126 06:59:54.871636 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/3c256d9a-c01a-4c79-b774-43fafad51266-console-config\") pod \"console-6b447cd8-tgxfq\" (UID: \"3c256d9a-c01a-4c79-b774-43fafad51266\") " pod="openshift-console/console-6b447cd8-tgxfq"
Nov 26 06:59:54 crc kubenswrapper[4492]: I1126 06:59:54.871665 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kgqsb\" (UniqueName: \"kubernetes.io/projected/3c256d9a-c01a-4c79-b774-43fafad51266-kube-api-access-kgqsb\") pod \"console-6b447cd8-tgxfq\" (UID: \"3c256d9a-c01a-4c79-b774-43fafad51266\") " pod="openshift-console/console-6b447cd8-tgxfq"
Nov 26 06:59:54 crc kubenswrapper[4492]: I1126 06:59:54.871712 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/3c256d9a-c01a-4c79-b774-43fafad51266-console-oauth-config\") pod \"console-6b447cd8-tgxfq\" (UID: \"3c256d9a-c01a-4c79-b774-43fafad51266\") " pod="openshift-console/console-6b447cd8-tgxfq"
Nov 26 06:59:54 crc kubenswrapper[4492]: I1126 06:59:54.871783 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/3c256d9a-c01a-4c79-b774-43fafad51266-console-serving-cert\") pod \"console-6b447cd8-tgxfq\" (UID: \"3c256d9a-c01a-4c79-b774-43fafad51266\") " pod="openshift-console/console-6b447cd8-tgxfq"
Nov 26 06:59:54 crc kubenswrapper[4492]: I1126 06:59:54.972644 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/3c256d9a-c01a-4c79-b774-43fafad51266-trusted-ca-bundle\") pod \"console-6b447cd8-tgxfq\" (UID: \"3c256d9a-c01a-4c79-b774-43fafad51266\") " pod="openshift-console/console-6b447cd8-tgxfq"
Nov 26 06:59:54 crc kubenswrapper[4492]: I1126 06:59:54.972683 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/3c256d9a-c01a-4c79-b774-43fafad51266-oauth-serving-cert\") pod \"console-6b447cd8-tgxfq\" (UID: \"3c256d9a-c01a-4c79-b774-43fafad51266\") " pod="openshift-console/console-6b447cd8-tgxfq"
Nov 26 06:59:54 crc kubenswrapper[4492]: I1126 06:59:54.972698 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/3c256d9a-c01a-4c79-b774-43fafad51266-service-ca\") pod \"console-6b447cd8-tgxfq\" (UID: \"3c256d9a-c01a-4c79-b774-43fafad51266\") " pod="openshift-console/console-6b447cd8-tgxfq"
Nov 26 06:59:54 crc kubenswrapper[4492]: I1126 06:59:54.972730 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/3c256d9a-c01a-4c79-b774-43fafad51266-console-config\") pod \"console-6b447cd8-tgxfq\" (UID: \"3c256d9a-c01a-4c79-b774-43fafad51266\") " pod="openshift-console/console-6b447cd8-tgxfq"
Nov 26 06:59:54 crc kubenswrapper[4492]: I1126 06:59:54.972747 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kgqsb\" (UniqueName: \"kubernetes.io/projected/3c256d9a-c01a-4c79-b774-43fafad51266-kube-api-access-kgqsb\") pod \"console-6b447cd8-tgxfq\" (UID: \"3c256d9a-c01a-4c79-b774-43fafad51266\") " pod="openshift-console/console-6b447cd8-tgxfq"
Nov 26 06:59:54 crc kubenswrapper[4492]: I1126 06:59:54.972770 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/3c256d9a-c01a-4c79-b774-43fafad51266-console-oauth-config\") pod \"console-6b447cd8-tgxfq\" (UID: \"3c256d9a-c01a-4c79-b774-43fafad51266\") " pod="openshift-console/console-6b447cd8-tgxfq"
Nov 26 06:59:54 crc kubenswrapper[4492]: I1126 06:59:54.972798 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/3c256d9a-c01a-4c79-b774-43fafad51266-console-serving-cert\") pod \"console-6b447cd8-tgxfq\" (UID: \"3c256d9a-c01a-4c79-b774-43fafad51266\") " pod="openshift-console/console-6b447cd8-tgxfq"
Nov 26 06:59:54 crc kubenswrapper[4492]: I1126 06:59:54.973778 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/3c256d9a-c01a-4c79-b774-43fafad51266-service-ca\") pod \"console-6b447cd8-tgxfq\" (UID: \"3c256d9a-c01a-4c79-b774-43fafad51266\") " pod="openshift-console/console-6b447cd8-tgxfq"
Nov 26 06:59:54 crc kubenswrapper[4492]: I1126 06:59:54.974755 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/3c256d9a-c01a-4c79-b774-43fafad51266-trusted-ca-bundle\") pod \"console-6b447cd8-tgxfq\" (UID: \"3c256d9a-c01a-4c79-b774-43fafad51266\") " pod="openshift-console/console-6b447cd8-tgxfq"
Nov 26 06:59:54 crc kubenswrapper[4492]: I1126 06:59:54.975095 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/3c256d9a-c01a-4c79-b774-43fafad51266-console-config\") pod \"console-6b447cd8-tgxfq\" (UID: \"3c256d9a-c01a-4c79-b774-43fafad51266\") " pod="openshift-console/console-6b447cd8-tgxfq"
Nov 26 06:59:54 crc kubenswrapper[4492]: I1126 06:59:54.975245 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/3c256d9a-c01a-4c79-b774-43fafad51266-oauth-serving-cert\") pod \"console-6b447cd8-tgxfq\" (UID: \"3c256d9a-c01a-4c79-b774-43fafad51266\") " pod="openshift-console/console-6b447cd8-tgxfq"
Nov 26 06:59:54 crc kubenswrapper[4492]: I1126 06:59:54.976865 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/3c256d9a-c01a-4c79-b774-43fafad51266-console-serving-cert\") pod \"console-6b447cd8-tgxfq\" (UID: \"3c256d9a-c01a-4c79-b774-43fafad51266\") " pod="openshift-console/console-6b447cd8-tgxfq"
Nov 26 06:59:54 crc kubenswrapper[4492]: I1126 06:59:54.977430 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/3c256d9a-c01a-4c79-b774-43fafad51266-console-oauth-config\") pod \"console-6b447cd8-tgxfq\" (UID: \"3c256d9a-c01a-4c79-b774-43fafad51266\") " pod="openshift-console/console-6b447cd8-tgxfq"
Nov 26 06:59:54 crc kubenswrapper[4492]: I1126 06:59:54.994278 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-5swnx"
Nov 26 06:59:54 crc kubenswrapper[4492]: I1126 06:59:54.995110 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kgqsb\" (UniqueName: \"kubernetes.io/projected/3c256d9a-c01a-4c79-b774-43fafad51266-kube-api-access-kgqsb\") pod \"console-6b447cd8-tgxfq\" (UID: \"3c256d9a-c01a-4c79-b774-43fafad51266\") " pod="openshift-console/console-6b447cd8-tgxfq"
Nov 26 06:59:55 crc kubenswrapper[4492]: I1126 06:59:55.012595 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-6b89b748d8-c6rbj"]
Nov 26 06:59:55 crc kubenswrapper[4492]: W1126 06:59:55.017262 4492 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8a2f012a_c6b0_4ac6_afd4_0d78f516810f.slice/crio-2ced61d8cb8568fcae6f29c7ebb881cf78bcb7fba53e721fe4d332e9fbea88a5 WatchSource:0}: Error finding container 2ced61d8cb8568fcae6f29c7ebb881cf78bcb7fba53e721fe4d332e9fbea88a5: Status 404 returned error can't find the container with id 2ced61d8cb8568fcae6f29c7ebb881cf78bcb7fba53e721fe4d332e9fbea88a5
Nov 26 06:59:55 crc kubenswrapper[4492]: I1126 06:59:55.123874 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-6b447cd8-tgxfq"
Nov 26 06:59:55 crc kubenswrapper[4492]: I1126 06:59:55.178587 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-5dcf9c57c5-5swnx"]
Nov 26 06:59:55 crc kubenswrapper[4492]: I1126 06:59:55.283608 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-5874bd7bc5-qtzxm"]
Nov 26 06:59:55 crc kubenswrapper[4492]: W1126 06:59:55.290248 4492 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb1b1d575_ebe5_44c6_b7eb_088510696be0.slice/crio-86f1949d7be870c383e9f877e7a037c84f0c7b79e49f3c496b3f191a953114bd WatchSource:0}: Error finding container 86f1949d7be870c383e9f877e7a037c84f0c7b79e49f3c496b3f191a953114bd: Status 404 returned error can't find the container with id 86f1949d7be870c383e9f877e7a037c84f0c7b79e49f3c496b3f191a953114bd
Nov 26 06:59:55 crc kubenswrapper[4492]: I1126 06:59:55.499714 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-6b447cd8-tgxfq"]
Nov 26 06:59:55 crc kubenswrapper[4492]: W1126 06:59:55.506516 4492 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3c256d9a_c01a_4c79_b774_43fafad51266.slice/crio-8a7bcb86a2a6620340f349b374273f70b1621ff8adf29be9a6a5e26a8bf4fd85 WatchSource:0}: Error finding container 8a7bcb86a2a6620340f349b374273f70b1621ff8adf29be9a6a5e26a8bf4fd85: Status 404 returned error can't find the container with id 8a7bcb86a2a6620340f349b374273f70b1621ff8adf29be9a6a5e26a8bf4fd85
Nov 26 06:59:55 crc kubenswrapper[4492]: I1126 06:59:55.616481 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-5swnx" event={"ID":"af4d39de-b0b8-473b-89e4-e1b7d7db0a81","Type":"ContainerStarted","Data":"249a93c86c40f24d7e141651c980c6c036ebde3f6e980ef33cb62601b864d35d"}
Nov 26 06:59:55 crc kubenswrapper[4492]: I1126 06:59:55.618739 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-qtzxm" event={"ID":"b1b1d575-ebe5-44c6-b7eb-088510696be0","Type":"ContainerStarted","Data":"86f1949d7be870c383e9f877e7a037c84f0c7b79e49f3c496b3f191a953114bd"}
Nov 26 06:59:55 crc kubenswrapper[4492]: I1126 06:59:55.620444 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-6b447cd8-tgxfq" event={"ID":"3c256d9a-c01a-4c79-b774-43fafad51266","Type":"ContainerStarted","Data":"8a7bcb86a2a6620340f349b374273f70b1621ff8adf29be9a6a5e26a8bf4fd85"}
Nov 26 06:59:55 crc kubenswrapper[4492]: I1126 06:59:55.621961 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-c6rbj" event={"ID":"8a2f012a-c6b0-4ac6-afd4-0d78f516810f","Type":"ContainerStarted","Data":"2ced61d8cb8568fcae6f29c7ebb881cf78bcb7fba53e721fe4d332e9fbea88a5"}
Nov 26 06:59:55 crc kubenswrapper[4492]: I1126 06:59:55.623134 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-74hxm" event={"ID":"3b91e88b-adf1-49da-bf66-cdf3e2cd5ca4","Type":"ContainerStarted","Data":"c8008032df2a1e7adbdfd4ce0f74c8c50ea7e9553d0e3ae26bead8bcfc4751eb"}
Nov 26 06:59:55 crc kubenswrapper[4492]: I1126 06:59:55.638065 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-6b447cd8-tgxfq" podStartSLOduration=1.6380389 podStartE2EDuration="1.6380389s" podCreationTimestamp="2025-11-26 06:59:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:59:55.633384078 +0000 UTC m=+691.517272377" watchObservedRunningTime="2025-11-26 06:59:55.6380389 +0000 UTC m=+691.521927198"
Nov 26 06:59:56 crc kubenswrapper[4492]: I1126 06:59:56.633341 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-6b447cd8-tgxfq" event={"ID":"3c256d9a-c01a-4c79-b774-43fafad51266","Type":"ContainerStarted","Data":"2e8b8cfae8d0e9845c3c271cf27173dd066e21f436c31a3ffc6026c4d1ac38ad"}
Nov 26 06:59:58 crc kubenswrapper[4492]: I1126 06:59:58.654812 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-qtzxm" event={"ID":"b1b1d575-ebe5-44c6-b7eb-088510696be0","Type":"ContainerStarted","Data":"5d521960186eab9f59dccc49857f19401a6e69e4de8d7ad3bc5aa2faaba3c23e"}
Nov 26 06:59:58 crc kubenswrapper[4492]: I1126 06:59:58.657138 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-c6rbj" event={"ID":"8a2f012a-c6b0-4ac6-afd4-0d78f516810f","Type":"ContainerStarted","Data":"bac403548596de343adb3d4b65f04ffe6f3aec5fd641d1bfe4a3f9c89875257c"}
Nov 26 06:59:58 crc kubenswrapper[4492]: I1126 06:59:58.657799 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-c6rbj"
Nov 26 06:59:58 crc kubenswrapper[4492]: I1126 06:59:58.658991 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-74hxm" event={"ID":"3b91e88b-adf1-49da-bf66-cdf3e2cd5ca4","Type":"ContainerStarted","Data":"5aad142fc6951367fccb1aba580a4b550f89175beb554c8d6c99ab7f03d4b153"}
Nov 26 06:59:58 crc kubenswrapper[4492]: I1126 06:59:58.659141 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-handler-74hxm"
Nov 26 06:59:58 crc kubenswrapper[4492]: I1126 06:59:58.660163 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-5swnx" event={"ID":"af4d39de-b0b8-473b-89e4-e1b7d7db0a81","Type":"ContainerStarted","Data":"4d24aa54cd15db89b47955ff0e0c457a59edbba623483adfd92d3f9ef0938dd5"}
Nov 26 06:59:58 crc kubenswrapper[4492]: I1126 06:59:58.671280 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-qtzxm" podStartSLOduration=2.409605971 podStartE2EDuration="4.671269148s" podCreationTimestamp="2025-11-26 06:59:54 +0000 UTC" firstStartedPulling="2025-11-26 06:59:55.297945691 +0000 UTC m=+691.181833989" lastFinishedPulling="2025-11-26 06:59:57.559608868 +0000 UTC m=+693.443497166" observedRunningTime="2025-11-26 06:59:58.670356333 +0000 UTC m=+694.554244630" watchObservedRunningTime="2025-11-26 06:59:58.671269148 +0000 UTC m=+694.555157446"
Nov 26 06:59:58 crc kubenswrapper[4492]: I1126 06:59:58.686339 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-c6rbj" podStartSLOduration=2.147496426 podStartE2EDuration="4.686324092s" podCreationTimestamp="2025-11-26 06:59:54 +0000 UTC" firstStartedPulling="2025-11-26 06:59:55.020307221 +0000 UTC m=+690.904195519" lastFinishedPulling="2025-11-26 06:59:57.559134887 +0000 UTC m=+693.443023185" observedRunningTime="2025-11-26 06:59:58.684229936 +0000 UTC m=+694.568118224" watchObservedRunningTime="2025-11-26 06:59:58.686324092 +0000 UTC m=+694.570212391"
Nov 26 06:59:58 crc kubenswrapper[4492]: I1126 06:59:58.706819 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-handler-74hxm" podStartSLOduration=1.901598831 podStartE2EDuration="4.706771415s" podCreationTimestamp="2025-11-26 06:59:54 +0000 UTC" firstStartedPulling="2025-11-26 06:59:54.763160977 +0000 UTC m=+690.647049275" lastFinishedPulling="2025-11-26 06:59:57.568333561 +0000 UTC m=+693.452221859" observedRunningTime="2025-11-26 06:59:58.705056361 +0000 UTC m=+694.588944659" watchObservedRunningTime="2025-11-26 06:59:58.706771415 +0000 UTC m=+694.590659713"
Nov 26 07:00:00 crc kubenswrapper[4492]: I1126 07:00:00.147089 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402340-scf6v"]
Nov 26 07:00:00 crc kubenswrapper[4492]: I1126 07:00:00.148161 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402340-scf6v"
Nov 26 07:00:00 crc kubenswrapper[4492]: I1126 07:00:00.149555 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Nov 26 07:00:00 crc kubenswrapper[4492]: I1126 07:00:00.149676 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Nov 26 07:00:00 crc kubenswrapper[4492]: I1126 07:00:00.157336 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402340-scf6v"]
Nov 26 07:00:00 crc kubenswrapper[4492]: I1126 07:00:00.245809 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b942b7c1-09d6-4424-a4b8-97f86a2a051f-config-volume\") pod \"collect-profiles-29402340-scf6v\" (UID: \"b942b7c1-09d6-4424-a4b8-97f86a2a051f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402340-scf6v"
Nov 26 07:00:00 crc kubenswrapper[4492]: I1126 07:00:00.245872 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pnhk5\" (UniqueName: \"kubernetes.io/projected/b942b7c1-09d6-4424-a4b8-97f86a2a051f-kube-api-access-pnhk5\") pod \"collect-profiles-29402340-scf6v\" (UID: \"b942b7c1-09d6-4424-a4b8-97f86a2a051f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402340-scf6v"
Nov 26 07:00:00 crc kubenswrapper[4492]: I1126 07:00:00.245952 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b942b7c1-09d6-4424-a4b8-97f86a2a051f-secret-volume\") pod \"collect-profiles-29402340-scf6v\" (UID: \"b942b7c1-09d6-4424-a4b8-97f86a2a051f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402340-scf6v"
Nov 26 07:00:00 crc kubenswrapper[4492]: I1126 07:00:00.346732 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b942b7c1-09d6-4424-a4b8-97f86a2a051f-config-volume\") pod \"collect-profiles-29402340-scf6v\" (UID: \"b942b7c1-09d6-4424-a4b8-97f86a2a051f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402340-scf6v"
Nov 26 07:00:00 crc kubenswrapper[4492]: I1126 07:00:00.346797 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pnhk5\" (UniqueName: \"kubernetes.io/projected/b942b7c1-09d6-4424-a4b8-97f86a2a051f-kube-api-access-pnhk5\") pod \"collect-profiles-29402340-scf6v\" (UID: \"b942b7c1-09d6-4424-a4b8-97f86a2a051f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402340-scf6v"
Nov 26 07:00:00 crc kubenswrapper[4492]: I1126 07:00:00.346839 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b942b7c1-09d6-4424-a4b8-97f86a2a051f-secret-volume\") pod \"collect-profiles-29402340-scf6v\" (UID: \"b942b7c1-09d6-4424-a4b8-97f86a2a051f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402340-scf6v"
Nov 26 07:00:00 crc kubenswrapper[4492]: I1126 07:00:00.347835 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b942b7c1-09d6-4424-a4b8-97f86a2a051f-config-volume\") pod \"collect-profiles-29402340-scf6v\" (UID: \"b942b7c1-09d6-4424-a4b8-97f86a2a051f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402340-scf6v"
Nov 26 07:00:00 crc kubenswrapper[4492]: I1126 07:00:00.353588 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b942b7c1-09d6-4424-a4b8-97f86a2a051f-secret-volume\") pod \"collect-profiles-29402340-scf6v\" (UID: \"b942b7c1-09d6-4424-a4b8-97f86a2a051f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402340-scf6v"
Nov 26 07:00:00 crc kubenswrapper[4492]: I1126 07:00:00.365576 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pnhk5\" (UniqueName: \"kubernetes.io/projected/b942b7c1-09d6-4424-a4b8-97f86a2a051f-kube-api-access-pnhk5\") pod \"collect-profiles-29402340-scf6v\" (UID: \"b942b7c1-09d6-4424-a4b8-97f86a2a051f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402340-scf6v"
Nov 26 07:00:00 crc kubenswrapper[4492]: I1126 07:00:00.461117 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402340-scf6v"
Nov 26 07:00:00 crc kubenswrapper[4492]: I1126 07:00:00.675456 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-5swnx" event={"ID":"af4d39de-b0b8-473b-89e4-e1b7d7db0a81","Type":"ContainerStarted","Data":"ed50c1eb3295c2cf90764b27e38b8d37993f1d6d0470891a86f2e254b77e55de"}
Nov 26 07:00:00 crc kubenswrapper[4492]: I1126 07:00:00.695303 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-5swnx" podStartSLOduration=2.05286374 podStartE2EDuration="6.695275484s" podCreationTimestamp="2025-11-26 06:59:54 +0000 UTC" firstStartedPulling="2025-11-26 06:59:55.199602799 +0000 UTC m=+691.083491097" lastFinishedPulling="2025-11-26 06:59:59.842014553 +0000 UTC m=+695.725902841" observedRunningTime="2025-11-26 07:00:00.69038493 +0000 UTC m=+696.574273228" watchObservedRunningTime="2025-11-26 07:00:00.695275484 +0000 UTC m=+696.579163782"
Nov 26 07:00:00 crc kubenswrapper[4492]: I1126 07:00:00.845817 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402340-scf6v"]
Nov 26 07:00:01 crc kubenswrapper[4492]: I1126 07:00:01.681074 4492 generic.go:334] "Generic (PLEG): container finished" podID="b942b7c1-09d6-4424-a4b8-97f86a2a051f" containerID="03f75394e181f7125372b3bf6359b96389917a0e6ef13f9440fe3cd74b525237" exitCode=0
Nov 26 07:00:01 crc kubenswrapper[4492]: I1126 07:00:01.681197 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402340-scf6v" event={"ID":"b942b7c1-09d6-4424-a4b8-97f86a2a051f","Type":"ContainerDied","Data":"03f75394e181f7125372b3bf6359b96389917a0e6ef13f9440fe3cd74b525237"}
Nov 26 07:00:01 crc kubenswrapper[4492]: I1126 07:00:01.681456 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402340-scf6v" event={"ID":"b942b7c1-09d6-4424-a4b8-97f86a2a051f","Type":"ContainerStarted","Data":"6275e5b3d5269039d5499394657aa5cf57cd4024f2087db03e71e12170a48d46"}
Nov 26 07:00:02 crc kubenswrapper[4492]: I1126 07:00:02.896561 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402340-scf6v"
Nov 26 07:00:03 crc kubenswrapper[4492]: I1126 07:00:03.086296 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b942b7c1-09d6-4424-a4b8-97f86a2a051f-secret-volume\") pod \"b942b7c1-09d6-4424-a4b8-97f86a2a051f\" (UID: \"b942b7c1-09d6-4424-a4b8-97f86a2a051f\") "
Nov 26 07:00:03 crc kubenswrapper[4492]: I1126 07:00:03.086827 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b942b7c1-09d6-4424-a4b8-97f86a2a051f-config-volume\") pod \"b942b7c1-09d6-4424-a4b8-97f86a2a051f\" (UID: \"b942b7c1-09d6-4424-a4b8-97f86a2a051f\") "
Nov 26 07:00:03 crc kubenswrapper[4492]: I1126 07:00:03.087479 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pnhk5\" (UniqueName: \"kubernetes.io/projected/b942b7c1-09d6-4424-a4b8-97f86a2a051f-kube-api-access-pnhk5\") pod \"b942b7c1-09d6-4424-a4b8-97f86a2a051f\" (UID: \"b942b7c1-09d6-4424-a4b8-97f86a2a051f\") "
Nov 26 07:00:03 crc kubenswrapper[4492]: I1126 07:00:03.087421 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b942b7c1-09d6-4424-a4b8-97f86a2a051f-config-volume" (OuterVolumeSpecName: "config-volume") pod "b942b7c1-09d6-4424-a4b8-97f86a2a051f" (UID: "b942b7c1-09d6-4424-a4b8-97f86a2a051f"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 07:00:03 crc kubenswrapper[4492]: I1126 07:00:03.088225 4492 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b942b7c1-09d6-4424-a4b8-97f86a2a051f-config-volume\") on node \"crc\" DevicePath \"\""
Nov 26 07:00:03 crc kubenswrapper[4492]: I1126 07:00:03.092221 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b942b7c1-09d6-4424-a4b8-97f86a2a051f-kube-api-access-pnhk5" (OuterVolumeSpecName: "kube-api-access-pnhk5") pod "b942b7c1-09d6-4424-a4b8-97f86a2a051f" (UID: "b942b7c1-09d6-4424-a4b8-97f86a2a051f"). InnerVolumeSpecName "kube-api-access-pnhk5". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 07:00:03 crc kubenswrapper[4492]: I1126 07:00:03.092646 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b942b7c1-09d6-4424-a4b8-97f86a2a051f-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "b942b7c1-09d6-4424-a4b8-97f86a2a051f" (UID: "b942b7c1-09d6-4424-a4b8-97f86a2a051f"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 07:00:03 crc kubenswrapper[4492]: I1126 07:00:03.189141 4492 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b942b7c1-09d6-4424-a4b8-97f86a2a051f-secret-volume\") on node \"crc\" DevicePath \"\""
Nov 26 07:00:03 crc kubenswrapper[4492]: I1126 07:00:03.189197 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pnhk5\" (UniqueName: \"kubernetes.io/projected/b942b7c1-09d6-4424-a4b8-97f86a2a051f-kube-api-access-pnhk5\") on node \"crc\" DevicePath \"\""
Nov 26 07:00:03 crc kubenswrapper[4492]: I1126 07:00:03.700606 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402340-scf6v" event={"ID":"b942b7c1-09d6-4424-a4b8-97f86a2a051f","Type":"ContainerDied","Data":"6275e5b3d5269039d5499394657aa5cf57cd4024f2087db03e71e12170a48d46"}
Nov 26 07:00:03 crc kubenswrapper[4492]: I1126 07:00:03.700661 4492 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6275e5b3d5269039d5499394657aa5cf57cd4024f2087db03e71e12170a48d46"
Nov 26 07:00:03 crc kubenswrapper[4492]: I1126 07:00:03.700677 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402340-scf6v"
Nov 26 07:00:04 crc kubenswrapper[4492]: I1126 07:00:04.747644 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-handler-74hxm"
Nov 26 07:00:05 crc kubenswrapper[4492]: I1126 07:00:05.124570 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-6b447cd8-tgxfq"
Nov 26 07:00:05 crc kubenswrapper[4492]: I1126 07:00:05.124856 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-6b447cd8-tgxfq"
Nov 26 07:00:05 crc kubenswrapper[4492]: I1126 07:00:05.128633 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-6b447cd8-tgxfq"
Nov 26 07:00:05 crc kubenswrapper[4492]: I1126 07:00:05.713167 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-6b447cd8-tgxfq"
Nov 26 07:00:05 crc kubenswrapper[4492]: I1126 07:00:05.756220 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-v92zj"]
Nov 26 07:00:14 crc kubenswrapper[4492]: I1126 07:00:14.737701 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-c6rbj"
Nov 26 07:00:19 crc kubenswrapper[4492]: I1126 07:00:19.441255 4492 patch_prober.go:28] interesting pod/machine-config-daemon-6blv7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 26 07:00:19 crc kubenswrapper[4492]: I1126 07:00:19.441616 4492 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 26 07:00:24 crc kubenswrapper[4492]: I1126 07:00:24.575365 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6jbnmm"]
pods=["openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6jbnmm"] Nov 26 07:00:24 crc kubenswrapper[4492]: E1126 07:00:24.576086 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b942b7c1-09d6-4424-a4b8-97f86a2a051f" containerName="collect-profiles" Nov 26 07:00:24 crc kubenswrapper[4492]: I1126 07:00:24.576100 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="b942b7c1-09d6-4424-a4b8-97f86a2a051f" containerName="collect-profiles" Nov 26 07:00:24 crc kubenswrapper[4492]: I1126 07:00:24.576206 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="b942b7c1-09d6-4424-a4b8-97f86a2a051f" containerName="collect-profiles" Nov 26 07:00:24 crc kubenswrapper[4492]: I1126 07:00:24.576916 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6jbnmm" Nov 26 07:00:24 crc kubenswrapper[4492]: I1126 07:00:24.578621 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Nov 26 07:00:24 crc kubenswrapper[4492]: I1126 07:00:24.591741 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6jbnmm"] Nov 26 07:00:24 crc kubenswrapper[4492]: I1126 07:00:24.647946 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/55d8c7aa-c9df-4f3d-8bb2-9de824ff99eb-bundle\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6jbnmm\" (UID: \"55d8c7aa-c9df-4f3d-8bb2-9de824ff99eb\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6jbnmm" Nov 26 07:00:24 crc kubenswrapper[4492]: I1126 07:00:24.648018 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9m6xz\" (UniqueName: \"kubernetes.io/projected/55d8c7aa-c9df-4f3d-8bb2-9de824ff99eb-kube-api-access-9m6xz\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6jbnmm\" (UID: \"55d8c7aa-c9df-4f3d-8bb2-9de824ff99eb\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6jbnmm" Nov 26 07:00:24 crc kubenswrapper[4492]: I1126 07:00:24.648089 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/55d8c7aa-c9df-4f3d-8bb2-9de824ff99eb-util\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6jbnmm\" (UID: \"55d8c7aa-c9df-4f3d-8bb2-9de824ff99eb\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6jbnmm" Nov 26 07:00:24 crc kubenswrapper[4492]: I1126 07:00:24.749580 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/55d8c7aa-c9df-4f3d-8bb2-9de824ff99eb-bundle\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6jbnmm\" (UID: \"55d8c7aa-c9df-4f3d-8bb2-9de824ff99eb\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6jbnmm" Nov 26 07:00:24 crc kubenswrapper[4492]: I1126 07:00:24.749735 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9m6xz\" (UniqueName: \"kubernetes.io/projected/55d8c7aa-c9df-4f3d-8bb2-9de824ff99eb-kube-api-access-9m6xz\") pod 
\"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6jbnmm\" (UID: \"55d8c7aa-c9df-4f3d-8bb2-9de824ff99eb\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6jbnmm" Nov 26 07:00:24 crc kubenswrapper[4492]: I1126 07:00:24.749802 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/55d8c7aa-c9df-4f3d-8bb2-9de824ff99eb-util\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6jbnmm\" (UID: \"55d8c7aa-c9df-4f3d-8bb2-9de824ff99eb\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6jbnmm" Nov 26 07:00:24 crc kubenswrapper[4492]: I1126 07:00:24.750346 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/55d8c7aa-c9df-4f3d-8bb2-9de824ff99eb-bundle\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6jbnmm\" (UID: \"55d8c7aa-c9df-4f3d-8bb2-9de824ff99eb\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6jbnmm" Nov 26 07:00:24 crc kubenswrapper[4492]: I1126 07:00:24.751677 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/55d8c7aa-c9df-4f3d-8bb2-9de824ff99eb-util\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6jbnmm\" (UID: \"55d8c7aa-c9df-4f3d-8bb2-9de824ff99eb\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6jbnmm" Nov 26 07:00:24 crc kubenswrapper[4492]: I1126 07:00:24.771383 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9m6xz\" (UniqueName: \"kubernetes.io/projected/55d8c7aa-c9df-4f3d-8bb2-9de824ff99eb-kube-api-access-9m6xz\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6jbnmm\" (UID: \"55d8c7aa-c9df-4f3d-8bb2-9de824ff99eb\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6jbnmm" Nov 26 07:00:24 crc kubenswrapper[4492]: I1126 07:00:24.902008 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6jbnmm" Nov 26 07:00:25 crc kubenswrapper[4492]: I1126 07:00:25.286273 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6jbnmm"] Nov 26 07:00:25 crc kubenswrapper[4492]: I1126 07:00:25.835076 4492 generic.go:334] "Generic (PLEG): container finished" podID="55d8c7aa-c9df-4f3d-8bb2-9de824ff99eb" containerID="4ff3a2487e06c854087b851df5d5f15655394a35a1a7fc39d779a5fa9e3b003c" exitCode=0 Nov 26 07:00:25 crc kubenswrapper[4492]: I1126 07:00:25.835511 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6jbnmm" event={"ID":"55d8c7aa-c9df-4f3d-8bb2-9de824ff99eb","Type":"ContainerDied","Data":"4ff3a2487e06c854087b851df5d5f15655394a35a1a7fc39d779a5fa9e3b003c"} Nov 26 07:00:25 crc kubenswrapper[4492]: I1126 07:00:25.835557 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6jbnmm" event={"ID":"55d8c7aa-c9df-4f3d-8bb2-9de824ff99eb","Type":"ContainerStarted","Data":"40bad7ff28c635e223be3c08e3e42245bc0d2fb6086c0176634b8c57ac81d336"} Nov 26 07:00:27 crc kubenswrapper[4492]: I1126 07:00:27.849505 4492 generic.go:334] "Generic (PLEG): container finished" podID="55d8c7aa-c9df-4f3d-8bb2-9de824ff99eb" containerID="537ff7422994d946e1cc76970f76d40ef3b3e08598495e88ea3abdc7989a5ad2" exitCode=0 Nov 26 07:00:27 crc kubenswrapper[4492]: I1126 07:00:27.849615 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6jbnmm" event={"ID":"55d8c7aa-c9df-4f3d-8bb2-9de824ff99eb","Type":"ContainerDied","Data":"537ff7422994d946e1cc76970f76d40ef3b3e08598495e88ea3abdc7989a5ad2"} Nov 26 07:00:28 crc kubenswrapper[4492]: I1126 07:00:28.859727 4492 generic.go:334] "Generic (PLEG): container finished" podID="55d8c7aa-c9df-4f3d-8bb2-9de824ff99eb" containerID="dd442d49e165f3baf679be8af7bc1c42a609c1a1239f011510e8f84bca049fd6" exitCode=0 Nov 26 07:00:28 crc kubenswrapper[4492]: I1126 07:00:28.859813 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6jbnmm" event={"ID":"55d8c7aa-c9df-4f3d-8bb2-9de824ff99eb","Type":"ContainerDied","Data":"dd442d49e165f3baf679be8af7bc1c42a609c1a1239f011510e8f84bca049fd6"} Nov 26 07:00:30 crc kubenswrapper[4492]: I1126 07:00:30.041261 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6jbnmm" Nov 26 07:00:30 crc kubenswrapper[4492]: I1126 07:00:30.220906 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/55d8c7aa-c9df-4f3d-8bb2-9de824ff99eb-util\") pod \"55d8c7aa-c9df-4f3d-8bb2-9de824ff99eb\" (UID: \"55d8c7aa-c9df-4f3d-8bb2-9de824ff99eb\") " Nov 26 07:00:30 crc kubenswrapper[4492]: I1126 07:00:30.221127 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9m6xz\" (UniqueName: \"kubernetes.io/projected/55d8c7aa-c9df-4f3d-8bb2-9de824ff99eb-kube-api-access-9m6xz\") pod \"55d8c7aa-c9df-4f3d-8bb2-9de824ff99eb\" (UID: \"55d8c7aa-c9df-4f3d-8bb2-9de824ff99eb\") " Nov 26 07:00:30 crc kubenswrapper[4492]: I1126 07:00:30.221233 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/55d8c7aa-c9df-4f3d-8bb2-9de824ff99eb-bundle\") pod \"55d8c7aa-c9df-4f3d-8bb2-9de824ff99eb\" (UID: \"55d8c7aa-c9df-4f3d-8bb2-9de824ff99eb\") " Nov 26 07:00:30 crc kubenswrapper[4492]: I1126 07:00:30.221991 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/55d8c7aa-c9df-4f3d-8bb2-9de824ff99eb-bundle" (OuterVolumeSpecName: "bundle") pod "55d8c7aa-c9df-4f3d-8bb2-9de824ff99eb" (UID: "55d8c7aa-c9df-4f3d-8bb2-9de824ff99eb"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:00:30 crc kubenswrapper[4492]: I1126 07:00:30.228760 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/55d8c7aa-c9df-4f3d-8bb2-9de824ff99eb-kube-api-access-9m6xz" (OuterVolumeSpecName: "kube-api-access-9m6xz") pod "55d8c7aa-c9df-4f3d-8bb2-9de824ff99eb" (UID: "55d8c7aa-c9df-4f3d-8bb2-9de824ff99eb"). InnerVolumeSpecName "kube-api-access-9m6xz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:00:30 crc kubenswrapper[4492]: I1126 07:00:30.232927 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/55d8c7aa-c9df-4f3d-8bb2-9de824ff99eb-util" (OuterVolumeSpecName: "util") pod "55d8c7aa-c9df-4f3d-8bb2-9de824ff99eb" (UID: "55d8c7aa-c9df-4f3d-8bb2-9de824ff99eb"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:00:30 crc kubenswrapper[4492]: I1126 07:00:30.323667 4492 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/55d8c7aa-c9df-4f3d-8bb2-9de824ff99eb-util\") on node \"crc\" DevicePath \"\"" Nov 26 07:00:30 crc kubenswrapper[4492]: I1126 07:00:30.323707 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9m6xz\" (UniqueName: \"kubernetes.io/projected/55d8c7aa-c9df-4f3d-8bb2-9de824ff99eb-kube-api-access-9m6xz\") on node \"crc\" DevicePath \"\"" Nov 26 07:00:30 crc kubenswrapper[4492]: I1126 07:00:30.323727 4492 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/55d8c7aa-c9df-4f3d-8bb2-9de824ff99eb-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:00:30 crc kubenswrapper[4492]: I1126 07:00:30.790427 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-f9d7485db-v92zj" podUID="ab14021b-87d7-43d0-9357-e8739e2d7dd1" containerName="console" containerID="cri-o://4e2cb884e92c900bb12aee1b7e5e6bb4c2bd784620b3706043ba58dd9625d7f8" gracePeriod=15 Nov 26 07:00:30 crc kubenswrapper[4492]: I1126 07:00:30.873250 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6jbnmm" event={"ID":"55d8c7aa-c9df-4f3d-8bb2-9de824ff99eb","Type":"ContainerDied","Data":"40bad7ff28c635e223be3c08e3e42245bc0d2fb6086c0176634b8c57ac81d336"} Nov 26 07:00:30 crc kubenswrapper[4492]: I1126 07:00:30.873428 4492 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="40bad7ff28c635e223be3c08e3e42245bc0d2fb6086c0176634b8c57ac81d336" Nov 26 07:00:30 crc kubenswrapper[4492]: I1126 07:00:30.873333 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6jbnmm" Nov 26 07:00:31 crc kubenswrapper[4492]: I1126 07:00:31.088777 4492 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-v92zj_ab14021b-87d7-43d0-9357-e8739e2d7dd1/console/0.log" Nov 26 07:00:31 crc kubenswrapper[4492]: I1126 07:00:31.088854 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-v92zj" Nov 26 07:00:31 crc kubenswrapper[4492]: I1126 07:00:31.135119 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/ab14021b-87d7-43d0-9357-e8739e2d7dd1-console-serving-cert\") pod \"ab14021b-87d7-43d0-9357-e8739e2d7dd1\" (UID: \"ab14021b-87d7-43d0-9357-e8739e2d7dd1\") " Nov 26 07:00:31 crc kubenswrapper[4492]: I1126 07:00:31.135417 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ab14021b-87d7-43d0-9357-e8739e2d7dd1-trusted-ca-bundle\") pod \"ab14021b-87d7-43d0-9357-e8739e2d7dd1\" (UID: \"ab14021b-87d7-43d0-9357-e8739e2d7dd1\") " Nov 26 07:00:31 crc kubenswrapper[4492]: I1126 07:00:31.135521 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/ab14021b-87d7-43d0-9357-e8739e2d7dd1-console-config\") pod \"ab14021b-87d7-43d0-9357-e8739e2d7dd1\" (UID: \"ab14021b-87d7-43d0-9357-e8739e2d7dd1\") " Nov 26 07:00:31 crc kubenswrapper[4492]: I1126 07:00:31.135599 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zh69p\" (UniqueName: \"kubernetes.io/projected/ab14021b-87d7-43d0-9357-e8739e2d7dd1-kube-api-access-zh69p\") pod \"ab14021b-87d7-43d0-9357-e8739e2d7dd1\" (UID: \"ab14021b-87d7-43d0-9357-e8739e2d7dd1\") " Nov 26 07:00:31 crc kubenswrapper[4492]: I1126 07:00:31.135722 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/ab14021b-87d7-43d0-9357-e8739e2d7dd1-oauth-serving-cert\") pod \"ab14021b-87d7-43d0-9357-e8739e2d7dd1\" (UID: \"ab14021b-87d7-43d0-9357-e8739e2d7dd1\") " Nov 26 07:00:31 crc kubenswrapper[4492]: I1126 07:00:31.136131 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/ab14021b-87d7-43d0-9357-e8739e2d7dd1-service-ca\") pod \"ab14021b-87d7-43d0-9357-e8739e2d7dd1\" (UID: \"ab14021b-87d7-43d0-9357-e8739e2d7dd1\") " Nov 26 07:00:31 crc kubenswrapper[4492]: I1126 07:00:31.136261 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/ab14021b-87d7-43d0-9357-e8739e2d7dd1-console-oauth-config\") pod \"ab14021b-87d7-43d0-9357-e8739e2d7dd1\" (UID: \"ab14021b-87d7-43d0-9357-e8739e2d7dd1\") " Nov 26 07:00:31 crc kubenswrapper[4492]: I1126 07:00:31.136071 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ab14021b-87d7-43d0-9357-e8739e2d7dd1-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "ab14021b-87d7-43d0-9357-e8739e2d7dd1" (UID: "ab14021b-87d7-43d0-9357-e8739e2d7dd1"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:00:31 crc kubenswrapper[4492]: I1126 07:00:31.136081 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ab14021b-87d7-43d0-9357-e8739e2d7dd1-console-config" (OuterVolumeSpecName: "console-config") pod "ab14021b-87d7-43d0-9357-e8739e2d7dd1" (UID: "ab14021b-87d7-43d0-9357-e8739e2d7dd1"). InnerVolumeSpecName "console-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:00:31 crc kubenswrapper[4492]: I1126 07:00:31.136385 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ab14021b-87d7-43d0-9357-e8739e2d7dd1-service-ca" (OuterVolumeSpecName: "service-ca") pod "ab14021b-87d7-43d0-9357-e8739e2d7dd1" (UID: "ab14021b-87d7-43d0-9357-e8739e2d7dd1"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:00:31 crc kubenswrapper[4492]: I1126 07:00:31.136510 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ab14021b-87d7-43d0-9357-e8739e2d7dd1-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "ab14021b-87d7-43d0-9357-e8739e2d7dd1" (UID: "ab14021b-87d7-43d0-9357-e8739e2d7dd1"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:00:31 crc kubenswrapper[4492]: I1126 07:00:31.137099 4492 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ab14021b-87d7-43d0-9357-e8739e2d7dd1-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:00:31 crc kubenswrapper[4492]: I1126 07:00:31.137164 4492 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/ab14021b-87d7-43d0-9357-e8739e2d7dd1-console-config\") on node \"crc\" DevicePath \"\"" Nov 26 07:00:31 crc kubenswrapper[4492]: I1126 07:00:31.137237 4492 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/ab14021b-87d7-43d0-9357-e8739e2d7dd1-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 07:00:31 crc kubenswrapper[4492]: I1126 07:00:31.137285 4492 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/ab14021b-87d7-43d0-9357-e8739e2d7dd1-service-ca\") on node \"crc\" DevicePath \"\"" Nov 26 07:00:31 crc kubenswrapper[4492]: I1126 07:00:31.151404 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ab14021b-87d7-43d0-9357-e8739e2d7dd1-kube-api-access-zh69p" (OuterVolumeSpecName: "kube-api-access-zh69p") pod "ab14021b-87d7-43d0-9357-e8739e2d7dd1" (UID: "ab14021b-87d7-43d0-9357-e8739e2d7dd1"). InnerVolumeSpecName "kube-api-access-zh69p". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:00:31 crc kubenswrapper[4492]: I1126 07:00:31.155603 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ab14021b-87d7-43d0-9357-e8739e2d7dd1-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "ab14021b-87d7-43d0-9357-e8739e2d7dd1" (UID: "ab14021b-87d7-43d0-9357-e8739e2d7dd1"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:00:31 crc kubenswrapper[4492]: I1126 07:00:31.160335 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ab14021b-87d7-43d0-9357-e8739e2d7dd1-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "ab14021b-87d7-43d0-9357-e8739e2d7dd1" (UID: "ab14021b-87d7-43d0-9357-e8739e2d7dd1"). InnerVolumeSpecName "console-oauth-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:00:31 crc kubenswrapper[4492]: I1126 07:00:31.238724 4492 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/ab14021b-87d7-43d0-9357-e8739e2d7dd1-console-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 07:00:31 crc kubenswrapper[4492]: I1126 07:00:31.238765 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zh69p\" (UniqueName: \"kubernetes.io/projected/ab14021b-87d7-43d0-9357-e8739e2d7dd1-kube-api-access-zh69p\") on node \"crc\" DevicePath \"\"" Nov 26 07:00:31 crc kubenswrapper[4492]: I1126 07:00:31.238777 4492 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/ab14021b-87d7-43d0-9357-e8739e2d7dd1-console-oauth-config\") on node \"crc\" DevicePath \"\"" Nov 26 07:00:31 crc kubenswrapper[4492]: I1126 07:00:31.881489 4492 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-v92zj_ab14021b-87d7-43d0-9357-e8739e2d7dd1/console/0.log" Nov 26 07:00:31 crc kubenswrapper[4492]: I1126 07:00:31.881825 4492 generic.go:334] "Generic (PLEG): container finished" podID="ab14021b-87d7-43d0-9357-e8739e2d7dd1" containerID="4e2cb884e92c900bb12aee1b7e5e6bb4c2bd784620b3706043ba58dd9625d7f8" exitCode=2 Nov 26 07:00:31 crc kubenswrapper[4492]: I1126 07:00:31.881856 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-v92zj" event={"ID":"ab14021b-87d7-43d0-9357-e8739e2d7dd1","Type":"ContainerDied","Data":"4e2cb884e92c900bb12aee1b7e5e6bb4c2bd784620b3706043ba58dd9625d7f8"} Nov 26 07:00:31 crc kubenswrapper[4492]: I1126 07:00:31.881897 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-v92zj" event={"ID":"ab14021b-87d7-43d0-9357-e8739e2d7dd1","Type":"ContainerDied","Data":"ffb766f75f3c234db5489b1d5ad6ab429fdbfd6516cc6ddc63e738f64f79987f"} Nov 26 07:00:31 crc kubenswrapper[4492]: I1126 07:00:31.881918 4492 scope.go:117] "RemoveContainer" containerID="4e2cb884e92c900bb12aee1b7e5e6bb4c2bd784620b3706043ba58dd9625d7f8" Nov 26 07:00:31 crc kubenswrapper[4492]: I1126 07:00:31.882047 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-v92zj" Nov 26 07:00:31 crc kubenswrapper[4492]: I1126 07:00:31.904057 4492 scope.go:117] "RemoveContainer" containerID="4e2cb884e92c900bb12aee1b7e5e6bb4c2bd784620b3706043ba58dd9625d7f8" Nov 26 07:00:31 crc kubenswrapper[4492]: E1126 07:00:31.905378 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4e2cb884e92c900bb12aee1b7e5e6bb4c2bd784620b3706043ba58dd9625d7f8\": container with ID starting with 4e2cb884e92c900bb12aee1b7e5e6bb4c2bd784620b3706043ba58dd9625d7f8 not found: ID does not exist" containerID="4e2cb884e92c900bb12aee1b7e5e6bb4c2bd784620b3706043ba58dd9625d7f8" Nov 26 07:00:31 crc kubenswrapper[4492]: I1126 07:00:31.905477 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4e2cb884e92c900bb12aee1b7e5e6bb4c2bd784620b3706043ba58dd9625d7f8"} err="failed to get container status \"4e2cb884e92c900bb12aee1b7e5e6bb4c2bd784620b3706043ba58dd9625d7f8\": rpc error: code = NotFound desc = could not find container \"4e2cb884e92c900bb12aee1b7e5e6bb4c2bd784620b3706043ba58dd9625d7f8\": container with ID starting with 4e2cb884e92c900bb12aee1b7e5e6bb4c2bd784620b3706043ba58dd9625d7f8 not found: ID does not exist" Nov 26 07:00:31 crc kubenswrapper[4492]: I1126 07:00:31.931729 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-v92zj"] Nov 26 07:00:31 crc kubenswrapper[4492]: I1126 07:00:31.941542 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-f9d7485db-v92zj"] Nov 26 07:00:32 crc kubenswrapper[4492]: I1126 07:00:32.449713 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ab14021b-87d7-43d0-9357-e8739e2d7dd1" path="/var/lib/kubelet/pods/ab14021b-87d7-43d0-9357-e8739e2d7dd1/volumes" Nov 26 07:00:38 crc kubenswrapper[4492]: I1126 07:00:38.174539 4492 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Nov 26 07:00:39 crc kubenswrapper[4492]: I1126 07:00:39.237140 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-controller-manager-7cdffc5cb4-t2cq2"] Nov 26 07:00:39 crc kubenswrapper[4492]: E1126 07:00:39.237628 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ab14021b-87d7-43d0-9357-e8739e2d7dd1" containerName="console" Nov 26 07:00:39 crc kubenswrapper[4492]: I1126 07:00:39.237643 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="ab14021b-87d7-43d0-9357-e8739e2d7dd1" containerName="console" Nov 26 07:00:39 crc kubenswrapper[4492]: E1126 07:00:39.237655 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="55d8c7aa-c9df-4f3d-8bb2-9de824ff99eb" containerName="extract" Nov 26 07:00:39 crc kubenswrapper[4492]: I1126 07:00:39.237660 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="55d8c7aa-c9df-4f3d-8bb2-9de824ff99eb" containerName="extract" Nov 26 07:00:39 crc kubenswrapper[4492]: E1126 07:00:39.237678 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="55d8c7aa-c9df-4f3d-8bb2-9de824ff99eb" containerName="util" Nov 26 07:00:39 crc kubenswrapper[4492]: I1126 07:00:39.237683 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="55d8c7aa-c9df-4f3d-8bb2-9de824ff99eb" containerName="util" Nov 26 07:00:39 crc kubenswrapper[4492]: E1126 07:00:39.237694 4492 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="55d8c7aa-c9df-4f3d-8bb2-9de824ff99eb" containerName="pull" Nov 26 07:00:39 crc kubenswrapper[4492]: I1126 07:00:39.237699 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="55d8c7aa-c9df-4f3d-8bb2-9de824ff99eb" containerName="pull" Nov 26 07:00:39 crc kubenswrapper[4492]: I1126 07:00:39.237793 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="ab14021b-87d7-43d0-9357-e8739e2d7dd1" containerName="console" Nov 26 07:00:39 crc kubenswrapper[4492]: I1126 07:00:39.237805 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="55d8c7aa-c9df-4f3d-8bb2-9de824ff99eb" containerName="extract" Nov 26 07:00:39 crc kubenswrapper[4492]: I1126 07:00:39.238217 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-7cdffc5cb4-t2cq2" Nov 26 07:00:39 crc kubenswrapper[4492]: I1126 07:00:39.241084 4492 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-cert" Nov 26 07:00:39 crc kubenswrapper[4492]: I1126 07:00:39.241303 4492 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"manager-account-dockercfg-v6b6w" Nov 26 07:00:39 crc kubenswrapper[4492]: I1126 07:00:39.241459 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"openshift-service-ca.crt" Nov 26 07:00:39 crc kubenswrapper[4492]: I1126 07:00:39.242251 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"kube-root-ca.crt" Nov 26 07:00:39 crc kubenswrapper[4492]: I1126 07:00:39.248234 4492 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-controller-manager-service-cert" Nov 26 07:00:39 crc kubenswrapper[4492]: I1126 07:00:39.256436 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-7cdffc5cb4-t2cq2"] Nov 26 07:00:39 crc kubenswrapper[4492]: I1126 07:00:39.259727 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/3db88b6c-91ab-4253-82c0-49f8aa7c7c92-webhook-cert\") pod \"metallb-operator-controller-manager-7cdffc5cb4-t2cq2\" (UID: \"3db88b6c-91ab-4253-82c0-49f8aa7c7c92\") " pod="metallb-system/metallb-operator-controller-manager-7cdffc5cb4-t2cq2" Nov 26 07:00:39 crc kubenswrapper[4492]: I1126 07:00:39.259875 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vgllx\" (UniqueName: \"kubernetes.io/projected/3db88b6c-91ab-4253-82c0-49f8aa7c7c92-kube-api-access-vgllx\") pod \"metallb-operator-controller-manager-7cdffc5cb4-t2cq2\" (UID: \"3db88b6c-91ab-4253-82c0-49f8aa7c7c92\") " pod="metallb-system/metallb-operator-controller-manager-7cdffc5cb4-t2cq2" Nov 26 07:00:39 crc kubenswrapper[4492]: I1126 07:00:39.259995 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/3db88b6c-91ab-4253-82c0-49f8aa7c7c92-apiservice-cert\") pod \"metallb-operator-controller-manager-7cdffc5cb4-t2cq2\" (UID: \"3db88b6c-91ab-4253-82c0-49f8aa7c7c92\") " pod="metallb-system/metallb-operator-controller-manager-7cdffc5cb4-t2cq2" Nov 26 07:00:39 crc kubenswrapper[4492]: I1126 07:00:39.361324 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: 
\"kubernetes.io/secret/3db88b6c-91ab-4253-82c0-49f8aa7c7c92-webhook-cert\") pod \"metallb-operator-controller-manager-7cdffc5cb4-t2cq2\" (UID: \"3db88b6c-91ab-4253-82c0-49f8aa7c7c92\") " pod="metallb-system/metallb-operator-controller-manager-7cdffc5cb4-t2cq2" Nov 26 07:00:39 crc kubenswrapper[4492]: I1126 07:00:39.361394 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vgllx\" (UniqueName: \"kubernetes.io/projected/3db88b6c-91ab-4253-82c0-49f8aa7c7c92-kube-api-access-vgllx\") pod \"metallb-operator-controller-manager-7cdffc5cb4-t2cq2\" (UID: \"3db88b6c-91ab-4253-82c0-49f8aa7c7c92\") " pod="metallb-system/metallb-operator-controller-manager-7cdffc5cb4-t2cq2" Nov 26 07:00:39 crc kubenswrapper[4492]: I1126 07:00:39.361422 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/3db88b6c-91ab-4253-82c0-49f8aa7c7c92-apiservice-cert\") pod \"metallb-operator-controller-manager-7cdffc5cb4-t2cq2\" (UID: \"3db88b6c-91ab-4253-82c0-49f8aa7c7c92\") " pod="metallb-system/metallb-operator-controller-manager-7cdffc5cb4-t2cq2" Nov 26 07:00:39 crc kubenswrapper[4492]: I1126 07:00:39.367779 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/3db88b6c-91ab-4253-82c0-49f8aa7c7c92-webhook-cert\") pod \"metallb-operator-controller-manager-7cdffc5cb4-t2cq2\" (UID: \"3db88b6c-91ab-4253-82c0-49f8aa7c7c92\") " pod="metallb-system/metallb-operator-controller-manager-7cdffc5cb4-t2cq2" Nov 26 07:00:39 crc kubenswrapper[4492]: I1126 07:00:39.370702 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/3db88b6c-91ab-4253-82c0-49f8aa7c7c92-apiservice-cert\") pod \"metallb-operator-controller-manager-7cdffc5cb4-t2cq2\" (UID: \"3db88b6c-91ab-4253-82c0-49f8aa7c7c92\") " pod="metallb-system/metallb-operator-controller-manager-7cdffc5cb4-t2cq2" Nov 26 07:00:39 crc kubenswrapper[4492]: I1126 07:00:39.378831 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vgllx\" (UniqueName: \"kubernetes.io/projected/3db88b6c-91ab-4253-82c0-49f8aa7c7c92-kube-api-access-vgllx\") pod \"metallb-operator-controller-manager-7cdffc5cb4-t2cq2\" (UID: \"3db88b6c-91ab-4253-82c0-49f8aa7c7c92\") " pod="metallb-system/metallb-operator-controller-manager-7cdffc5cb4-t2cq2" Nov 26 07:00:39 crc kubenswrapper[4492]: I1126 07:00:39.551730 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-7cdffc5cb4-t2cq2" Nov 26 07:00:39 crc kubenswrapper[4492]: I1126 07:00:39.788228 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-webhook-server-68b4b8d76d-mj79p"] Nov 26 07:00:39 crc kubenswrapper[4492]: I1126 07:00:39.789207 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-68b4b8d76d-mj79p" Nov 26 07:00:39 crc kubenswrapper[4492]: I1126 07:00:39.793039 4492 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Nov 26 07:00:39 crc kubenswrapper[4492]: I1126 07:00:39.793352 4492 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-dockercfg-4btcd" Nov 26 07:00:39 crc kubenswrapper[4492]: I1126 07:00:39.794075 4492 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-service-cert" Nov 26 07:00:39 crc kubenswrapper[4492]: I1126 07:00:39.796713 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-7cdffc5cb4-t2cq2"] Nov 26 07:00:39 crc kubenswrapper[4492]: I1126 07:00:39.807687 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-68b4b8d76d-mj79p"] Nov 26 07:00:39 crc kubenswrapper[4492]: W1126 07:00:39.812423 4492 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3db88b6c_91ab_4253_82c0_49f8aa7c7c92.slice/crio-b797f01df6c081e8ed88a0cae716333a49ef51a6dbb05b300a517e976d54bd2e WatchSource:0}: Error finding container b797f01df6c081e8ed88a0cae716333a49ef51a6dbb05b300a517e976d54bd2e: Status 404 returned error can't find the container with id b797f01df6c081e8ed88a0cae716333a49ef51a6dbb05b300a517e976d54bd2e Nov 26 07:00:39 crc kubenswrapper[4492]: I1126 07:00:39.868760 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/14a96576-838d-4885-b92a-fc068709f1b9-apiservice-cert\") pod \"metallb-operator-webhook-server-68b4b8d76d-mj79p\" (UID: \"14a96576-838d-4885-b92a-fc068709f1b9\") " pod="metallb-system/metallb-operator-webhook-server-68b4b8d76d-mj79p" Nov 26 07:00:39 crc kubenswrapper[4492]: I1126 07:00:39.868966 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xlqs9\" (UniqueName: \"kubernetes.io/projected/14a96576-838d-4885-b92a-fc068709f1b9-kube-api-access-xlqs9\") pod \"metallb-operator-webhook-server-68b4b8d76d-mj79p\" (UID: \"14a96576-838d-4885-b92a-fc068709f1b9\") " pod="metallb-system/metallb-operator-webhook-server-68b4b8d76d-mj79p" Nov 26 07:00:39 crc kubenswrapper[4492]: I1126 07:00:39.869100 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/14a96576-838d-4885-b92a-fc068709f1b9-webhook-cert\") pod \"metallb-operator-webhook-server-68b4b8d76d-mj79p\" (UID: \"14a96576-838d-4885-b92a-fc068709f1b9\") " pod="metallb-system/metallb-operator-webhook-server-68b4b8d76d-mj79p" Nov 26 07:00:39 crc kubenswrapper[4492]: I1126 07:00:39.935923 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-7cdffc5cb4-t2cq2" event={"ID":"3db88b6c-91ab-4253-82c0-49f8aa7c7c92","Type":"ContainerStarted","Data":"b797f01df6c081e8ed88a0cae716333a49ef51a6dbb05b300a517e976d54bd2e"} Nov 26 07:00:39 crc kubenswrapper[4492]: I1126 07:00:39.970419 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xlqs9\" (UniqueName: \"kubernetes.io/projected/14a96576-838d-4885-b92a-fc068709f1b9-kube-api-access-xlqs9\") pod 
\"metallb-operator-webhook-server-68b4b8d76d-mj79p\" (UID: \"14a96576-838d-4885-b92a-fc068709f1b9\") " pod="metallb-system/metallb-operator-webhook-server-68b4b8d76d-mj79p" Nov 26 07:00:39 crc kubenswrapper[4492]: I1126 07:00:39.970538 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/14a96576-838d-4885-b92a-fc068709f1b9-webhook-cert\") pod \"metallb-operator-webhook-server-68b4b8d76d-mj79p\" (UID: \"14a96576-838d-4885-b92a-fc068709f1b9\") " pod="metallb-system/metallb-operator-webhook-server-68b4b8d76d-mj79p" Nov 26 07:00:39 crc kubenswrapper[4492]: I1126 07:00:39.970706 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/14a96576-838d-4885-b92a-fc068709f1b9-apiservice-cert\") pod \"metallb-operator-webhook-server-68b4b8d76d-mj79p\" (UID: \"14a96576-838d-4885-b92a-fc068709f1b9\") " pod="metallb-system/metallb-operator-webhook-server-68b4b8d76d-mj79p" Nov 26 07:00:39 crc kubenswrapper[4492]: I1126 07:00:39.978380 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/14a96576-838d-4885-b92a-fc068709f1b9-apiservice-cert\") pod \"metallb-operator-webhook-server-68b4b8d76d-mj79p\" (UID: \"14a96576-838d-4885-b92a-fc068709f1b9\") " pod="metallb-system/metallb-operator-webhook-server-68b4b8d76d-mj79p" Nov 26 07:00:39 crc kubenswrapper[4492]: I1126 07:00:39.979730 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/14a96576-838d-4885-b92a-fc068709f1b9-webhook-cert\") pod \"metallb-operator-webhook-server-68b4b8d76d-mj79p\" (UID: \"14a96576-838d-4885-b92a-fc068709f1b9\") " pod="metallb-system/metallb-operator-webhook-server-68b4b8d76d-mj79p" Nov 26 07:00:39 crc kubenswrapper[4492]: I1126 07:00:39.986893 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xlqs9\" (UniqueName: \"kubernetes.io/projected/14a96576-838d-4885-b92a-fc068709f1b9-kube-api-access-xlqs9\") pod \"metallb-operator-webhook-server-68b4b8d76d-mj79p\" (UID: \"14a96576-838d-4885-b92a-fc068709f1b9\") " pod="metallb-system/metallb-operator-webhook-server-68b4b8d76d-mj79p" Nov 26 07:00:40 crc kubenswrapper[4492]: I1126 07:00:40.128582 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-68b4b8d76d-mj79p" Nov 26 07:00:40 crc kubenswrapper[4492]: I1126 07:00:40.340957 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-68b4b8d76d-mj79p"] Nov 26 07:00:40 crc kubenswrapper[4492]: W1126 07:00:40.343644 4492 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod14a96576_838d_4885_b92a_fc068709f1b9.slice/crio-b373bfa1d57d7490e27128945855e88b1c75db558148abe2977b002687c90786 WatchSource:0}: Error finding container b373bfa1d57d7490e27128945855e88b1c75db558148abe2977b002687c90786: Status 404 returned error can't find the container with id b373bfa1d57d7490e27128945855e88b1c75db558148abe2977b002687c90786 Nov 26 07:00:40 crc kubenswrapper[4492]: I1126 07:00:40.942697 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-68b4b8d76d-mj79p" event={"ID":"14a96576-838d-4885-b92a-fc068709f1b9","Type":"ContainerStarted","Data":"b373bfa1d57d7490e27128945855e88b1c75db558148abe2977b002687c90786"} Nov 26 07:00:43 crc kubenswrapper[4492]: I1126 07:00:43.966360 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-7cdffc5cb4-t2cq2" event={"ID":"3db88b6c-91ab-4253-82c0-49f8aa7c7c92","Type":"ContainerStarted","Data":"98088de54cac710f97cf9ec6e3da36447a9d15bfb1858d5d479c390ed386932b"} Nov 26 07:00:43 crc kubenswrapper[4492]: I1126 07:00:43.967140 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-controller-manager-7cdffc5cb4-t2cq2" Nov 26 07:00:43 crc kubenswrapper[4492]: I1126 07:00:43.988146 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-controller-manager-7cdffc5cb4-t2cq2" podStartSLOduration=1.396289994 podStartE2EDuration="4.988122808s" podCreationTimestamp="2025-11-26 07:00:39 +0000 UTC" firstStartedPulling="2025-11-26 07:00:39.825555583 +0000 UTC m=+735.709443881" lastFinishedPulling="2025-11-26 07:00:43.417388397 +0000 UTC m=+739.301276695" observedRunningTime="2025-11-26 07:00:43.984141983 +0000 UTC m=+739.868030281" watchObservedRunningTime="2025-11-26 07:00:43.988122808 +0000 UTC m=+739.872011106" Nov 26 07:00:45 crc kubenswrapper[4492]: I1126 07:00:45.984619 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-68b4b8d76d-mj79p" event={"ID":"14a96576-838d-4885-b92a-fc068709f1b9","Type":"ContainerStarted","Data":"6e642c295939865c30b56e5d21a23ccd39cad6f0d4e96d6f7c8902aac2ddb84e"} Nov 26 07:00:46 crc kubenswrapper[4492]: I1126 07:00:46.007221 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-webhook-server-68b4b8d76d-mj79p" podStartSLOduration=1.804923426 podStartE2EDuration="7.007206632s" podCreationTimestamp="2025-11-26 07:00:39 +0000 UTC" firstStartedPulling="2025-11-26 07:00:40.345769362 +0000 UTC m=+736.229657660" lastFinishedPulling="2025-11-26 07:00:45.548052567 +0000 UTC m=+741.431940866" observedRunningTime="2025-11-26 07:00:46.003574363 +0000 UTC m=+741.887462661" watchObservedRunningTime="2025-11-26 07:00:46.007206632 +0000 UTC m=+741.891094920" Nov 26 07:00:46 crc kubenswrapper[4492]: I1126 07:00:46.989239 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-webhook-server-68b4b8d76d-mj79p" 
Nov 26 07:00:49 crc kubenswrapper[4492]: I1126 07:00:49.441757 4492 patch_prober.go:28] interesting pod/machine-config-daemon-6blv7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 26 07:00:49 crc kubenswrapper[4492]: I1126 07:00:49.441816 4492 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 26 07:01:00 crc kubenswrapper[4492]: I1126 07:01:00.133502 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-webhook-server-68b4b8d76d-mj79p"
Nov 26 07:01:19 crc kubenswrapper[4492]: I1126 07:01:19.441845 4492 patch_prober.go:28] interesting pod/machine-config-daemon-6blv7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 26 07:01:19 crc kubenswrapper[4492]: I1126 07:01:19.442534 4492 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 26 07:01:19 crc kubenswrapper[4492]: I1126 07:01:19.442583 4492 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-6blv7"
Nov 26 07:01:19 crc kubenswrapper[4492]: I1126 07:01:19.442952 4492 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"3a3d7f8ad7361b1a0985dafaf6a7904b1bcd29d5ae978e67890841e77797b9ac"} pod="openshift-machine-config-operator/machine-config-daemon-6blv7" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 26 07:01:19 crc kubenswrapper[4492]: I1126 07:01:19.443013 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" containerID="cri-o://3a3d7f8ad7361b1a0985dafaf6a7904b1bcd29d5ae978e67890841e77797b9ac" gracePeriod=600
Nov 26 07:01:19 crc kubenswrapper[4492]: I1126 07:01:19.555040 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-controller-manager-7cdffc5cb4-t2cq2"
Nov 26 07:01:20 crc kubenswrapper[4492]: I1126 07:01:20.101492 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-xqdvg"]
Nov 26 07:01:20 crc kubenswrapper[4492]: I1126 07:01:20.103847 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-xqdvg"
Nov 26 07:01:20 crc kubenswrapper[4492]: I1126 07:01:20.108226 4492 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-daemon-dockercfg-5prkk"
Nov 26 07:01:20 crc kubenswrapper[4492]: I1126 07:01:20.108405 4492 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-certs-secret"
Nov 26 07:01:20 crc kubenswrapper[4492]: I1126 07:01:20.108520 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"frr-startup"
Nov 26 07:01:20 crc kubenswrapper[4492]: I1126 07:01:20.110052 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-webhook-server-6998585d5-tjq4p"]
Nov 26 07:01:20 crc kubenswrapper[4492]: I1126 07:01:20.110680 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-6998585d5-tjq4p"
Nov 26 07:01:20 crc kubenswrapper[4492]: I1126 07:01:20.118486 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/371572e9-0a49-4840-9d23-4c4af6448a2d-metrics\") pod \"frr-k8s-xqdvg\" (UID: \"371572e9-0a49-4840-9d23-4c4af6448a2d\") " pod="metallb-system/frr-k8s-xqdvg"
Nov 26 07:01:20 crc kubenswrapper[4492]: I1126 07:01:20.118537 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/952d9cf7-bc05-410a-a113-33fee9c8fe00-cert\") pod \"frr-k8s-webhook-server-6998585d5-tjq4p\" (UID: \"952d9cf7-bc05-410a-a113-33fee9c8fe00\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-tjq4p"
Nov 26 07:01:20 crc kubenswrapper[4492]: I1126 07:01:20.118578 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/371572e9-0a49-4840-9d23-4c4af6448a2d-frr-sockets\") pod \"frr-k8s-xqdvg\" (UID: \"371572e9-0a49-4840-9d23-4c4af6448a2d\") " pod="metallb-system/frr-k8s-xqdvg"
Nov 26 07:01:20 crc kubenswrapper[4492]: I1126 07:01:20.118593 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j5ng6\" (UniqueName: \"kubernetes.io/projected/952d9cf7-bc05-410a-a113-33fee9c8fe00-kube-api-access-j5ng6\") pod \"frr-k8s-webhook-server-6998585d5-tjq4p\" (UID: \"952d9cf7-bc05-410a-a113-33fee9c8fe00\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-tjq4p"
Nov 26 07:01:20 crc kubenswrapper[4492]: I1126 07:01:20.118616 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/371572e9-0a49-4840-9d23-4c4af6448a2d-frr-conf\") pod \"frr-k8s-xqdvg\" (UID: \"371572e9-0a49-4840-9d23-4c4af6448a2d\") " pod="metallb-system/frr-k8s-xqdvg"
Nov 26 07:01:20 crc kubenswrapper[4492]: I1126 07:01:20.118658 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/371572e9-0a49-4840-9d23-4c4af6448a2d-reloader\") pod \"frr-k8s-xqdvg\" (UID: \"371572e9-0a49-4840-9d23-4c4af6448a2d\") " pod="metallb-system/frr-k8s-xqdvg"
Nov 26 07:01:20 crc kubenswrapper[4492]: I1126 07:01:20.118690 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5xkqr\" (UniqueName: \"kubernetes.io/projected/371572e9-0a49-4840-9d23-4c4af6448a2d-kube-api-access-5xkqr\") pod \"frr-k8s-xqdvg\" (UID: \"371572e9-0a49-4840-9d23-4c4af6448a2d\") " pod="metallb-system/frr-k8s-xqdvg"
Nov 26 07:01:20 crc kubenswrapper[4492]: I1126 07:01:20.118705 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/371572e9-0a49-4840-9d23-4c4af6448a2d-metrics-certs\") pod \"frr-k8s-xqdvg\" (UID: \"371572e9-0a49-4840-9d23-4c4af6448a2d\") " pod="metallb-system/frr-k8s-xqdvg"
Nov 26 07:01:20 crc kubenswrapper[4492]: I1126 07:01:20.118738 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/371572e9-0a49-4840-9d23-4c4af6448a2d-frr-startup\") pod \"frr-k8s-xqdvg\" (UID: \"371572e9-0a49-4840-9d23-4c4af6448a2d\") " pod="metallb-system/frr-k8s-xqdvg"
Nov 26 07:01:20 crc kubenswrapper[4492]: I1126 07:01:20.121650 4492 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-webhook-server-cert"
Nov 26 07:01:20 crc kubenswrapper[4492]: I1126 07:01:20.129490 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-6998585d5-tjq4p"]
Nov 26 07:01:20 crc kubenswrapper[4492]: I1126 07:01:20.196233 4492 generic.go:334] "Generic (PLEG): container finished" podID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerID="3a3d7f8ad7361b1a0985dafaf6a7904b1bcd29d5ae978e67890841e77797b9ac" exitCode=0
Nov 26 07:01:20 crc kubenswrapper[4492]: I1126 07:01:20.196284 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" event={"ID":"04bf18ad-d2a1-4b30-a3fa-2b6247363c82","Type":"ContainerDied","Data":"3a3d7f8ad7361b1a0985dafaf6a7904b1bcd29d5ae978e67890841e77797b9ac"}
Nov 26 07:01:20 crc kubenswrapper[4492]: I1126 07:01:20.196316 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" event={"ID":"04bf18ad-d2a1-4b30-a3fa-2b6247363c82","Type":"ContainerStarted","Data":"a50e8acb1a9896b6c0b164453458208e77d6a13aa21290189661d9ca53c2668b"}
Nov 26 07:01:20 crc kubenswrapper[4492]: I1126 07:01:20.196336 4492 scope.go:117] "RemoveContainer" containerID="d4fa1ef97b74bcf353b427d83c9b4b11261068cbcdfde33a49de78b55d802254"
Nov 26 07:01:20 crc kubenswrapper[4492]: I1126 07:01:20.216017 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/speaker-b64gw"]
Nov 26 07:01:20 crc kubenswrapper[4492]: I1126 07:01:20.217720 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-b64gw"
Nov 26 07:01:20 crc kubenswrapper[4492]: I1126 07:01:20.220004 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/371572e9-0a49-4840-9d23-4c4af6448a2d-metrics\") pod \"frr-k8s-xqdvg\" (UID: \"371572e9-0a49-4840-9d23-4c4af6448a2d\") " pod="metallb-system/frr-k8s-xqdvg"
Nov 26 07:01:20 crc kubenswrapper[4492]: I1126 07:01:20.220091 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/952d9cf7-bc05-410a-a113-33fee9c8fe00-cert\") pod \"frr-k8s-webhook-server-6998585d5-tjq4p\" (UID: \"952d9cf7-bc05-410a-a113-33fee9c8fe00\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-tjq4p"
Nov 26 07:01:20 crc kubenswrapper[4492]: I1126 07:01:20.220135 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6hjgb\" (UniqueName: \"kubernetes.io/projected/ef9cb016-afe4-4d45-a3cc-a2dac1533aef-kube-api-access-6hjgb\") pod \"speaker-b64gw\" (UID: \"ef9cb016-afe4-4d45-a3cc-a2dac1533aef\") " pod="metallb-system/speaker-b64gw"
Nov 26 07:01:20 crc kubenswrapper[4492]: I1126 07:01:20.220164 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/371572e9-0a49-4840-9d23-4c4af6448a2d-frr-sockets\") pod \"frr-k8s-xqdvg\" (UID: \"371572e9-0a49-4840-9d23-4c4af6448a2d\") " pod="metallb-system/frr-k8s-xqdvg"
Nov 26 07:01:20 crc kubenswrapper[4492]: I1126 07:01:20.220195 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j5ng6\" (UniqueName: \"kubernetes.io/projected/952d9cf7-bc05-410a-a113-33fee9c8fe00-kube-api-access-j5ng6\") pod \"frr-k8s-webhook-server-6998585d5-tjq4p\" (UID: \"952d9cf7-bc05-410a-a113-33fee9c8fe00\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-tjq4p"
Nov 26 07:01:20 crc kubenswrapper[4492]: I1126 07:01:20.220219 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/ef9cb016-afe4-4d45-a3cc-a2dac1533aef-metallb-excludel2\") pod \"speaker-b64gw\" (UID: \"ef9cb016-afe4-4d45-a3cc-a2dac1533aef\") " pod="metallb-system/speaker-b64gw"
Nov 26 07:01:20 crc kubenswrapper[4492]: I1126 07:01:20.220237 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/ef9cb016-afe4-4d45-a3cc-a2dac1533aef-metrics-certs\") pod \"speaker-b64gw\" (UID: \"ef9cb016-afe4-4d45-a3cc-a2dac1533aef\") " pod="metallb-system/speaker-b64gw"
Nov 26 07:01:20 crc kubenswrapper[4492]: I1126 07:01:20.220258 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/371572e9-0a49-4840-9d23-4c4af6448a2d-frr-conf\") pod \"frr-k8s-xqdvg\" (UID: \"371572e9-0a49-4840-9d23-4c4af6448a2d\") " pod="metallb-system/frr-k8s-xqdvg"
Nov 26 07:01:20 crc kubenswrapper[4492]: I1126 07:01:20.220280 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/371572e9-0a49-4840-9d23-4c4af6448a2d-reloader\") pod \"frr-k8s-xqdvg\" (UID: \"371572e9-0a49-4840-9d23-4c4af6448a2d\") " pod="metallb-system/frr-k8s-xqdvg"
Nov 26 07:01:20 crc kubenswrapper[4492]: I1126 07:01:20.220301 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5xkqr\" (UniqueName: \"kubernetes.io/projected/371572e9-0a49-4840-9d23-4c4af6448a2d-kube-api-access-5xkqr\") pod \"frr-k8s-xqdvg\" (UID: \"371572e9-0a49-4840-9d23-4c4af6448a2d\") " pod="metallb-system/frr-k8s-xqdvg"
Nov 26 07:01:20 crc kubenswrapper[4492]: I1126 07:01:20.220316 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/371572e9-0a49-4840-9d23-4c4af6448a2d-metrics-certs\") pod \"frr-k8s-xqdvg\" (UID: \"371572e9-0a49-4840-9d23-4c4af6448a2d\") " pod="metallb-system/frr-k8s-xqdvg"
Nov 26 07:01:20 crc kubenswrapper[4492]: I1126 07:01:20.220367 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/371572e9-0a49-4840-9d23-4c4af6448a2d-frr-startup\") pod \"frr-k8s-xqdvg\" (UID: \"371572e9-0a49-4840-9d23-4c4af6448a2d\") " pod="metallb-system/frr-k8s-xqdvg"
Nov 26 07:01:20 crc kubenswrapper[4492]: I1126 07:01:20.220386 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/ef9cb016-afe4-4d45-a3cc-a2dac1533aef-memberlist\") pod \"speaker-b64gw\" (UID: \"ef9cb016-afe4-4d45-a3cc-a2dac1533aef\") " pod="metallb-system/speaker-b64gw"
Nov 26 07:01:20 crc kubenswrapper[4492]: I1126 07:01:20.220779 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/371572e9-0a49-4840-9d23-4c4af6448a2d-metrics\") pod \"frr-k8s-xqdvg\" (UID: \"371572e9-0a49-4840-9d23-4c4af6448a2d\") " pod="metallb-system/frr-k8s-xqdvg"
Nov 26 07:01:20 crc kubenswrapper[4492]: E1126 07:01:20.222030 4492 secret.go:188] Couldn't get secret metallb-system/frr-k8s-webhook-server-cert: secret "frr-k8s-webhook-server-cert" not found
Nov 26 07:01:20 crc kubenswrapper[4492]: E1126 07:01:20.222080 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/952d9cf7-bc05-410a-a113-33fee9c8fe00-cert podName:952d9cf7-bc05-410a-a113-33fee9c8fe00 nodeName:}" failed. No retries permitted until 2025-11-26 07:01:20.722065506 +0000 UTC m=+776.605953804 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/952d9cf7-bc05-410a-a113-33fee9c8fe00-cert") pod "frr-k8s-webhook-server-6998585d5-tjq4p" (UID: "952d9cf7-bc05-410a-a113-33fee9c8fe00") : secret "frr-k8s-webhook-server-cert" not found
Nov 26 07:01:20 crc kubenswrapper[4492]: I1126 07:01:20.222634 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/371572e9-0a49-4840-9d23-4c4af6448a2d-frr-sockets\") pod \"frr-k8s-xqdvg\" (UID: \"371572e9-0a49-4840-9d23-4c4af6448a2d\") " pod="metallb-system/frr-k8s-xqdvg"
Nov 26 07:01:20 crc kubenswrapper[4492]: I1126 07:01:20.223107 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/371572e9-0a49-4840-9d23-4c4af6448a2d-frr-conf\") pod \"frr-k8s-xqdvg\" (UID: \"371572e9-0a49-4840-9d23-4c4af6448a2d\") " pod="metallb-system/frr-k8s-xqdvg"
Nov 26 07:01:20 crc kubenswrapper[4492]: I1126 07:01:20.223335 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/371572e9-0a49-4840-9d23-4c4af6448a2d-reloader\") pod \"frr-k8s-xqdvg\" (UID: \"371572e9-0a49-4840-9d23-4c4af6448a2d\") " pod="metallb-system/frr-k8s-xqdvg"
Nov 26 07:01:20 crc kubenswrapper[4492]: E1126 07:01:20.223562 4492 secret.go:188] Couldn't get secret metallb-system/frr-k8s-certs-secret: secret "frr-k8s-certs-secret" not found
Nov 26 07:01:20 crc kubenswrapper[4492]: E1126 07:01:20.223600 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/371572e9-0a49-4840-9d23-4c4af6448a2d-metrics-certs podName:371572e9-0a49-4840-9d23-4c4af6448a2d nodeName:}" failed. No retries permitted until 2025-11-26 07:01:20.723588749 +0000 UTC m=+776.607477047 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/371572e9-0a49-4840-9d23-4c4af6448a2d-metrics-certs") pod "frr-k8s-xqdvg" (UID: "371572e9-0a49-4840-9d23-4c4af6448a2d") : secret "frr-k8s-certs-secret" not found
Nov 26 07:01:20 crc kubenswrapper[4492]: I1126 07:01:20.224221 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/371572e9-0a49-4840-9d23-4c4af6448a2d-frr-startup\") pod \"frr-k8s-xqdvg\" (UID: \"371572e9-0a49-4840-9d23-4c4af6448a2d\") " pod="metallb-system/frr-k8s-xqdvg"
Nov 26 07:01:20 crc kubenswrapper[4492]: I1126 07:01:20.227485 4492 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-certs-secret"
Nov 26 07:01:20 crc kubenswrapper[4492]: I1126 07:01:20.229292 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"metallb-excludel2"
Nov 26 07:01:20 crc kubenswrapper[4492]: I1126 07:01:20.229439 4492 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-dockercfg-5bpwm"
Nov 26 07:01:20 crc kubenswrapper[4492]: I1126 07:01:20.234239 4492 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-memberlist"
Nov 26 07:01:20 crc kubenswrapper[4492]: I1126 07:01:20.252480 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/controller-6c7b4b5f48-vsr46"]
Nov 26 07:01:20 crc kubenswrapper[4492]: I1126 07:01:20.253603 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/controller-6c7b4b5f48-vsr46"
Nov 26 07:01:20 crc kubenswrapper[4492]: I1126 07:01:20.258305 4492 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-certs-secret"
Nov 26 07:01:20 crc kubenswrapper[4492]: I1126 07:01:20.260708 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5xkqr\" (UniqueName: \"kubernetes.io/projected/371572e9-0a49-4840-9d23-4c4af6448a2d-kube-api-access-5xkqr\") pod \"frr-k8s-xqdvg\" (UID: \"371572e9-0a49-4840-9d23-4c4af6448a2d\") " pod="metallb-system/frr-k8s-xqdvg"
Nov 26 07:01:20 crc kubenswrapper[4492]: I1126 07:01:20.263364 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j5ng6\" (UniqueName: \"kubernetes.io/projected/952d9cf7-bc05-410a-a113-33fee9c8fe00-kube-api-access-j5ng6\") pod \"frr-k8s-webhook-server-6998585d5-tjq4p\" (UID: \"952d9cf7-bc05-410a-a113-33fee9c8fe00\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-tjq4p"
Nov 26 07:01:20 crc kubenswrapper[4492]: I1126 07:01:20.267237 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-6c7b4b5f48-vsr46"]
Nov 26 07:01:20 crc kubenswrapper[4492]: I1126 07:01:20.321142 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/09a90602-11ab-4ecc-9b36-e4e2cb6839c9-metrics-certs\") pod \"controller-6c7b4b5f48-vsr46\" (UID: \"09a90602-11ab-4ecc-9b36-e4e2cb6839c9\") " pod="metallb-system/controller-6c7b4b5f48-vsr46"
Nov 26 07:01:20 crc kubenswrapper[4492]: I1126 07:01:20.321237 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/09a90602-11ab-4ecc-9b36-e4e2cb6839c9-cert\") pod \"controller-6c7b4b5f48-vsr46\" (UID: \"09a90602-11ab-4ecc-9b36-e4e2cb6839c9\") " pod="metallb-system/controller-6c7b4b5f48-vsr46"
Nov 26 07:01:20 crc kubenswrapper[4492]: I1126 07:01:20.321263 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/ef9cb016-afe4-4d45-a3cc-a2dac1533aef-memberlist\") pod \"speaker-b64gw\" (UID: \"ef9cb016-afe4-4d45-a3cc-a2dac1533aef\") " pod="metallb-system/speaker-b64gw"
Nov 26 07:01:20 crc kubenswrapper[4492]: I1126 07:01:20.321338 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6hjgb\" (UniqueName: \"kubernetes.io/projected/ef9cb016-afe4-4d45-a3cc-a2dac1533aef-kube-api-access-6hjgb\") pod \"speaker-b64gw\" (UID: \"ef9cb016-afe4-4d45-a3cc-a2dac1533aef\") " pod="metallb-system/speaker-b64gw"
Nov 26 07:01:20 crc kubenswrapper[4492]: I1126 07:01:20.321379 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/ef9cb016-afe4-4d45-a3cc-a2dac1533aef-metallb-excludel2\") pod \"speaker-b64gw\" (UID: \"ef9cb016-afe4-4d45-a3cc-a2dac1533aef\") " pod="metallb-system/speaker-b64gw"
Nov 26 07:01:20 crc kubenswrapper[4492]: I1126 07:01:20.321396 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/ef9cb016-afe4-4d45-a3cc-a2dac1533aef-metrics-certs\") pod \"speaker-b64gw\" (UID: \"ef9cb016-afe4-4d45-a3cc-a2dac1533aef\") " pod="metallb-system/speaker-b64gw"
Nov 26 07:01:20 crc kubenswrapper[4492]: I1126 07:01:20.321421 4492 reconciler_common.go:245]
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-22zfm\" (UniqueName: \"kubernetes.io/projected/09a90602-11ab-4ecc-9b36-e4e2cb6839c9-kube-api-access-22zfm\") pod \"controller-6c7b4b5f48-vsr46\" (UID: \"09a90602-11ab-4ecc-9b36-e4e2cb6839c9\") " pod="metallb-system/controller-6c7b4b5f48-vsr46" Nov 26 07:01:20 crc kubenswrapper[4492]: E1126 07:01:20.321579 4492 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Nov 26 07:01:20 crc kubenswrapper[4492]: E1126 07:01:20.321633 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ef9cb016-afe4-4d45-a3cc-a2dac1533aef-memberlist podName:ef9cb016-afe4-4d45-a3cc-a2dac1533aef nodeName:}" failed. No retries permitted until 2025-11-26 07:01:20.821620344 +0000 UTC m=+776.705508643 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/ef9cb016-afe4-4d45-a3cc-a2dac1533aef-memberlist") pod "speaker-b64gw" (UID: "ef9cb016-afe4-4d45-a3cc-a2dac1533aef") : secret "metallb-memberlist" not found Nov 26 07:01:20 crc kubenswrapper[4492]: E1126 07:01:20.321877 4492 secret.go:188] Couldn't get secret metallb-system/speaker-certs-secret: secret "speaker-certs-secret" not found Nov 26 07:01:20 crc kubenswrapper[4492]: E1126 07:01:20.322046 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ef9cb016-afe4-4d45-a3cc-a2dac1533aef-metrics-certs podName:ef9cb016-afe4-4d45-a3cc-a2dac1533aef nodeName:}" failed. No retries permitted until 2025-11-26 07:01:20.82202745 +0000 UTC m=+776.705915748 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/ef9cb016-afe4-4d45-a3cc-a2dac1533aef-metrics-certs") pod "speaker-b64gw" (UID: "ef9cb016-afe4-4d45-a3cc-a2dac1533aef") : secret "speaker-certs-secret" not found Nov 26 07:01:20 crc kubenswrapper[4492]: I1126 07:01:20.322791 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/ef9cb016-afe4-4d45-a3cc-a2dac1533aef-metallb-excludel2\") pod \"speaker-b64gw\" (UID: \"ef9cb016-afe4-4d45-a3cc-a2dac1533aef\") " pod="metallb-system/speaker-b64gw" Nov 26 07:01:20 crc kubenswrapper[4492]: I1126 07:01:20.335013 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6hjgb\" (UniqueName: \"kubernetes.io/projected/ef9cb016-afe4-4d45-a3cc-a2dac1533aef-kube-api-access-6hjgb\") pod \"speaker-b64gw\" (UID: \"ef9cb016-afe4-4d45-a3cc-a2dac1533aef\") " pod="metallb-system/speaker-b64gw" Nov 26 07:01:20 crc kubenswrapper[4492]: I1126 07:01:20.422741 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/09a90602-11ab-4ecc-9b36-e4e2cb6839c9-metrics-certs\") pod \"controller-6c7b4b5f48-vsr46\" (UID: \"09a90602-11ab-4ecc-9b36-e4e2cb6839c9\") " pod="metallb-system/controller-6c7b4b5f48-vsr46" Nov 26 07:01:20 crc kubenswrapper[4492]: I1126 07:01:20.422833 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/09a90602-11ab-4ecc-9b36-e4e2cb6839c9-cert\") pod \"controller-6c7b4b5f48-vsr46\" (UID: \"09a90602-11ab-4ecc-9b36-e4e2cb6839c9\") " pod="metallb-system/controller-6c7b4b5f48-vsr46" Nov 26 07:01:20 crc kubenswrapper[4492]: I1126 07:01:20.423053 4492 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"kube-api-access-22zfm\" (UniqueName: \"kubernetes.io/projected/09a90602-11ab-4ecc-9b36-e4e2cb6839c9-kube-api-access-22zfm\") pod \"controller-6c7b4b5f48-vsr46\" (UID: \"09a90602-11ab-4ecc-9b36-e4e2cb6839c9\") " pod="metallb-system/controller-6c7b4b5f48-vsr46" Nov 26 07:01:20 crc kubenswrapper[4492]: I1126 07:01:20.427025 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/09a90602-11ab-4ecc-9b36-e4e2cb6839c9-metrics-certs\") pod \"controller-6c7b4b5f48-vsr46\" (UID: \"09a90602-11ab-4ecc-9b36-e4e2cb6839c9\") " pod="metallb-system/controller-6c7b4b5f48-vsr46" Nov 26 07:01:20 crc kubenswrapper[4492]: I1126 07:01:20.427327 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/09a90602-11ab-4ecc-9b36-e4e2cb6839c9-cert\") pod \"controller-6c7b4b5f48-vsr46\" (UID: \"09a90602-11ab-4ecc-9b36-e4e2cb6839c9\") " pod="metallb-system/controller-6c7b4b5f48-vsr46" Nov 26 07:01:20 crc kubenswrapper[4492]: I1126 07:01:20.443557 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-22zfm\" (UniqueName: \"kubernetes.io/projected/09a90602-11ab-4ecc-9b36-e4e2cb6839c9-kube-api-access-22zfm\") pod \"controller-6c7b4b5f48-vsr46\" (UID: \"09a90602-11ab-4ecc-9b36-e4e2cb6839c9\") " pod="metallb-system/controller-6c7b4b5f48-vsr46" Nov 26 07:01:20 crc kubenswrapper[4492]: I1126 07:01:20.604023 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/controller-6c7b4b5f48-vsr46" Nov 26 07:01:20 crc kubenswrapper[4492]: I1126 07:01:20.726825 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/952d9cf7-bc05-410a-a113-33fee9c8fe00-cert\") pod \"frr-k8s-webhook-server-6998585d5-tjq4p\" (UID: \"952d9cf7-bc05-410a-a113-33fee9c8fe00\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-tjq4p" Nov 26 07:01:20 crc kubenswrapper[4492]: I1126 07:01:20.727199 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/371572e9-0a49-4840-9d23-4c4af6448a2d-metrics-certs\") pod \"frr-k8s-xqdvg\" (UID: \"371572e9-0a49-4840-9d23-4c4af6448a2d\") " pod="metallb-system/frr-k8s-xqdvg" Nov 26 07:01:20 crc kubenswrapper[4492]: I1126 07:01:20.732512 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/952d9cf7-bc05-410a-a113-33fee9c8fe00-cert\") pod \"frr-k8s-webhook-server-6998585d5-tjq4p\" (UID: \"952d9cf7-bc05-410a-a113-33fee9c8fe00\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-tjq4p" Nov 26 07:01:20 crc kubenswrapper[4492]: I1126 07:01:20.734093 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/371572e9-0a49-4840-9d23-4c4af6448a2d-metrics-certs\") pod \"frr-k8s-xqdvg\" (UID: \"371572e9-0a49-4840-9d23-4c4af6448a2d\") " pod="metallb-system/frr-k8s-xqdvg" Nov 26 07:01:20 crc kubenswrapper[4492]: I1126 07:01:20.790414 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-6c7b4b5f48-vsr46"] Nov 26 07:01:20 crc kubenswrapper[4492]: I1126 07:01:20.827998 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/ef9cb016-afe4-4d45-a3cc-a2dac1533aef-metrics-certs\") pod \"speaker-b64gw\" (UID: 
\"ef9cb016-afe4-4d45-a3cc-a2dac1533aef\") " pod="metallb-system/speaker-b64gw" Nov 26 07:01:20 crc kubenswrapper[4492]: I1126 07:01:20.828148 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/ef9cb016-afe4-4d45-a3cc-a2dac1533aef-memberlist\") pod \"speaker-b64gw\" (UID: \"ef9cb016-afe4-4d45-a3cc-a2dac1533aef\") " pod="metallb-system/speaker-b64gw" Nov 26 07:01:20 crc kubenswrapper[4492]: E1126 07:01:20.828351 4492 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Nov 26 07:01:20 crc kubenswrapper[4492]: E1126 07:01:20.828439 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ef9cb016-afe4-4d45-a3cc-a2dac1533aef-memberlist podName:ef9cb016-afe4-4d45-a3cc-a2dac1533aef nodeName:}" failed. No retries permitted until 2025-11-26 07:01:21.828425444 +0000 UTC m=+777.712313742 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/ef9cb016-afe4-4d45-a3cc-a2dac1533aef-memberlist") pod "speaker-b64gw" (UID: "ef9cb016-afe4-4d45-a3cc-a2dac1533aef") : secret "metallb-memberlist" not found Nov 26 07:01:20 crc kubenswrapper[4492]: I1126 07:01:20.832858 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/ef9cb016-afe4-4d45-a3cc-a2dac1533aef-metrics-certs\") pod \"speaker-b64gw\" (UID: \"ef9cb016-afe4-4d45-a3cc-a2dac1533aef\") " pod="metallb-system/speaker-b64gw" Nov 26 07:01:21 crc kubenswrapper[4492]: I1126 07:01:21.015105 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-xqdvg" Nov 26 07:01:21 crc kubenswrapper[4492]: I1126 07:01:21.024404 4492 util.go:30] "No sandbox for pod can be found. 
Nov 26 07:01:21 crc kubenswrapper[4492]: I1126 07:01:21.205896 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6c7b4b5f48-vsr46" event={"ID":"09a90602-11ab-4ecc-9b36-e4e2cb6839c9","Type":"ContainerStarted","Data":"0e075d764c9792a2beef6c8db02d6ede1b8772e08fe8d97f711e2adc1a491779"}
Nov 26 07:01:21 crc kubenswrapper[4492]: I1126 07:01:21.206149 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6c7b4b5f48-vsr46" event={"ID":"09a90602-11ab-4ecc-9b36-e4e2cb6839c9","Type":"ContainerStarted","Data":"14cf51518a45ecbc35d38b58c01acb3171933f86714e54f06c71911d6fb76e87"}
Nov 26 07:01:21 crc kubenswrapper[4492]: I1126 07:01:21.206167 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6c7b4b5f48-vsr46" event={"ID":"09a90602-11ab-4ecc-9b36-e4e2cb6839c9","Type":"ContainerStarted","Data":"985e0909302077c413aa96caaccb731e7f1fb3c112dbdc4cbd392296a554bff8"}
Nov 26 07:01:21 crc kubenswrapper[4492]: I1126 07:01:21.206202 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/controller-6c7b4b5f48-vsr46"
Nov 26 07:01:21 crc kubenswrapper[4492]: I1126 07:01:21.206894 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-xqdvg" event={"ID":"371572e9-0a49-4840-9d23-4c4af6448a2d","Type":"ContainerStarted","Data":"816ad74c3d76e2ffacbb8b6a048e0766a9806ae1eba56453deb398fa4a6d34e2"}
Nov 26 07:01:21 crc kubenswrapper[4492]: I1126 07:01:21.220034 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/controller-6c7b4b5f48-vsr46" podStartSLOduration=1.220006859 podStartE2EDuration="1.220006859s" podCreationTimestamp="2025-11-26 07:01:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:01:21.217957074 +0000 UTC m=+777.101845372" watchObservedRunningTime="2025-11-26 07:01:21.220006859 +0000 UTC m=+777.103895157"
Nov 26 07:01:21 crc kubenswrapper[4492]: I1126 07:01:21.390165 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-6998585d5-tjq4p"]
Nov 26 07:01:21 crc kubenswrapper[4492]: W1126 07:01:21.394601 4492 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod952d9cf7_bc05_410a_a113_33fee9c8fe00.slice/crio-04fc2a4b0ca08f392f19c4baa735acc7d042fe60d86ca3f5c9aa5913d1e2609f WatchSource:0}: Error finding container 04fc2a4b0ca08f392f19c4baa735acc7d042fe60d86ca3f5c9aa5913d1e2609f: Status 404 returned error can't find the container with id 04fc2a4b0ca08f392f19c4baa735acc7d042fe60d86ca3f5c9aa5913d1e2609f
Nov 26 07:01:21 crc kubenswrapper[4492]: I1126 07:01:21.842380 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/ef9cb016-afe4-4d45-a3cc-a2dac1533aef-memberlist\") pod \"speaker-b64gw\" (UID: \"ef9cb016-afe4-4d45-a3cc-a2dac1533aef\") " pod="metallb-system/speaker-b64gw"
Nov 26 07:01:21 crc kubenswrapper[4492]: I1126 07:01:21.850807 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/ef9cb016-afe4-4d45-a3cc-a2dac1533aef-memberlist\") pod \"speaker-b64gw\" (UID: \"ef9cb016-afe4-4d45-a3cc-a2dac1533aef\") " pod="metallb-system/speaker-b64gw"
Nov 26 07:01:22 crc kubenswrapper[4492]: I1126 07:01:22.039422 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-b64gw"
Nov 26 07:01:22 crc kubenswrapper[4492]: W1126 07:01:22.068735 4492 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podef9cb016_afe4_4d45_a3cc_a2dac1533aef.slice/crio-9ba7d7c788728553e35f14737ddd754f812c42ac9f1935143bc43cfe61b55716 WatchSource:0}: Error finding container 9ba7d7c788728553e35f14737ddd754f812c42ac9f1935143bc43cfe61b55716: Status 404 returned error can't find the container with id 9ba7d7c788728553e35f14737ddd754f812c42ac9f1935143bc43cfe61b55716
Nov 26 07:01:22 crc kubenswrapper[4492]: I1126 07:01:22.274625 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-6998585d5-tjq4p" event={"ID":"952d9cf7-bc05-410a-a113-33fee9c8fe00","Type":"ContainerStarted","Data":"04fc2a4b0ca08f392f19c4baa735acc7d042fe60d86ca3f5c9aa5913d1e2609f"}
Nov 26 07:01:22 crc kubenswrapper[4492]: I1126 07:01:22.275991 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-b64gw" event={"ID":"ef9cb016-afe4-4d45-a3cc-a2dac1533aef","Type":"ContainerStarted","Data":"9ba7d7c788728553e35f14737ddd754f812c42ac9f1935143bc43cfe61b55716"}
Nov 26 07:01:23 crc kubenswrapper[4492]: I1126 07:01:23.288482 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-b64gw" event={"ID":"ef9cb016-afe4-4d45-a3cc-a2dac1533aef","Type":"ContainerStarted","Data":"d6d545715e174d905a4a90c678680509d2dfe12a47dbf0f4017dcabe4c82cc71"}
Nov 26 07:01:23 crc kubenswrapper[4492]: I1126 07:01:23.288761 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/speaker-b64gw"
Nov 26 07:01:23 crc kubenswrapper[4492]: I1126 07:01:23.288782 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-b64gw" event={"ID":"ef9cb016-afe4-4d45-a3cc-a2dac1533aef","Type":"ContainerStarted","Data":"df7c271a2741d07fbfb44304cd37c80081e05473a7748a6993dba9866af992be"}
Nov 26 07:01:23 crc kubenswrapper[4492]: I1126 07:01:23.312066 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/speaker-b64gw" podStartSLOduration=3.312050469 podStartE2EDuration="3.312050469s" podCreationTimestamp="2025-11-26 07:01:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:01:23.311516365 +0000 UTC m=+779.195404653" watchObservedRunningTime="2025-11-26 07:01:23.312050469 +0000 UTC m=+779.195938768"
Nov 26 07:01:28 crc kubenswrapper[4492]: I1126 07:01:28.330766 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-6998585d5-tjq4p" event={"ID":"952d9cf7-bc05-410a-a113-33fee9c8fe00","Type":"ContainerStarted","Data":"ae2b891b62582c7cda74ace584b6f0a72bb0e8c7aefb18579a02bf5e0bb8cc48"}
Nov 26 07:01:28 crc kubenswrapper[4492]: I1126 07:01:28.331405 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-webhook-server-6998585d5-tjq4p"
Nov 26 07:01:28 crc kubenswrapper[4492]: I1126 07:01:28.332584 4492 generic.go:334] "Generic (PLEG): container finished" podID="371572e9-0a49-4840-9d23-4c4af6448a2d" containerID="5ab56b8855477514e9ec678f43f6e22a01aecf1d0c68cd091561e56881a0f894" exitCode=0
Nov 26 07:01:28 crc kubenswrapper[4492]: I1126 07:01:28.332655 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-xqdvg" event={"ID":"371572e9-0a49-4840-9d23-4c4af6448a2d","Type":"ContainerDied","Data":"5ab56b8855477514e9ec678f43f6e22a01aecf1d0c68cd091561e56881a0f894"}
Nov 26 07:01:28 crc kubenswrapper[4492]: I1126 07:01:28.353389 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-webhook-server-6998585d5-tjq4p" podStartSLOduration=1.6508542720000001 podStartE2EDuration="8.353375204s" podCreationTimestamp="2025-11-26 07:01:20 +0000 UTC" firstStartedPulling="2025-11-26 07:01:21.397283357 +0000 UTC m=+777.281171655" lastFinishedPulling="2025-11-26 07:01:28.099804289 +0000 UTC m=+783.983692587" observedRunningTime="2025-11-26 07:01:28.350749817 +0000 UTC m=+784.234638114" watchObservedRunningTime="2025-11-26 07:01:28.353375204 +0000 UTC m=+784.237263502"
Nov 26 07:01:29 crc kubenswrapper[4492]: I1126 07:01:29.344159 4492 generic.go:334] "Generic (PLEG): container finished" podID="371572e9-0a49-4840-9d23-4c4af6448a2d" containerID="d210d428a33595fa3349e58143e25c61e4088321718098493c78c1f21b53a986" exitCode=0
Nov 26 07:01:29 crc kubenswrapper[4492]: I1126 07:01:29.344260 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-xqdvg" event={"ID":"371572e9-0a49-4840-9d23-4c4af6448a2d","Type":"ContainerDied","Data":"d210d428a33595fa3349e58143e25c61e4088321718098493c78c1f21b53a986"}
Nov 26 07:01:30 crc kubenswrapper[4492]: I1126 07:01:30.354289 4492 generic.go:334] "Generic (PLEG): container finished" podID="371572e9-0a49-4840-9d23-4c4af6448a2d" containerID="dbdc31dfceef72aec1cd0a1c8c0703f8fda27083351624945fbe66ef6c74079c" exitCode=0
Nov 26 07:01:30 crc kubenswrapper[4492]: I1126 07:01:30.354355 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-xqdvg" event={"ID":"371572e9-0a49-4840-9d23-4c4af6448a2d","Type":"ContainerDied","Data":"dbdc31dfceef72aec1cd0a1c8c0703f8fda27083351624945fbe66ef6c74079c"}
Nov 26 07:01:30 crc kubenswrapper[4492]: I1126 07:01:30.608553 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/controller-6c7b4b5f48-vsr46"
Nov 26 07:01:31 crc kubenswrapper[4492]: I1126 07:01:31.366504 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-xqdvg" event={"ID":"371572e9-0a49-4840-9d23-4c4af6448a2d","Type":"ContainerStarted","Data":"36b533a5b2b60df3e0543b6875c6471af01f5bac1499c4de21ee0b0dd41671f1"}
Nov 26 07:01:31 crc kubenswrapper[4492]: I1126 07:01:31.366866 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-xqdvg"
Nov 26 07:01:31 crc kubenswrapper[4492]: I1126 07:01:31.366892 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-xqdvg" event={"ID":"371572e9-0a49-4840-9d23-4c4af6448a2d","Type":"ContainerStarted","Data":"c595da678687d1f0368d350a7d9f8ac17d0aae9e716919537250d8949583a6f6"}
Nov 26 07:01:31 crc kubenswrapper[4492]: I1126 07:01:31.366903 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-xqdvg" event={"ID":"371572e9-0a49-4840-9d23-4c4af6448a2d","Type":"ContainerStarted","Data":"98f79e00b238a72188ce927f31f4462352dd8c574e9181bf45c25e6bd91e2c68"}
Nov 26 07:01:31 crc kubenswrapper[4492]: I1126 07:01:31.366913 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-xqdvg" event={"ID":"371572e9-0a49-4840-9d23-4c4af6448a2d","Type":"ContainerStarted","Data":"51d9b0c7c6183462eb60a42d2312f11c4f643178708dfcbfb1b1fdd8627e2767"}
Nov 26 07:01:31 crc kubenswrapper[4492]: I1126 07:01:31.366921 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-xqdvg" event={"ID":"371572e9-0a49-4840-9d23-4c4af6448a2d","Type":"ContainerStarted","Data":"7141ccb2fbb55ac42d56bfd72bf64b5b173d175644e151e4edabb9934e4174d7"}
Nov 26 07:01:31 crc kubenswrapper[4492]: I1126 07:01:31.366931 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-xqdvg" event={"ID":"371572e9-0a49-4840-9d23-4c4af6448a2d","Type":"ContainerStarted","Data":"e120348bd138d9d1b5ffa377c05f6bbc62bb8bc35be3d4fdacb6c1daff2a8d68"}
Nov 26 07:01:32 crc kubenswrapper[4492]: I1126 07:01:32.044224 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/speaker-b64gw"
Nov 26 07:01:32 crc kubenswrapper[4492]: I1126 07:01:32.062319 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-xqdvg" podStartSLOduration=5.055626175 podStartE2EDuration="12.062282098s" podCreationTimestamp="2025-11-26 07:01:20 +0000 UTC" firstStartedPulling="2025-11-26 07:01:21.106928495 +0000 UTC m=+776.990816793" lastFinishedPulling="2025-11-26 07:01:28.113584417 +0000 UTC m=+783.997472716" observedRunningTime="2025-11-26 07:01:31.398052348 +0000 UTC m=+787.281940646" watchObservedRunningTime="2025-11-26 07:01:32.062282098 +0000 UTC m=+787.946170395"
Nov 26 07:01:34 crc kubenswrapper[4492]: I1126 07:01:34.063500 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-4zc4p"]
Nov 26 07:01:34 crc kubenswrapper[4492]: I1126 07:01:34.064454 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-4zc4p"
Nov 26 07:01:34 crc kubenswrapper[4492]: I1126 07:01:34.069150 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-index-dockercfg-lzxr5"
Nov 26 07:01:34 crc kubenswrapper[4492]: I1126 07:01:34.069484 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"openshift-service-ca.crt"
Nov 26 07:01:34 crc kubenswrapper[4492]: I1126 07:01:34.070356 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"kube-root-ca.crt"
Nov 26 07:01:34 crc kubenswrapper[4492]: I1126 07:01:34.107035 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-4zc4p"]
Nov 26 07:01:34 crc kubenswrapper[4492]: I1126 07:01:34.147149 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dq4wp\" (UniqueName: \"kubernetes.io/projected/585ce147-2e6e-4e8e-9c2d-cf9743914d71-kube-api-access-dq4wp\") pod \"openstack-operator-index-4zc4p\" (UID: \"585ce147-2e6e-4e8e-9c2d-cf9743914d71\") " pod="openstack-operators/openstack-operator-index-4zc4p"
Nov 26 07:01:34 crc kubenswrapper[4492]: I1126 07:01:34.248912 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dq4wp\" (UniqueName: \"kubernetes.io/projected/585ce147-2e6e-4e8e-9c2d-cf9743914d71-kube-api-access-dq4wp\") pod \"openstack-operator-index-4zc4p\" (UID: \"585ce147-2e6e-4e8e-9c2d-cf9743914d71\") " pod="openstack-operators/openstack-operator-index-4zc4p"
Nov 26 07:01:34 crc kubenswrapper[4492]: I1126 07:01:34.274836 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dq4wp\" (UniqueName: \"kubernetes.io/projected/585ce147-2e6e-4e8e-9c2d-cf9743914d71-kube-api-access-dq4wp\") pod \"openstack-operator-index-4zc4p\" (UID: \"585ce147-2e6e-4e8e-9c2d-cf9743914d71\") " pod="openstack-operators/openstack-operator-index-4zc4p"
Nov 26 07:01:34 crc kubenswrapper[4492]: I1126 07:01:34.379505 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-4zc4p"
Nov 26 07:01:34 crc kubenswrapper[4492]: I1126 07:01:34.762108 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-4zc4p"]
Nov 26 07:01:35 crc kubenswrapper[4492]: I1126 07:01:35.400873 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-4zc4p" event={"ID":"585ce147-2e6e-4e8e-9c2d-cf9743914d71","Type":"ContainerStarted","Data":"0ef142fd11e2085ad83266cb76e9ba118443950d745b34affa3820b4f2445ba7"}
Nov 26 07:01:36 crc kubenswrapper[4492]: I1126 07:01:36.015349 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="metallb-system/frr-k8s-xqdvg"
Nov 26 07:01:36 crc kubenswrapper[4492]: I1126 07:01:36.048702 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="metallb-system/frr-k8s-xqdvg"
Nov 26 07:01:36 crc kubenswrapper[4492]: I1126 07:01:36.410266 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-4zc4p" event={"ID":"585ce147-2e6e-4e8e-9c2d-cf9743914d71","Type":"ContainerStarted","Data":"35af89b73eaeaafb800ae3c43de8ab430c6feb4276cd583e316bcfbdf939a0a3"}
Nov 26 07:01:36 crc kubenswrapper[4492]: I1126 07:01:36.446419 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-4zc4p" podStartSLOduration=1.6100533719999999 podStartE2EDuration="2.446396342s" podCreationTimestamp="2025-11-26 07:01:34 +0000 UTC" firstStartedPulling="2025-11-26 07:01:34.770478435 +0000 UTC m=+790.654366733" lastFinishedPulling="2025-11-26 07:01:35.606821405 +0000 UTC m=+791.490709703" observedRunningTime="2025-11-26 07:01:36.444290872 +0000 UTC m=+792.328179171" watchObservedRunningTime="2025-11-26 07:01:36.446396342 +0000 UTC m=+792.330284639"
Nov 26 07:01:37 crc kubenswrapper[4492]: I1126 07:01:37.443444 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-4zc4p"]
Nov 26 07:01:38 crc kubenswrapper[4492]: I1126 07:01:38.048197 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-tkbsb"]
Nov 26 07:01:38 crc kubenswrapper[4492]: I1126 07:01:38.049016 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-tkbsb"
Nov 26 07:01:38 crc kubenswrapper[4492]: I1126 07:01:38.061665 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-tkbsb"]
Nov 26 07:01:38 crc kubenswrapper[4492]: I1126 07:01:38.108647 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fvgqq\" (UniqueName: \"kubernetes.io/projected/4fc81162-3bdb-48ba-b606-69846989d72e-kube-api-access-fvgqq\") pod \"openstack-operator-index-tkbsb\" (UID: \"4fc81162-3bdb-48ba-b606-69846989d72e\") " pod="openstack-operators/openstack-operator-index-tkbsb"
Nov 26 07:01:38 crc kubenswrapper[4492]: I1126 07:01:38.210163 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fvgqq\" (UniqueName: \"kubernetes.io/projected/4fc81162-3bdb-48ba-b606-69846989d72e-kube-api-access-fvgqq\") pod \"openstack-operator-index-tkbsb\" (UID: \"4fc81162-3bdb-48ba-b606-69846989d72e\") " pod="openstack-operators/openstack-operator-index-tkbsb"
Nov 26 07:01:38 crc kubenswrapper[4492]: I1126 07:01:38.229042 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fvgqq\" (UniqueName: \"kubernetes.io/projected/4fc81162-3bdb-48ba-b606-69846989d72e-kube-api-access-fvgqq\") pod \"openstack-operator-index-tkbsb\" (UID: \"4fc81162-3bdb-48ba-b606-69846989d72e\") " pod="openstack-operators/openstack-operator-index-tkbsb"
Nov 26 07:01:38 crc kubenswrapper[4492]: I1126 07:01:38.368040 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-tkbsb"
Nov 26 07:01:38 crc kubenswrapper[4492]: I1126 07:01:38.423945 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/openstack-operator-index-4zc4p" podUID="585ce147-2e6e-4e8e-9c2d-cf9743914d71" containerName="registry-server" containerID="cri-o://35af89b73eaeaafb800ae3c43de8ab430c6feb4276cd583e316bcfbdf939a0a3" gracePeriod=2
Nov 26 07:01:38 crc kubenswrapper[4492]: I1126 07:01:38.753971 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-4zc4p"
Nov 26 07:01:38 crc kubenswrapper[4492]: I1126 07:01:38.758349 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-tkbsb"]
Nov 26 07:01:38 crc kubenswrapper[4492]: W1126 07:01:38.772465 4492 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4fc81162_3bdb_48ba_b606_69846989d72e.slice/crio-dec98cdbca7c81a325ff9c25ea778ffcc3b8e6aec3651126fdd2150a1cd17873 WatchSource:0}: Error finding container dec98cdbca7c81a325ff9c25ea778ffcc3b8e6aec3651126fdd2150a1cd17873: Status 404 returned error can't find the container with id dec98cdbca7c81a325ff9c25ea778ffcc3b8e6aec3651126fdd2150a1cd17873
Nov 26 07:01:38 crc kubenswrapper[4492]: I1126 07:01:38.920476 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dq4wp\" (UniqueName: \"kubernetes.io/projected/585ce147-2e6e-4e8e-9c2d-cf9743914d71-kube-api-access-dq4wp\") pod \"585ce147-2e6e-4e8e-9c2d-cf9743914d71\" (UID: \"585ce147-2e6e-4e8e-9c2d-cf9743914d71\") "
Nov 26 07:01:38 crc kubenswrapper[4492]: I1126 07:01:38.926549 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/585ce147-2e6e-4e8e-9c2d-cf9743914d71-kube-api-access-dq4wp" (OuterVolumeSpecName: "kube-api-access-dq4wp") pod "585ce147-2e6e-4e8e-9c2d-cf9743914d71" (UID: "585ce147-2e6e-4e8e-9c2d-cf9743914d71"). InnerVolumeSpecName "kube-api-access-dq4wp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 07:01:39 crc kubenswrapper[4492]: I1126 07:01:39.022268 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dq4wp\" (UniqueName: \"kubernetes.io/projected/585ce147-2e6e-4e8e-9c2d-cf9743914d71-kube-api-access-dq4wp\") on node \"crc\" DevicePath \"\""
Nov 26 07:01:39 crc kubenswrapper[4492]: I1126 07:01:39.434991 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-tkbsb" event={"ID":"4fc81162-3bdb-48ba-b606-69846989d72e","Type":"ContainerStarted","Data":"53984f7f024c585ba2bb20494d6ccdbf23451419c5b552c07c113ef4b26cd6b0"}
Nov 26 07:01:39 crc kubenswrapper[4492]: I1126 07:01:39.435405 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-tkbsb" event={"ID":"4fc81162-3bdb-48ba-b606-69846989d72e","Type":"ContainerStarted","Data":"dec98cdbca7c81a325ff9c25ea778ffcc3b8e6aec3651126fdd2150a1cd17873"}
Nov 26 07:01:39 crc kubenswrapper[4492]: I1126 07:01:39.438459 4492 generic.go:334] "Generic (PLEG): container finished" podID="585ce147-2e6e-4e8e-9c2d-cf9743914d71" containerID="35af89b73eaeaafb800ae3c43de8ab430c6feb4276cd583e316bcfbdf939a0a3" exitCode=0
Nov 26 07:01:39 crc kubenswrapper[4492]: I1126 07:01:39.438490 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-4zc4p" event={"ID":"585ce147-2e6e-4e8e-9c2d-cf9743914d71","Type":"ContainerDied","Data":"35af89b73eaeaafb800ae3c43de8ab430c6feb4276cd583e316bcfbdf939a0a3"}
Nov 26 07:01:39 crc kubenswrapper[4492]: I1126 07:01:39.438507 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-4zc4p" event={"ID":"585ce147-2e6e-4e8e-9c2d-cf9743914d71","Type":"ContainerDied","Data":"0ef142fd11e2085ad83266cb76e9ba118443950d745b34affa3820b4f2445ba7"}
Nov 26 07:01:39 crc kubenswrapper[4492]: I1126 07:01:39.438526 4492 scope.go:117] "RemoveContainer" containerID="35af89b73eaeaafb800ae3c43de8ab430c6feb4276cd583e316bcfbdf939a0a3"
Nov 26 07:01:39 crc kubenswrapper[4492]: I1126 07:01:39.438622 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-4zc4p"
Nov 26 07:01:39 crc kubenswrapper[4492]: I1126 07:01:39.448968 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-tkbsb" podStartSLOduration=0.928566052 podStartE2EDuration="1.448947303s" podCreationTimestamp="2025-11-26 07:01:38 +0000 UTC" firstStartedPulling="2025-11-26 07:01:38.778000293 +0000 UTC m=+794.661888591" lastFinishedPulling="2025-11-26 07:01:39.298381544 +0000 UTC m=+795.182269842" observedRunningTime="2025-11-26 07:01:39.448498779 +0000 UTC m=+795.332387076" watchObservedRunningTime="2025-11-26 07:01:39.448947303 +0000 UTC m=+795.332835600"
Nov 26 07:01:39 crc kubenswrapper[4492]: I1126 07:01:39.456837 4492 scope.go:117] "RemoveContainer" containerID="35af89b73eaeaafb800ae3c43de8ab430c6feb4276cd583e316bcfbdf939a0a3"
Nov 26 07:01:39 crc kubenswrapper[4492]: E1126 07:01:39.457868 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"35af89b73eaeaafb800ae3c43de8ab430c6feb4276cd583e316bcfbdf939a0a3\": container with ID starting with 35af89b73eaeaafb800ae3c43de8ab430c6feb4276cd583e316bcfbdf939a0a3 not found: ID does not exist" containerID="35af89b73eaeaafb800ae3c43de8ab430c6feb4276cd583e316bcfbdf939a0a3"
Nov 26 07:01:39 crc kubenswrapper[4492]: I1126 07:01:39.457916 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"35af89b73eaeaafb800ae3c43de8ab430c6feb4276cd583e316bcfbdf939a0a3"} err="failed to get container status \"35af89b73eaeaafb800ae3c43de8ab430c6feb4276cd583e316bcfbdf939a0a3\": rpc error: code = NotFound desc = could not find container \"35af89b73eaeaafb800ae3c43de8ab430c6feb4276cd583e316bcfbdf939a0a3\": container with ID starting with 35af89b73eaeaafb800ae3c43de8ab430c6feb4276cd583e316bcfbdf939a0a3 not found: ID does not exist"
Nov 26 07:01:39 crc kubenswrapper[4492]: I1126 07:01:39.466599 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-4zc4p"]
Nov 26 07:01:39 crc kubenswrapper[4492]: I1126 07:01:39.471113 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/openstack-operator-index-4zc4p"]
Nov 26 07:01:40 crc kubenswrapper[4492]: I1126 07:01:40.445747 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="585ce147-2e6e-4e8e-9c2d-cf9743914d71" path="/var/lib/kubelet/pods/585ce147-2e6e-4e8e-9c2d-cf9743914d71/volumes"
Nov 26 07:01:41 crc kubenswrapper[4492]: I1126 07:01:41.017938 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-xqdvg"
Nov 26 07:01:41 crc kubenswrapper[4492]: I1126 07:01:41.028774 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-webhook-server-6998585d5-tjq4p"
Nov 26 07:01:48 crc kubenswrapper[4492]: I1126 07:01:48.368480 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-index-tkbsb"
Nov 26 07:01:48 crc kubenswrapper[4492]: I1126 07:01:48.369544 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/openstack-operator-index-tkbsb"
Nov 26 07:01:48 crc kubenswrapper[4492]: I1126 07:01:48.396758 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/openstack-operator-index-tkbsb"
kubenswrapper[4492]: I1126 07:01:48.396758 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/openstack-operator-index-tkbsb" Nov 26 07:01:48 crc kubenswrapper[4492]: I1126 07:01:48.514719 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-index-tkbsb" Nov 26 07:01:50 crc kubenswrapper[4492]: I1126 07:01:50.877815 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/a851be339ab153e8317811375455cffeec26c0cd29d35bc2f201b76006hkt5r"] Nov 26 07:01:50 crc kubenswrapper[4492]: E1126 07:01:50.878076 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="585ce147-2e6e-4e8e-9c2d-cf9743914d71" containerName="registry-server" Nov 26 07:01:50 crc kubenswrapper[4492]: I1126 07:01:50.878092 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="585ce147-2e6e-4e8e-9c2d-cf9743914d71" containerName="registry-server" Nov 26 07:01:50 crc kubenswrapper[4492]: I1126 07:01:50.878214 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="585ce147-2e6e-4e8e-9c2d-cf9743914d71" containerName="registry-server" Nov 26 07:01:50 crc kubenswrapper[4492]: I1126 07:01:50.878972 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/a851be339ab153e8317811375455cffeec26c0cd29d35bc2f201b76006hkt5r" Nov 26 07:01:50 crc kubenswrapper[4492]: I1126 07:01:50.881193 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-zdnwm" Nov 26 07:01:50 crc kubenswrapper[4492]: I1126 07:01:50.889341 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/a851be339ab153e8317811375455cffeec26c0cd29d35bc2f201b76006hkt5r"] Nov 26 07:01:50 crc kubenswrapper[4492]: I1126 07:01:50.970509 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/e2c560cd-2928-4dc3-a9a6-d97a50d3bf2f-bundle\") pod \"a851be339ab153e8317811375455cffeec26c0cd29d35bc2f201b76006hkt5r\" (UID: \"e2c560cd-2928-4dc3-a9a6-d97a50d3bf2f\") " pod="openstack-operators/a851be339ab153e8317811375455cffeec26c0cd29d35bc2f201b76006hkt5r" Nov 26 07:01:50 crc kubenswrapper[4492]: I1126 07:01:50.970582 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jsss6\" (UniqueName: \"kubernetes.io/projected/e2c560cd-2928-4dc3-a9a6-d97a50d3bf2f-kube-api-access-jsss6\") pod \"a851be339ab153e8317811375455cffeec26c0cd29d35bc2f201b76006hkt5r\" (UID: \"e2c560cd-2928-4dc3-a9a6-d97a50d3bf2f\") " pod="openstack-operators/a851be339ab153e8317811375455cffeec26c0cd29d35bc2f201b76006hkt5r" Nov 26 07:01:50 crc kubenswrapper[4492]: I1126 07:01:50.970686 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/e2c560cd-2928-4dc3-a9a6-d97a50d3bf2f-util\") pod \"a851be339ab153e8317811375455cffeec26c0cd29d35bc2f201b76006hkt5r\" (UID: \"e2c560cd-2928-4dc3-a9a6-d97a50d3bf2f\") " pod="openstack-operators/a851be339ab153e8317811375455cffeec26c0cd29d35bc2f201b76006hkt5r" Nov 26 07:01:51 crc kubenswrapper[4492]: I1126 07:01:51.071635 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/e2c560cd-2928-4dc3-a9a6-d97a50d3bf2f-util\") pod \"a851be339ab153e8317811375455cffeec26c0cd29d35bc2f201b76006hkt5r\" (UID: 
\"e2c560cd-2928-4dc3-a9a6-d97a50d3bf2f\") " pod="openstack-operators/a851be339ab153e8317811375455cffeec26c0cd29d35bc2f201b76006hkt5r" Nov 26 07:01:51 crc kubenswrapper[4492]: I1126 07:01:51.071733 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/e2c560cd-2928-4dc3-a9a6-d97a50d3bf2f-bundle\") pod \"a851be339ab153e8317811375455cffeec26c0cd29d35bc2f201b76006hkt5r\" (UID: \"e2c560cd-2928-4dc3-a9a6-d97a50d3bf2f\") " pod="openstack-operators/a851be339ab153e8317811375455cffeec26c0cd29d35bc2f201b76006hkt5r" Nov 26 07:01:51 crc kubenswrapper[4492]: I1126 07:01:51.071770 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jsss6\" (UniqueName: \"kubernetes.io/projected/e2c560cd-2928-4dc3-a9a6-d97a50d3bf2f-kube-api-access-jsss6\") pod \"a851be339ab153e8317811375455cffeec26c0cd29d35bc2f201b76006hkt5r\" (UID: \"e2c560cd-2928-4dc3-a9a6-d97a50d3bf2f\") " pod="openstack-operators/a851be339ab153e8317811375455cffeec26c0cd29d35bc2f201b76006hkt5r" Nov 26 07:01:51 crc kubenswrapper[4492]: I1126 07:01:51.072148 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/e2c560cd-2928-4dc3-a9a6-d97a50d3bf2f-util\") pod \"a851be339ab153e8317811375455cffeec26c0cd29d35bc2f201b76006hkt5r\" (UID: \"e2c560cd-2928-4dc3-a9a6-d97a50d3bf2f\") " pod="openstack-operators/a851be339ab153e8317811375455cffeec26c0cd29d35bc2f201b76006hkt5r" Nov 26 07:01:51 crc kubenswrapper[4492]: I1126 07:01:51.072222 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/e2c560cd-2928-4dc3-a9a6-d97a50d3bf2f-bundle\") pod \"a851be339ab153e8317811375455cffeec26c0cd29d35bc2f201b76006hkt5r\" (UID: \"e2c560cd-2928-4dc3-a9a6-d97a50d3bf2f\") " pod="openstack-operators/a851be339ab153e8317811375455cffeec26c0cd29d35bc2f201b76006hkt5r" Nov 26 07:01:51 crc kubenswrapper[4492]: I1126 07:01:51.090210 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jsss6\" (UniqueName: \"kubernetes.io/projected/e2c560cd-2928-4dc3-a9a6-d97a50d3bf2f-kube-api-access-jsss6\") pod \"a851be339ab153e8317811375455cffeec26c0cd29d35bc2f201b76006hkt5r\" (UID: \"e2c560cd-2928-4dc3-a9a6-d97a50d3bf2f\") " pod="openstack-operators/a851be339ab153e8317811375455cffeec26c0cd29d35bc2f201b76006hkt5r" Nov 26 07:01:51 crc kubenswrapper[4492]: I1126 07:01:51.192399 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/a851be339ab153e8317811375455cffeec26c0cd29d35bc2f201b76006hkt5r" Nov 26 07:01:51 crc kubenswrapper[4492]: I1126 07:01:51.561861 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/a851be339ab153e8317811375455cffeec26c0cd29d35bc2f201b76006hkt5r"] Nov 26 07:01:52 crc kubenswrapper[4492]: I1126 07:01:52.513647 4492 generic.go:334] "Generic (PLEG): container finished" podID="e2c560cd-2928-4dc3-a9a6-d97a50d3bf2f" containerID="4578d47f84cc6a857a747825c4d43beb1cd6931ab7cc8d01852d946f7b446bba" exitCode=0 Nov 26 07:01:52 crc kubenswrapper[4492]: I1126 07:01:52.513710 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/a851be339ab153e8317811375455cffeec26c0cd29d35bc2f201b76006hkt5r" event={"ID":"e2c560cd-2928-4dc3-a9a6-d97a50d3bf2f","Type":"ContainerDied","Data":"4578d47f84cc6a857a747825c4d43beb1cd6931ab7cc8d01852d946f7b446bba"} Nov 26 07:01:52 crc kubenswrapper[4492]: I1126 07:01:52.513746 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/a851be339ab153e8317811375455cffeec26c0cd29d35bc2f201b76006hkt5r" event={"ID":"e2c560cd-2928-4dc3-a9a6-d97a50d3bf2f","Type":"ContainerStarted","Data":"5a5ed169563e0d0a4b0c8bc31c67e16e198f3e11e93c5bcb9d33c3ccb6576114"} Nov 26 07:01:53 crc kubenswrapper[4492]: I1126 07:01:53.521873 4492 generic.go:334] "Generic (PLEG): container finished" podID="e2c560cd-2928-4dc3-a9a6-d97a50d3bf2f" containerID="d984c4e96bac2ca2da65388172d2797363a04a7b9a525dc9ba781c90559cf4be" exitCode=0 Nov 26 07:01:53 crc kubenswrapper[4492]: I1126 07:01:53.521958 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/a851be339ab153e8317811375455cffeec26c0cd29d35bc2f201b76006hkt5r" event={"ID":"e2c560cd-2928-4dc3-a9a6-d97a50d3bf2f","Type":"ContainerDied","Data":"d984c4e96bac2ca2da65388172d2797363a04a7b9a525dc9ba781c90559cf4be"} Nov 26 07:01:54 crc kubenswrapper[4492]: I1126 07:01:54.528863 4492 generic.go:334] "Generic (PLEG): container finished" podID="e2c560cd-2928-4dc3-a9a6-d97a50d3bf2f" containerID="699f38601108e20b82a2498c40089bb009c9658f98d90e8efff5594938877879" exitCode=0 Nov 26 07:01:54 crc kubenswrapper[4492]: I1126 07:01:54.528952 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/a851be339ab153e8317811375455cffeec26c0cd29d35bc2f201b76006hkt5r" event={"ID":"e2c560cd-2928-4dc3-a9a6-d97a50d3bf2f","Type":"ContainerDied","Data":"699f38601108e20b82a2498c40089bb009c9658f98d90e8efff5594938877879"} Nov 26 07:01:55 crc kubenswrapper[4492]: I1126 07:01:55.732270 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/a851be339ab153e8317811375455cffeec26c0cd29d35bc2f201b76006hkt5r" Nov 26 07:01:55 crc kubenswrapper[4492]: I1126 07:01:55.830200 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/e2c560cd-2928-4dc3-a9a6-d97a50d3bf2f-util\") pod \"e2c560cd-2928-4dc3-a9a6-d97a50d3bf2f\" (UID: \"e2c560cd-2928-4dc3-a9a6-d97a50d3bf2f\") " Nov 26 07:01:55 crc kubenswrapper[4492]: I1126 07:01:55.830251 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/e2c560cd-2928-4dc3-a9a6-d97a50d3bf2f-bundle\") pod \"e2c560cd-2928-4dc3-a9a6-d97a50d3bf2f\" (UID: \"e2c560cd-2928-4dc3-a9a6-d97a50d3bf2f\") " Nov 26 07:01:55 crc kubenswrapper[4492]: I1126 07:01:55.830288 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jsss6\" (UniqueName: \"kubernetes.io/projected/e2c560cd-2928-4dc3-a9a6-d97a50d3bf2f-kube-api-access-jsss6\") pod \"e2c560cd-2928-4dc3-a9a6-d97a50d3bf2f\" (UID: \"e2c560cd-2928-4dc3-a9a6-d97a50d3bf2f\") " Nov 26 07:01:55 crc kubenswrapper[4492]: I1126 07:01:55.831019 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e2c560cd-2928-4dc3-a9a6-d97a50d3bf2f-bundle" (OuterVolumeSpecName: "bundle") pod "e2c560cd-2928-4dc3-a9a6-d97a50d3bf2f" (UID: "e2c560cd-2928-4dc3-a9a6-d97a50d3bf2f"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:01:55 crc kubenswrapper[4492]: I1126 07:01:55.835913 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e2c560cd-2928-4dc3-a9a6-d97a50d3bf2f-kube-api-access-jsss6" (OuterVolumeSpecName: "kube-api-access-jsss6") pod "e2c560cd-2928-4dc3-a9a6-d97a50d3bf2f" (UID: "e2c560cd-2928-4dc3-a9a6-d97a50d3bf2f"). InnerVolumeSpecName "kube-api-access-jsss6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:01:55 crc kubenswrapper[4492]: I1126 07:01:55.840213 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e2c560cd-2928-4dc3-a9a6-d97a50d3bf2f-util" (OuterVolumeSpecName: "util") pod "e2c560cd-2928-4dc3-a9a6-d97a50d3bf2f" (UID: "e2c560cd-2928-4dc3-a9a6-d97a50d3bf2f"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:01:55 crc kubenswrapper[4492]: I1126 07:01:55.931584 4492 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/e2c560cd-2928-4dc3-a9a6-d97a50d3bf2f-util\") on node \"crc\" DevicePath \"\"" Nov 26 07:01:55 crc kubenswrapper[4492]: I1126 07:01:55.931620 4492 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/e2c560cd-2928-4dc3-a9a6-d97a50d3bf2f-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:01:55 crc kubenswrapper[4492]: I1126 07:01:55.931633 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jsss6\" (UniqueName: \"kubernetes.io/projected/e2c560cd-2928-4dc3-a9a6-d97a50d3bf2f-kube-api-access-jsss6\") on node \"crc\" DevicePath \"\"" Nov 26 07:01:56 crc kubenswrapper[4492]: I1126 07:01:56.543892 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/a851be339ab153e8317811375455cffeec26c0cd29d35bc2f201b76006hkt5r" event={"ID":"e2c560cd-2928-4dc3-a9a6-d97a50d3bf2f","Type":"ContainerDied","Data":"5a5ed169563e0d0a4b0c8bc31c67e16e198f3e11e93c5bcb9d33c3ccb6576114"} Nov 26 07:01:56 crc kubenswrapper[4492]: I1126 07:01:56.543946 4492 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5a5ed169563e0d0a4b0c8bc31c67e16e198f3e11e93c5bcb9d33c3ccb6576114" Nov 26 07:01:56 crc kubenswrapper[4492]: I1126 07:01:56.543968 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/a851be339ab153e8317811375455cffeec26c0cd29d35bc2f201b76006hkt5r" Nov 26 07:02:03 crc kubenswrapper[4492]: I1126 07:02:03.803976 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-operator-5675dd9766-zn7fd"] Nov 26 07:02:03 crc kubenswrapper[4492]: E1126 07:02:03.804752 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e2c560cd-2928-4dc3-a9a6-d97a50d3bf2f" containerName="util" Nov 26 07:02:03 crc kubenswrapper[4492]: I1126 07:02:03.804767 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="e2c560cd-2928-4dc3-a9a6-d97a50d3bf2f" containerName="util" Nov 26 07:02:03 crc kubenswrapper[4492]: E1126 07:02:03.804782 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e2c560cd-2928-4dc3-a9a6-d97a50d3bf2f" containerName="pull" Nov 26 07:02:03 crc kubenswrapper[4492]: I1126 07:02:03.804787 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="e2c560cd-2928-4dc3-a9a6-d97a50d3bf2f" containerName="pull" Nov 26 07:02:03 crc kubenswrapper[4492]: E1126 07:02:03.804797 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e2c560cd-2928-4dc3-a9a6-d97a50d3bf2f" containerName="extract" Nov 26 07:02:03 crc kubenswrapper[4492]: I1126 07:02:03.804803 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="e2c560cd-2928-4dc3-a9a6-d97a50d3bf2f" containerName="extract" Nov 26 07:02:03 crc kubenswrapper[4492]: I1126 07:02:03.804933 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="e2c560cd-2928-4dc3-a9a6-d97a50d3bf2f" containerName="extract" Nov 26 07:02:03 crc kubenswrapper[4492]: I1126 07:02:03.805374 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-5675dd9766-zn7fd" Nov 26 07:02:03 crc kubenswrapper[4492]: I1126 07:02:03.807422 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-operator-dockercfg-q7c7d" Nov 26 07:02:03 crc kubenswrapper[4492]: I1126 07:02:03.821252 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-5675dd9766-zn7fd"] Nov 26 07:02:03 crc kubenswrapper[4492]: I1126 07:02:03.833918 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t8d2z\" (UniqueName: \"kubernetes.io/projected/16d1e4be-4782-40d3-9e36-1cef62c42fbd-kube-api-access-t8d2z\") pod \"openstack-operator-controller-operator-5675dd9766-zn7fd\" (UID: \"16d1e4be-4782-40d3-9e36-1cef62c42fbd\") " pod="openstack-operators/openstack-operator-controller-operator-5675dd9766-zn7fd" Nov 26 07:02:03 crc kubenswrapper[4492]: I1126 07:02:03.935728 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t8d2z\" (UniqueName: \"kubernetes.io/projected/16d1e4be-4782-40d3-9e36-1cef62c42fbd-kube-api-access-t8d2z\") pod \"openstack-operator-controller-operator-5675dd9766-zn7fd\" (UID: \"16d1e4be-4782-40d3-9e36-1cef62c42fbd\") " pod="openstack-operators/openstack-operator-controller-operator-5675dd9766-zn7fd" Nov 26 07:02:03 crc kubenswrapper[4492]: I1126 07:02:03.951944 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t8d2z\" (UniqueName: \"kubernetes.io/projected/16d1e4be-4782-40d3-9e36-1cef62c42fbd-kube-api-access-t8d2z\") pod \"openstack-operator-controller-operator-5675dd9766-zn7fd\" (UID: \"16d1e4be-4782-40d3-9e36-1cef62c42fbd\") " pod="openstack-operators/openstack-operator-controller-operator-5675dd9766-zn7fd" Nov 26 07:02:04 crc kubenswrapper[4492]: I1126 07:02:04.119724 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-5675dd9766-zn7fd" Nov 26 07:02:04 crc kubenswrapper[4492]: I1126 07:02:04.511441 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-5675dd9766-zn7fd"] Nov 26 07:02:04 crc kubenswrapper[4492]: I1126 07:02:04.590780 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-5675dd9766-zn7fd" event={"ID":"16d1e4be-4782-40d3-9e36-1cef62c42fbd","Type":"ContainerStarted","Data":"b0f5fb86d8f6ea798f0b716db286a66bd6945524ec5955edd3f2883732b0a2c5"} Nov 26 07:02:09 crc kubenswrapper[4492]: I1126 07:02:09.624995 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-5675dd9766-zn7fd" event={"ID":"16d1e4be-4782-40d3-9e36-1cef62c42fbd","Type":"ContainerStarted","Data":"0880cec5dd8bb46847a775445b500eab0da65fe4d40c22b5303b24633be97923"} Nov 26 07:02:09 crc kubenswrapper[4492]: I1126 07:02:09.626374 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-operator-5675dd9766-zn7fd" Nov 26 07:02:09 crc kubenswrapper[4492]: I1126 07:02:09.653735 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-operator-5675dd9766-zn7fd" podStartSLOduration=2.579806461 podStartE2EDuration="6.653714759s" podCreationTimestamp="2025-11-26 07:02:03 +0000 UTC" firstStartedPulling="2025-11-26 07:02:04.514799438 +0000 UTC m=+820.398687735" lastFinishedPulling="2025-11-26 07:02:08.588707735 +0000 UTC m=+824.472596033" observedRunningTime="2025-11-26 07:02:09.648901911 +0000 UTC m=+825.532790198" watchObservedRunningTime="2025-11-26 07:02:09.653714759 +0000 UTC m=+825.537603047" Nov 26 07:02:14 crc kubenswrapper[4492]: I1126 07:02:14.123270 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-operator-5675dd9766-zn7fd" Nov 26 07:02:31 crc kubenswrapper[4492]: I1126 07:02:31.959577 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/barbican-operator-controller-manager-7b64f4fb85-jh4gx"] Nov 26 07:02:31 crc kubenswrapper[4492]: I1126 07:02:31.961711 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-jh4gx" Nov 26 07:02:31 crc kubenswrapper[4492]: I1126 07:02:31.967049 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"barbican-operator-controller-manager-dockercfg-4dx2c" Nov 26 07:02:31 crc kubenswrapper[4492]: I1126 07:02:31.976372 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-7b64f4fb85-jh4gx"] Nov 26 07:02:31 crc kubenswrapper[4492]: I1126 07:02:31.988328 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/cinder-operator-controller-manager-6b7f75547b-ttzjh"] Nov 26 07:02:31 crc kubenswrapper[4492]: I1126 07:02:31.989854 4492 util.go:30] "No sandbox for pod can be found. 
Nov 26 07:02:31 crc kubenswrapper[4492]: I1126 07:02:31.989854 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-ttzjh"
Nov 26 07:02:31 crc kubenswrapper[4492]: I1126 07:02:31.992529 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"cinder-operator-controller-manager-dockercfg-mwhzb"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.010121 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/designate-operator-controller-manager-955677c94-727xv"]
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.011398 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-955677c94-727xv"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.013113 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"designate-operator-controller-manager-dockercfg-ptltz"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.024719 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-6b7f75547b-ttzjh"]
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.035124 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/heat-operator-controller-manager-5b77f656f-pvxlk"]
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.036343 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-pvxlk"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.049941 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/glance-operator-controller-manager-589cbd6b5b-47cqn"]
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.051388 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-47cqn"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.055155 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"heat-operator-controller-manager-dockercfg-9js64"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.056335 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"glance-operator-controller-manager-dockercfg-96ssd"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.085017 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-955677c94-727xv"]
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.115748 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-589cbd6b5b-47cqn"]
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.118562 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-5b77f656f-pvxlk"]
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.141973 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-grqck\" (UniqueName: \"kubernetes.io/projected/4bb97d9a-923e-4292-9cdb-4e764b2e90d6-kube-api-access-grqck\") pod \"barbican-operator-controller-manager-7b64f4fb85-jh4gx\" (UID: \"4bb97d9a-923e-4292-9cdb-4e764b2e90d6\") " pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-jh4gx"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.142028 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m2zvk\" (UniqueName: \"kubernetes.io/projected/bb3706c6-2488-48fd-82f0-902371c46441-kube-api-access-m2zvk\") pod \"designate-operator-controller-manager-955677c94-727xv\" (UID: \"bb3706c6-2488-48fd-82f0-902371c46441\") " pod="openstack-operators/designate-operator-controller-manager-955677c94-727xv"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.142087 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-clv9d\" (UniqueName: \"kubernetes.io/projected/6a3be658-cdd4-45bb-b0ba-e7e99253e0d9-kube-api-access-clv9d\") pod \"heat-operator-controller-manager-5b77f656f-pvxlk\" (UID: \"6a3be658-cdd4-45bb-b0ba-e7e99253e0d9\") " pod="openstack-operators/heat-operator-controller-manager-5b77f656f-pvxlk"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.142145 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gzlbb\" (UniqueName: \"kubernetes.io/projected/3fb06526-065c-48bb-987d-02406113a06b-kube-api-access-gzlbb\") pod \"cinder-operator-controller-manager-6b7f75547b-ttzjh\" (UID: \"3fb06526-065c-48bb-987d-02406113a06b\") " pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-ttzjh"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.190974 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/infra-operator-controller-manager-57548d458d-bbzk8"]
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.194605 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-57548d458d-bbzk8"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.202709 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/horizon-operator-controller-manager-5d494799bf-jffbf"]
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.203875 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-jffbf"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.208162 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"horizon-operator-controller-manager-dockercfg-bwmv9"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.209316 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-webhook-server-cert"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.209408 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-57548d458d-bbzk8"]
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.209667 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-8wzwd"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.213432 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-5d494799bf-jffbf"]
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.233079 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-q7sh4"]
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.234439 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-q7sh4"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.240313 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/keystone-operator-controller-manager-7b4567c7cf-wtbpv"]
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.241558 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ironic-operator-controller-manager-dockercfg-58xzq"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.241580 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-wtbpv"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.248415 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-q7sh4"]
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.248428 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"keystone-operator-controller-manager-dockercfg-crrp6"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.253021 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-7b4567c7cf-wtbpv"]
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.274007 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-clv9d\" (UniqueName: \"kubernetes.io/projected/6a3be658-cdd4-45bb-b0ba-e7e99253e0d9-kube-api-access-clv9d\") pod \"heat-operator-controller-manager-5b77f656f-pvxlk\" (UID: \"6a3be658-cdd4-45bb-b0ba-e7e99253e0d9\") " pod="openstack-operators/heat-operator-controller-manager-5b77f656f-pvxlk"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.274126 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/909cbb6d-59a9-40fb-b0c5-b08c10ef8097-cert\") pod \"infra-operator-controller-manager-57548d458d-bbzk8\" (UID: \"909cbb6d-59a9-40fb-b0c5-b08c10ef8097\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-bbzk8"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.274221 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nfrqn\" (UniqueName: \"kubernetes.io/projected/72a74618-0600-42e3-8125-fd1be684497c-kube-api-access-nfrqn\") pod \"ironic-operator-controller-manager-67cb4dc6d4-q7sh4\" (UID: \"72a74618-0600-42e3-8125-fd1be684497c\") " pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-q7sh4"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.274309 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gzlbb\" (UniqueName: \"kubernetes.io/projected/3fb06526-065c-48bb-987d-02406113a06b-kube-api-access-gzlbb\") pod \"cinder-operator-controller-manager-6b7f75547b-ttzjh\" (UID: \"3fb06526-065c-48bb-987d-02406113a06b\") " pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-ttzjh"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.274383 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nw4qz\" (UniqueName: \"kubernetes.io/projected/783351b0-3a7d-4857-b972-ab027165e675-kube-api-access-nw4qz\") pod \"keystone-operator-controller-manager-7b4567c7cf-wtbpv\" (UID: \"783351b0-3a7d-4857-b972-ab027165e675\") " pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-wtbpv"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.274467 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rj29t\" (UniqueName: \"kubernetes.io/projected/7d85a216-d74b-4b58-94a9-2ccd5bfff7d4-kube-api-access-rj29t\") pod \"glance-operator-controller-manager-589cbd6b5b-47cqn\" (UID: \"7d85a216-d74b-4b58-94a9-2ccd5bfff7d4\") " pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-47cqn"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.274552 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s9psp\" (UniqueName: \"kubernetes.io/projected/909cbb6d-59a9-40fb-b0c5-b08c10ef8097-kube-api-access-s9psp\") pod \"infra-operator-controller-manager-57548d458d-bbzk8\" (UID: \"909cbb6d-59a9-40fb-b0c5-b08c10ef8097\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-bbzk8"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.274619 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jjnfp\" (UniqueName: \"kubernetes.io/projected/9dce4823-5319-44c6-aa25-bc5082014598-kube-api-access-jjnfp\") pod \"horizon-operator-controller-manager-5d494799bf-jffbf\" (UID: \"9dce4823-5319-44c6-aa25-bc5082014598\") " pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-jffbf"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.274702 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-grqck\" (UniqueName: \"kubernetes.io/projected/4bb97d9a-923e-4292-9cdb-4e764b2e90d6-kube-api-access-grqck\") pod \"barbican-operator-controller-manager-7b64f4fb85-jh4gx\" (UID: \"4bb97d9a-923e-4292-9cdb-4e764b2e90d6\") " pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-jh4gx"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.274770 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m2zvk\" (UniqueName: \"kubernetes.io/projected/bb3706c6-2488-48fd-82f0-902371c46441-kube-api-access-m2zvk\") pod \"designate-operator-controller-manager-955677c94-727xv\" (UID: \"bb3706c6-2488-48fd-82f0-902371c46441\") " pod="openstack-operators/designate-operator-controller-manager-955677c94-727xv"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.275811 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/manila-operator-controller-manager-5d499bf58b-vp7gp"]
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.282367 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-vp7gp"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.299669 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"manila-operator-controller-manager-dockercfg-gxdzw"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.324290 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-5d499bf58b-vp7gp"]
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.331718 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-clv9d\" (UniqueName: \"kubernetes.io/projected/6a3be658-cdd4-45bb-b0ba-e7e99253e0d9-kube-api-access-clv9d\") pod \"heat-operator-controller-manager-5b77f656f-pvxlk\" (UID: \"6a3be658-cdd4-45bb-b0ba-e7e99253e0d9\") " pod="openstack-operators/heat-operator-controller-manager-5b77f656f-pvxlk"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.332738 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gzlbb\" (UniqueName: \"kubernetes.io/projected/3fb06526-065c-48bb-987d-02406113a06b-kube-api-access-gzlbb\") pod \"cinder-operator-controller-manager-6b7f75547b-ttzjh\" (UID: \"3fb06526-065c-48bb-987d-02406113a06b\") " pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-ttzjh"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.336669 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-grqck\" (UniqueName: \"kubernetes.io/projected/4bb97d9a-923e-4292-9cdb-4e764b2e90d6-kube-api-access-grqck\") pod \"barbican-operator-controller-manager-7b64f4fb85-jh4gx\" (UID: \"4bb97d9a-923e-4292-9cdb-4e764b2e90d6\") " pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-jh4gx"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.337854 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m2zvk\" (UniqueName: \"kubernetes.io/projected/bb3706c6-2488-48fd-82f0-902371c46441-kube-api-access-m2zvk\") pod \"designate-operator-controller-manager-955677c94-727xv\" (UID: \"bb3706c6-2488-48fd-82f0-902371c46441\") " pod="openstack-operators/designate-operator-controller-manager-955677c94-727xv"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.342282 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-mw4rx"]
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.343730 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-mw4rx"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.347372 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/neutron-operator-controller-manager-6fdcddb789-p9wsx"]
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.348872 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-p9wsx"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.353345 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-pvxlk"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.363216 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-mw4rx"]
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.363886 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"mariadb-operator-controller-manager-dockercfg-5ckbn"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.367361 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"neutron-operator-controller-manager-dockercfg-c8npt"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.375968 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/909cbb6d-59a9-40fb-b0c5-b08c10ef8097-cert\") pod \"infra-operator-controller-manager-57548d458d-bbzk8\" (UID: \"909cbb6d-59a9-40fb-b0c5-b08c10ef8097\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-bbzk8"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.376018 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nfrqn\" (UniqueName: \"kubernetes.io/projected/72a74618-0600-42e3-8125-fd1be684497c-kube-api-access-nfrqn\") pod \"ironic-operator-controller-manager-67cb4dc6d4-q7sh4\" (UID: \"72a74618-0600-42e3-8125-fd1be684497c\") " pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-q7sh4"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.376047 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vkbf6\" (UniqueName: \"kubernetes.io/projected/d6eb1e59-0edd-42de-98e6-42c9b95359e2-kube-api-access-vkbf6\") pod \"manila-operator-controller-manager-5d499bf58b-vp7gp\" (UID: \"d6eb1e59-0edd-42de-98e6-42c9b95359e2\") " pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-vp7gp"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.376073 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-khhzh\" (UniqueName: \"kubernetes.io/projected/0b086d5f-ee5c-48d2-bf56-29bd8b3b6ba8-kube-api-access-khhzh\") pod \"mariadb-operator-controller-manager-66f4dd4bc7-mw4rx\" (UID: \"0b086d5f-ee5c-48d2-bf56-29bd8b3b6ba8\") " pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-mw4rx"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.376115 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nw4qz\" (UniqueName: \"kubernetes.io/projected/783351b0-3a7d-4857-b972-ab027165e675-kube-api-access-nw4qz\") pod \"keystone-operator-controller-manager-7b4567c7cf-wtbpv\" (UID: \"783351b0-3a7d-4857-b972-ab027165e675\") " pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-wtbpv"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.376151 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rj29t\" (UniqueName: \"kubernetes.io/projected/7d85a216-d74b-4b58-94a9-2ccd5bfff7d4-kube-api-access-rj29t\") pod \"glance-operator-controller-manager-589cbd6b5b-47cqn\" (UID: \"7d85a216-d74b-4b58-94a9-2ccd5bfff7d4\") " pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-47cqn"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.376214 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d2dgb\" (UniqueName: \"kubernetes.io/projected/9a52fd58-9281-424a-b211-975b007e5f38-kube-api-access-d2dgb\") pod \"neutron-operator-controller-manager-6fdcddb789-p9wsx\" (UID: \"9a52fd58-9281-424a-b211-975b007e5f38\") " pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-p9wsx"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.376235 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s9psp\" (UniqueName: \"kubernetes.io/projected/909cbb6d-59a9-40fb-b0c5-b08c10ef8097-kube-api-access-s9psp\") pod \"infra-operator-controller-manager-57548d458d-bbzk8\" (UID: \"909cbb6d-59a9-40fb-b0c5-b08c10ef8097\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-bbzk8"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.376261 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jjnfp\" (UniqueName: \"kubernetes.io/projected/9dce4823-5319-44c6-aa25-bc5082014598-kube-api-access-jjnfp\") pod \"horizon-operator-controller-manager-5d494799bf-jffbf\" (UID: \"9dce4823-5319-44c6-aa25-bc5082014598\") " pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-jffbf"
Nov 26 07:02:32 crc kubenswrapper[4492]: E1126 07:02:32.376583 4492 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found
Nov 26 07:02:32 crc kubenswrapper[4492]: E1126 07:02:32.376626 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/909cbb6d-59a9-40fb-b0c5-b08c10ef8097-cert podName:909cbb6d-59a9-40fb-b0c5-b08c10ef8097 nodeName:}" failed. No retries permitted until 2025-11-26 07:02:32.876609752 +0000 UTC m=+848.760498050 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/909cbb6d-59a9-40fb-b0c5-b08c10ef8097-cert") pod "infra-operator-controller-manager-57548d458d-bbzk8" (UID: "909cbb6d-59a9-40fb-b0c5-b08c10ef8097") : secret "infra-operator-webhook-server-cert" not found
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.379226 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/nova-operator-controller-manager-79556f57fc-rxz2r"]
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.380317 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rxz2r"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.393307 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-6fdcddb789-p9wsx"]
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.404502 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"nova-operator-controller-manager-dockercfg-hvpvk"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.406404 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-79556f57fc-rxz2r"]
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.409473 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nfrqn\" (UniqueName: \"kubernetes.io/projected/72a74618-0600-42e3-8125-fd1be684497c-kube-api-access-nfrqn\") pod \"ironic-operator-controller-manager-67cb4dc6d4-q7sh4\" (UID: \"72a74618-0600-42e3-8125-fd1be684497c\") " pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-q7sh4"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.420534 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s9psp\" (UniqueName: \"kubernetes.io/projected/909cbb6d-59a9-40fb-b0c5-b08c10ef8097-kube-api-access-s9psp\") pod \"infra-operator-controller-manager-57548d458d-bbzk8\" (UID: \"909cbb6d-59a9-40fb-b0c5-b08c10ef8097\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-bbzk8"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.424339 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/octavia-operator-controller-manager-64cdc6ff96-2bvr2"]
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.425663 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nw4qz\" (UniqueName: \"kubernetes.io/projected/783351b0-3a7d-4857-b972-ab027165e675-kube-api-access-nw4qz\") pod \"keystone-operator-controller-manager-7b4567c7cf-wtbpv\" (UID: \"783351b0-3a7d-4857-b972-ab027165e675\") " pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-wtbpv"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.425790 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rj29t\" (UniqueName: \"kubernetes.io/projected/7d85a216-d74b-4b58-94a9-2ccd5bfff7d4-kube-api-access-rj29t\") pod \"glance-operator-controller-manager-589cbd6b5b-47cqn\" (UID: \"7d85a216-d74b-4b58-94a9-2ccd5bfff7d4\") " pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-47cqn"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.426083 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-2bvr2"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.426115 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jjnfp\" (UniqueName: \"kubernetes.io/projected/9dce4823-5319-44c6-aa25-bc5082014598-kube-api-access-jjnfp\") pod \"horizon-operator-controller-manager-5d494799bf-jffbf\" (UID: \"9dce4823-5319-44c6-aa25-bc5082014598\") " pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-jffbf"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.429465 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"octavia-operator-controller-manager-dockercfg-b4nqc"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.435773 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-64cdc6ff96-2bvr2"]
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.478035 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d2dgb\" (UniqueName: \"kubernetes.io/projected/9a52fd58-9281-424a-b211-975b007e5f38-kube-api-access-d2dgb\") pod \"neutron-operator-controller-manager-6fdcddb789-p9wsx\" (UID: \"9a52fd58-9281-424a-b211-975b007e5f38\") " pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-p9wsx"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.478127 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qlktm\" (UniqueName: \"kubernetes.io/projected/3939ad46-695a-430c-bffb-380a366540ab-kube-api-access-qlktm\") pod \"octavia-operator-controller-manager-64cdc6ff96-2bvr2\" (UID: \"3939ad46-695a-430c-bffb-380a366540ab\") " pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-2bvr2"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.478200 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vkbf6\" (UniqueName: \"kubernetes.io/projected/d6eb1e59-0edd-42de-98e6-42c9b95359e2-kube-api-access-vkbf6\") pod \"manila-operator-controller-manager-5d499bf58b-vp7gp\" (UID: \"d6eb1e59-0edd-42de-98e6-42c9b95359e2\") " pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-vp7gp"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.478223 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-khhzh\" (UniqueName: \"kubernetes.io/projected/0b086d5f-ee5c-48d2-bf56-29bd8b3b6ba8-kube-api-access-khhzh\") pod \"mariadb-operator-controller-manager-66f4dd4bc7-mw4rx\" (UID: \"0b086d5f-ee5c-48d2-bf56-29bd8b3b6ba8\") " pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-mw4rx"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.478265 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mvz46\" (UniqueName: \"kubernetes.io/projected/9dc0fcb4-c9c5-48c7-ace0-1758df8292ef-kube-api-access-mvz46\") pod \"nova-operator-controller-manager-79556f57fc-rxz2r\" (UID: \"9dc0fcb4-c9c5-48c7-ace0-1758df8292ef\") " pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rxz2r"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.480770 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-6ffd77ccd-x5xwx"]
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.482104 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6ffd77ccd-x5xwx"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.486139 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-controller-manager-dockercfg-qjmd4"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.486323 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-webhook-server-cert"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.502847 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ovn-operator-controller-manager-56897c768d-5g6s2"]
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.504353 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-5g6s2"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.506216 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-56897c768d-5g6s2"]
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.511907 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ovn-operator-controller-manager-dockercfg-58kvd"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.522735 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-khhzh\" (UniqueName: \"kubernetes.io/projected/0b086d5f-ee5c-48d2-bf56-29bd8b3b6ba8-kube-api-access-khhzh\") pod \"mariadb-operator-controller-manager-66f4dd4bc7-mw4rx\" (UID: \"0b086d5f-ee5c-48d2-bf56-29bd8b3b6ba8\") " pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-mw4rx"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.524662 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d2dgb\" (UniqueName: \"kubernetes.io/projected/9a52fd58-9281-424a-b211-975b007e5f38-kube-api-access-d2dgb\") pod \"neutron-operator-controller-manager-6fdcddb789-p9wsx\" (UID: \"9a52fd58-9281-424a-b211-975b007e5f38\") " pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-p9wsx"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.525715 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-6ffd77ccd-x5xwx"]
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.531268 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-jffbf"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.535323 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vkbf6\" (UniqueName: \"kubernetes.io/projected/d6eb1e59-0edd-42de-98e6-42c9b95359e2-kube-api-access-vkbf6\") pod \"manila-operator-controller-manager-5d499bf58b-vp7gp\" (UID: \"d6eb1e59-0edd-42de-98e6-42c9b95359e2\") " pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-vp7gp"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.542474 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/placement-operator-controller-manager-57988cc5b5-bwwkk"]
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.543572 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-bwwkk"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.608219 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-ttzjh"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.608778 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/swift-operator-controller-manager-d77b94747-d6mh8"]
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.623944 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"placement-operator-controller-manager-dockercfg-rx4q2"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.625132 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-d77b94747-d6mh8"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.635505 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"swift-operator-controller-manager-dockercfg-n99np"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.643788 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-wtbpv"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.646328 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-955677c94-727xv"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.648146 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-jh4gx"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.652753 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-q7sh4"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.659015 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qlktm\" (UniqueName: \"kubernetes.io/projected/3939ad46-695a-430c-bffb-380a366540ab-kube-api-access-qlktm\") pod \"octavia-operator-controller-manager-64cdc6ff96-2bvr2\" (UID: \"3939ad46-695a-430c-bffb-380a366540ab\") " pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-2bvr2"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.659105 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/a2a736ce-f708-47dc-a121-75bc30b70b1e-cert\") pod \"openstack-baremetal-operator-controller-manager-6ffd77ccd-x5xwx\" (UID: \"a2a736ce-f708-47dc-a121-75bc30b70b1e\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6ffd77ccd-x5xwx"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.659136 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s6ngd\" (UniqueName: \"kubernetes.io/projected/a2a736ce-f708-47dc-a121-75bc30b70b1e-kube-api-access-s6ngd\") pod \"openstack-baremetal-operator-controller-manager-6ffd77ccd-x5xwx\" (UID: \"a2a736ce-f708-47dc-a121-75bc30b70b1e\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6ffd77ccd-x5xwx"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.659246 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tscs2\" (UniqueName: \"kubernetes.io/projected/1ece66ff-2fba-4ff2-b3fc-35c105b9915e-kube-api-access-tscs2\") pod \"placement-operator-controller-manager-57988cc5b5-bwwkk\" (UID: \"1ece66ff-2fba-4ff2-b3fc-35c105b9915e\") " pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-bwwkk"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.659275 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mvz46\" (UniqueName: \"kubernetes.io/projected/9dc0fcb4-c9c5-48c7-ace0-1758df8292ef-kube-api-access-mvz46\") pod \"nova-operator-controller-manager-79556f57fc-rxz2r\" (UID: \"9dc0fcb4-c9c5-48c7-ace0-1758df8292ef\") " pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rxz2r"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.659297 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8frwq\" (UniqueName: \"kubernetes.io/projected/095328fd-0dc7-49a2-86fa-53c5da397363-kube-api-access-8frwq\") pod \"ovn-operator-controller-manager-56897c768d-5g6s2\" (UID: \"095328fd-0dc7-49a2-86fa-53c5da397363\") " pod="openstack-operators/ovn-operator-controller-manager-56897c768d-5g6s2"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.665348 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-vp7gp"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.689344 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qlktm\" (UniqueName: \"kubernetes.io/projected/3939ad46-695a-430c-bffb-380a366540ab-kube-api-access-qlktm\") pod \"octavia-operator-controller-manager-64cdc6ff96-2bvr2\" (UID: \"3939ad46-695a-430c-bffb-380a366540ab\") " pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-2bvr2"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.728411 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-d77b94747-d6mh8"]
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.728788 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mvz46\" (UniqueName: \"kubernetes.io/projected/9dc0fcb4-c9c5-48c7-ace0-1758df8292ef-kube-api-access-mvz46\") pod \"nova-operator-controller-manager-79556f57fc-rxz2r\" (UID: \"9dc0fcb4-c9c5-48c7-ace0-1758df8292ef\") " pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rxz2r"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.731327 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-47cqn"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.737874 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-57988cc5b5-bwwkk"]
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.744641 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-dvbx8"]
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.746012 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-dvbx8"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.749258 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-dvbx8"]
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.749714 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"telemetry-operator-controller-manager-dockercfg-4qz5x"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.759097 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-mw4rx"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.766291 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/test-operator-controller-manager-5cd6c7f4c8-vv4f8"]
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.767279 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fp7m2\" (UniqueName: \"kubernetes.io/projected/f99ecfd3-ef8b-4a69-bd29-e55b31465022-kube-api-access-fp7m2\") pod \"swift-operator-controller-manager-d77b94747-d6mh8\" (UID: \"f99ecfd3-ef8b-4a69-bd29-e55b31465022\") " pod="openstack-operators/swift-operator-controller-manager-d77b94747-d6mh8"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.767477 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/a2a736ce-f708-47dc-a121-75bc30b70b1e-cert\") pod \"openstack-baremetal-operator-controller-manager-6ffd77ccd-x5xwx\" (UID: \"a2a736ce-f708-47dc-a121-75bc30b70b1e\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6ffd77ccd-x5xwx"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.767503 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s6ngd\" (UniqueName: \"kubernetes.io/projected/a2a736ce-f708-47dc-a121-75bc30b70b1e-kube-api-access-s6ngd\") pod \"openstack-baremetal-operator-controller-manager-6ffd77ccd-x5xwx\" (UID: \"a2a736ce-f708-47dc-a121-75bc30b70b1e\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6ffd77ccd-x5xwx"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.767519 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-vv4f8"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.767534 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tscs2\" (UniqueName: \"kubernetes.io/projected/1ece66ff-2fba-4ff2-b3fc-35c105b9915e-kube-api-access-tscs2\") pod \"placement-operator-controller-manager-57988cc5b5-bwwkk\" (UID: \"1ece66ff-2fba-4ff2-b3fc-35c105b9915e\") " pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-bwwkk"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.767569 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8frwq\" (UniqueName: \"kubernetes.io/projected/095328fd-0dc7-49a2-86fa-53c5da397363-kube-api-access-8frwq\") pod \"ovn-operator-controller-manager-56897c768d-5g6s2\" (UID: \"095328fd-0dc7-49a2-86fa-53c5da397363\") " pod="openstack-operators/ovn-operator-controller-manager-56897c768d-5g6s2"
Nov 26 07:02:32 crc kubenswrapper[4492]: E1126 07:02:32.767883 4492 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found
Nov 26 07:02:32 crc kubenswrapper[4492]: E1126 07:02:32.767938 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a2a736ce-f708-47dc-a121-75bc30b70b1e-cert podName:a2a736ce-f708-47dc-a121-75bc30b70b1e nodeName:}" failed. No retries permitted until 2025-11-26 07:02:33.267924541 +0000 UTC m=+849.151812838 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/a2a736ce-f708-47dc-a121-75bc30b70b1e-cert") pod "openstack-baremetal-operator-controller-manager-6ffd77ccd-x5xwx" (UID: "a2a736ce-f708-47dc-a121-75bc30b70b1e") : secret "openstack-baremetal-operator-webhook-server-cert" not found
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.779222 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/watcher-operator-controller-manager-656dcb59d4-b88tl"]
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.780299 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-b88tl"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.783572 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"test-operator-controller-manager-dockercfg-bskp2"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.785619 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-5cd6c7f4c8-vv4f8"]
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.786586 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"watcher-operator-controller-manager-dockercfg-9q97s"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.790217 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-656dcb59d4-b88tl"]
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.798428 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-p9wsx"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.799740 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tscs2\" (UniqueName: \"kubernetes.io/projected/1ece66ff-2fba-4ff2-b3fc-35c105b9915e-kube-api-access-tscs2\") pod \"placement-operator-controller-manager-57988cc5b5-bwwkk\" (UID: \"1ece66ff-2fba-4ff2-b3fc-35c105b9915e\") " pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-bwwkk"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.809813 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8frwq\" (UniqueName: \"kubernetes.io/projected/095328fd-0dc7-49a2-86fa-53c5da397363-kube-api-access-8frwq\") pod \"ovn-operator-controller-manager-56897c768d-5g6s2\" (UID: \"095328fd-0dc7-49a2-86fa-53c5da397363\") " pod="openstack-operators/ovn-operator-controller-manager-56897c768d-5g6s2"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.812587 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rxz2r"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.814645 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-manager-56868586f6-kchvg"]
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.815627 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-56868586f6-kchvg"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.822275 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s6ngd\" (UniqueName: \"kubernetes.io/projected/a2a736ce-f708-47dc-a121-75bc30b70b1e-kube-api-access-s6ngd\") pod \"openstack-baremetal-operator-controller-manager-6ffd77ccd-x5xwx\" (UID: \"a2a736ce-f708-47dc-a121-75bc30b70b1e\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6ffd77ccd-x5xwx"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.825747 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"webhook-server-cert"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.825992 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-s9v6b"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.827295 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"metrics-server-cert"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.863576 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-2bvr2"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.872053 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/3f02f890-8b9a-48b0-9b9d-7c8280f4639b-metrics-certs\") pod \"openstack-operator-controller-manager-56868586f6-kchvg\" (UID: \"3f02f890-8b9a-48b0-9b9d-7c8280f4639b\") " pod="openstack-operators/openstack-operator-controller-manager-56868586f6-kchvg"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.873805 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-chv8m\" (UniqueName: \"kubernetes.io/projected/a80f2e93-10b0-4168-9de5-8116b78676b2-kube-api-access-chv8m\") pod \"telemetry-operator-controller-manager-76cc84c6bb-dvbx8\" (UID: \"a80f2e93-10b0-4168-9de5-8116b78676b2\") " pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-dvbx8"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.874003 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/3f02f890-8b9a-48b0-9b9d-7c8280f4639b-webhook-certs\") pod \"openstack-operator-controller-manager-56868586f6-kchvg\" (UID: \"3f02f890-8b9a-48b0-9b9d-7c8280f4639b\") " pod="openstack-operators/openstack-operator-controller-manager-56868586f6-kchvg"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.875231 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fp7m2\" (UniqueName: \"kubernetes.io/projected/f99ecfd3-ef8b-4a69-bd29-e55b31465022-kube-api-access-fp7m2\") pod \"swift-operator-controller-manager-d77b94747-d6mh8\" (UID: \"f99ecfd3-ef8b-4a69-bd29-e55b31465022\") " pod="openstack-operators/swift-operator-controller-manager-d77b94747-d6mh8"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.875348 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zj9n9\" (UniqueName: \"kubernetes.io/projected/3a857a3d-1748-4fda-b8c6-4eb5ba0ffd02-kube-api-access-zj9n9\") pod \"watcher-operator-controller-manager-656dcb59d4-b88tl\" (UID: \"3a857a3d-1748-4fda-b8c6-4eb5ba0ffd02\") " pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-b88tl"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.875434 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-77rqd\" (UniqueName: \"kubernetes.io/projected/fdb12da6-30c3-43f1-adbd-942cad0de886-kube-api-access-77rqd\") pod \"test-operator-controller-manager-5cd6c7f4c8-vv4f8\" (UID: \"fdb12da6-30c3-43f1-adbd-942cad0de886\") " pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-vv4f8"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.875576 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-khzt5\" (UniqueName: \"kubernetes.io/projected/3f02f890-8b9a-48b0-9b9d-7c8280f4639b-kube-api-access-khzt5\") pod \"openstack-operator-controller-manager-56868586f6-kchvg\" (UID: \"3f02f890-8b9a-48b0-9b9d-7c8280f4639b\") " pod="openstack-operators/openstack-operator-controller-manager-56868586f6-kchvg"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.876951 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-56868586f6-kchvg"]
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.912862 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fp7m2\" (UniqueName: \"kubernetes.io/projected/f99ecfd3-ef8b-4a69-bd29-e55b31465022-kube-api-access-fp7m2\") pod \"swift-operator-controller-manager-d77b94747-d6mh8\" (UID: \"f99ecfd3-ef8b-4a69-bd29-e55b31465022\") " pod="openstack-operators/swift-operator-controller-manager-d77b94747-d6mh8"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.931219 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-xlj7m"]
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.932310 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-xlj7m"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.935605 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-xlj7m"]
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.940867 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"rabbitmq-cluster-operator-controller-manager-dockercfg-dndcc"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.978206 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/3f02f890-8b9a-48b0-9b9d-7c8280f4639b-webhook-certs\") pod \"openstack-operator-controller-manager-56868586f6-kchvg\" (UID: \"3f02f890-8b9a-48b0-9b9d-7c8280f4639b\") " pod="openstack-operators/openstack-operator-controller-manager-56868586f6-kchvg"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.978245 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pts9g\" (UniqueName: \"kubernetes.io/projected/0c48ee47-de9c-455c-a366-48c296180ff9-kube-api-access-pts9g\") pod \"rabbitmq-cluster-operator-manager-668c99d594-xlj7m\" (UID: \"0c48ee47-de9c-455c-a366-48c296180ff9\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-xlj7m"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.978280 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/909cbb6d-59a9-40fb-b0c5-b08c10ef8097-cert\") pod \"infra-operator-controller-manager-57548d458d-bbzk8\" (UID: \"909cbb6d-59a9-40fb-b0c5-b08c10ef8097\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-bbzk8"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.978319 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zj9n9\" (UniqueName: \"kubernetes.io/projected/3a857a3d-1748-4fda-b8c6-4eb5ba0ffd02-kube-api-access-zj9n9\") pod \"watcher-operator-controller-manager-656dcb59d4-b88tl\" (UID: \"3a857a3d-1748-4fda-b8c6-4eb5ba0ffd02\") " pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-b88tl"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.978340 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-77rqd\" (UniqueName: \"kubernetes.io/projected/fdb12da6-30c3-43f1-adbd-942cad0de886-kube-api-access-77rqd\") pod \"test-operator-controller-manager-5cd6c7f4c8-vv4f8\" (UID: \"fdb12da6-30c3-43f1-adbd-942cad0de886\") " pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-vv4f8"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.978375 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-khzt5\" (UniqueName: \"kubernetes.io/projected/3f02f890-8b9a-48b0-9b9d-7c8280f4639b-kube-api-access-khzt5\") pod \"openstack-operator-controller-manager-56868586f6-kchvg\" (UID: \"3f02f890-8b9a-48b0-9b9d-7c8280f4639b\") " pod="openstack-operators/openstack-operator-controller-manager-56868586f6-kchvg"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.978391 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/3f02f890-8b9a-48b0-9b9d-7c8280f4639b-metrics-certs\") pod \"openstack-operator-controller-manager-56868586f6-kchvg\" (UID: \"3f02f890-8b9a-48b0-9b9d-7c8280f4639b\") " pod="openstack-operators/openstack-operator-controller-manager-56868586f6-kchvg"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.978419 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-chv8m\" (UniqueName: \"kubernetes.io/projected/a80f2e93-10b0-4168-9de5-8116b78676b2-kube-api-access-chv8m\") pod \"telemetry-operator-controller-manager-76cc84c6bb-dvbx8\" (UID: \"a80f2e93-10b0-4168-9de5-8116b78676b2\") " pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-dvbx8"
Nov 26 07:02:32 crc kubenswrapper[4492]: E1126 07:02:32.978750 4492 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found
Nov 26 07:02:32 crc kubenswrapper[4492]: E1126 07:02:32.978789 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3f02f890-8b9a-48b0-9b9d-7c8280f4639b-webhook-certs podName:3f02f890-8b9a-48b0-9b9d-7c8280f4639b nodeName:}" failed. No retries permitted until 2025-11-26 07:02:33.478776277 +0000 UTC m=+849.362664575 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/3f02f890-8b9a-48b0-9b9d-7c8280f4639b-webhook-certs") pod "openstack-operator-controller-manager-56868586f6-kchvg" (UID: "3f02f890-8b9a-48b0-9b9d-7c8280f4639b") : secret "webhook-server-cert" not found
Nov 26 07:02:32 crc kubenswrapper[4492]: E1126 07:02:32.979764 4492 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found
Nov 26 07:02:32 crc kubenswrapper[4492]: E1126 07:02:32.979790 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/909cbb6d-59a9-40fb-b0c5-b08c10ef8097-cert podName:909cbb6d-59a9-40fb-b0c5-b08c10ef8097 nodeName:}" failed. No retries permitted until 2025-11-26 07:02:33.979780877 +0000 UTC m=+849.863669164 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/909cbb6d-59a9-40fb-b0c5-b08c10ef8097-cert") pod "infra-operator-controller-manager-57548d458d-bbzk8" (UID: "909cbb6d-59a9-40fb-b0c5-b08c10ef8097") : secret "infra-operator-webhook-server-cert" not found
Nov 26 07:02:32 crc kubenswrapper[4492]: E1126 07:02:32.980159 4492 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found
Nov 26 07:02:32 crc kubenswrapper[4492]: E1126 07:02:32.980197 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3f02f890-8b9a-48b0-9b9d-7c8280f4639b-metrics-certs podName:3f02f890-8b9a-48b0-9b9d-7c8280f4639b nodeName:}" failed. No retries permitted until 2025-11-26 07:02:33.480190336 +0000 UTC m=+849.364078634 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/3f02f890-8b9a-48b0-9b9d-7c8280f4639b-metrics-certs") pod "openstack-operator-controller-manager-56868586f6-kchvg" (UID: "3f02f890-8b9a-48b0-9b9d-7c8280f4639b") : secret "metrics-server-cert" not found
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.980283 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-bwwkk"
Nov 26 07:02:32 crc kubenswrapper[4492]: I1126 07:02:32.980643 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-5g6s2"
Nov 26 07:02:33 crc kubenswrapper[4492]: I1126 07:02:33.011118 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zj9n9\" (UniqueName: \"kubernetes.io/projected/3a857a3d-1748-4fda-b8c6-4eb5ba0ffd02-kube-api-access-zj9n9\") pod \"watcher-operator-controller-manager-656dcb59d4-b88tl\" (UID: \"3a857a3d-1748-4fda-b8c6-4eb5ba0ffd02\") " pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-b88tl"
Nov 26 07:02:33 crc kubenswrapper[4492]: I1126 07:02:33.011244 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-khzt5\" (UniqueName: \"kubernetes.io/projected/3f02f890-8b9a-48b0-9b9d-7c8280f4639b-kube-api-access-khzt5\") pod \"openstack-operator-controller-manager-56868586f6-kchvg\" (UID: \"3f02f890-8b9a-48b0-9b9d-7c8280f4639b\") " pod="openstack-operators/openstack-operator-controller-manager-56868586f6-kchvg"
Nov 26 07:02:33 crc kubenswrapper[4492]: I1126 07:02:33.025699 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-d77b94747-d6mh8"
Nov 26 07:02:33 crc kubenswrapper[4492]: I1126 07:02:33.029812 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-77rqd\" (UniqueName: \"kubernetes.io/projected/fdb12da6-30c3-43f1-adbd-942cad0de886-kube-api-access-77rqd\") pod \"test-operator-controller-manager-5cd6c7f4c8-vv4f8\" (UID: \"fdb12da6-30c3-43f1-adbd-942cad0de886\") " pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-vv4f8"
Nov 26 07:02:33 crc kubenswrapper[4492]: I1126 07:02:33.032488 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-chv8m\" (UniqueName: \"kubernetes.io/projected/a80f2e93-10b0-4168-9de5-8116b78676b2-kube-api-access-chv8m\") pod \"telemetry-operator-controller-manager-76cc84c6bb-dvbx8\" (UID: \"a80f2e93-10b0-4168-9de5-8116b78676b2\") " pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-dvbx8"
Nov 26 07:02:33 crc kubenswrapper[4492]: I1126 07:02:33.038070 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-5d494799bf-jffbf"]
Nov 26 07:02:33 crc kubenswrapper[4492]: I1126 07:02:33.049644 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-5b77f656f-pvxlk"]
Nov 26 07:02:33 crc kubenswrapper[4492]: I1126 07:02:33.084713 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pts9g\" (UniqueName: \"kubernetes.io/projected/0c48ee47-de9c-455c-a366-48c296180ff9-kube-api-access-pts9g\") pod \"rabbitmq-cluster-operator-manager-668c99d594-xlj7m\" (UID: \"0c48ee47-de9c-455c-a366-48c296180ff9\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-xlj7m"
Nov 26 07:02:33 crc kubenswrapper[4492]: I1126 07:02:33.113073 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pts9g\" (UniqueName: \"kubernetes.io/projected/0c48ee47-de9c-455c-a366-48c296180ff9-kube-api-access-pts9g\") pod \"rabbitmq-cluster-operator-manager-668c99d594-xlj7m\" (UID: \"0c48ee47-de9c-455c-a366-48c296180ff9\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-xlj7m"
Nov 26 07:02:33 crc kubenswrapper[4492]: I1126 07:02:33.128840 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-dvbx8" Nov 26 07:02:33 crc kubenswrapper[4492]: I1126 07:02:33.158408 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-vv4f8" Nov 26 07:02:33 crc kubenswrapper[4492]: W1126 07:02:33.170465 4492 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9dce4823_5319_44c6_aa25_bc5082014598.slice/crio-bdaf73473e4cb445fc64023f542038465b2221da55f05a706f048e850bc55b3a WatchSource:0}: Error finding container bdaf73473e4cb445fc64023f542038465b2221da55f05a706f048e850bc55b3a: Status 404 returned error can't find the container with id bdaf73473e4cb445fc64023f542038465b2221da55f05a706f048e850bc55b3a Nov 26 07:02:33 crc kubenswrapper[4492]: I1126 07:02:33.195455 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-b88tl" Nov 26 07:02:33 crc kubenswrapper[4492]: I1126 07:02:33.284593 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-xlj7m" Nov 26 07:02:33 crc kubenswrapper[4492]: I1126 07:02:33.288254 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/a2a736ce-f708-47dc-a121-75bc30b70b1e-cert\") pod \"openstack-baremetal-operator-controller-manager-6ffd77ccd-x5xwx\" (UID: \"a2a736ce-f708-47dc-a121-75bc30b70b1e\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6ffd77ccd-x5xwx" Nov 26 07:02:33 crc kubenswrapper[4492]: E1126 07:02:33.288440 4492 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 26 07:02:33 crc kubenswrapper[4492]: E1126 07:02:33.288485 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a2a736ce-f708-47dc-a121-75bc30b70b1e-cert podName:a2a736ce-f708-47dc-a121-75bc30b70b1e nodeName:}" failed. No retries permitted until 2025-11-26 07:02:34.288471647 +0000 UTC m=+850.172359945 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/a2a736ce-f708-47dc-a121-75bc30b70b1e-cert") pod "openstack-baremetal-operator-controller-manager-6ffd77ccd-x5xwx" (UID: "a2a736ce-f708-47dc-a121-75bc30b70b1e") : secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 26 07:02:33 crc kubenswrapper[4492]: I1126 07:02:33.490571 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/3f02f890-8b9a-48b0-9b9d-7c8280f4639b-metrics-certs\") pod \"openstack-operator-controller-manager-56868586f6-kchvg\" (UID: \"3f02f890-8b9a-48b0-9b9d-7c8280f4639b\") " pod="openstack-operators/openstack-operator-controller-manager-56868586f6-kchvg" Nov 26 07:02:33 crc kubenswrapper[4492]: E1126 07:02:33.490701 4492 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Nov 26 07:02:33 crc kubenswrapper[4492]: E1126 07:02:33.490749 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3f02f890-8b9a-48b0-9b9d-7c8280f4639b-metrics-certs podName:3f02f890-8b9a-48b0-9b9d-7c8280f4639b nodeName:}" failed. 
No retries permitted until 2025-11-26 07:02:34.490736058 +0000 UTC m=+850.374624347 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/3f02f890-8b9a-48b0-9b9d-7c8280f4639b-metrics-certs") pod "openstack-operator-controller-manager-56868586f6-kchvg" (UID: "3f02f890-8b9a-48b0-9b9d-7c8280f4639b") : secret "metrics-server-cert" not found Nov 26 07:02:33 crc kubenswrapper[4492]: I1126 07:02:33.491101 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/3f02f890-8b9a-48b0-9b9d-7c8280f4639b-webhook-certs\") pod \"openstack-operator-controller-manager-56868586f6-kchvg\" (UID: \"3f02f890-8b9a-48b0-9b9d-7c8280f4639b\") " pod="openstack-operators/openstack-operator-controller-manager-56868586f6-kchvg" Nov 26 07:02:33 crc kubenswrapper[4492]: E1126 07:02:33.491625 4492 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Nov 26 07:02:33 crc kubenswrapper[4492]: E1126 07:02:33.491650 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3f02f890-8b9a-48b0-9b9d-7c8280f4639b-webhook-certs podName:3f02f890-8b9a-48b0-9b9d-7c8280f4639b nodeName:}" failed. No retries permitted until 2025-11-26 07:02:34.491643304 +0000 UTC m=+850.375531592 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/3f02f890-8b9a-48b0-9b9d-7c8280f4639b-webhook-certs") pod "openstack-operator-controller-manager-56868586f6-kchvg" (UID: "3f02f890-8b9a-48b0-9b9d-7c8280f4639b") : secret "webhook-server-cert" not found Nov 26 07:02:33 crc kubenswrapper[4492]: I1126 07:02:33.536631 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-955677c94-727xv"] Nov 26 07:02:33 crc kubenswrapper[4492]: I1126 07:02:33.799187 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-jffbf" event={"ID":"9dce4823-5319-44c6-aa25-bc5082014598","Type":"ContainerStarted","Data":"bdaf73473e4cb445fc64023f542038465b2221da55f05a706f048e850bc55b3a"} Nov 26 07:02:33 crc kubenswrapper[4492]: I1126 07:02:33.803107 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-pvxlk" event={"ID":"6a3be658-cdd4-45bb-b0ba-e7e99253e0d9","Type":"ContainerStarted","Data":"2401144146d538a9917c3dee2c183263b7aba418cb2ba393bd66f3839f2cdb1f"} Nov 26 07:02:33 crc kubenswrapper[4492]: I1126 07:02:33.804265 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-955677c94-727xv" event={"ID":"bb3706c6-2488-48fd-82f0-902371c46441","Type":"ContainerStarted","Data":"0716e734afe032fe8e3e9507d94fc7712df6083111f5cdd60b22cdacc9c2533a"} Nov 26 07:02:34 crc kubenswrapper[4492]: I1126 07:02:34.002972 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/909cbb6d-59a9-40fb-b0c5-b08c10ef8097-cert\") pod \"infra-operator-controller-manager-57548d458d-bbzk8\" (UID: \"909cbb6d-59a9-40fb-b0c5-b08c10ef8097\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-bbzk8" Nov 26 07:02:34 crc kubenswrapper[4492]: E1126 07:02:34.003263 4492 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret 
"infra-operator-webhook-server-cert" not found Nov 26 07:02:34 crc kubenswrapper[4492]: E1126 07:02:34.003339 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/909cbb6d-59a9-40fb-b0c5-b08c10ef8097-cert podName:909cbb6d-59a9-40fb-b0c5-b08c10ef8097 nodeName:}" failed. No retries permitted until 2025-11-26 07:02:36.003313891 +0000 UTC m=+851.887202190 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/909cbb6d-59a9-40fb-b0c5-b08c10ef8097-cert") pod "infra-operator-controller-manager-57548d458d-bbzk8" (UID: "909cbb6d-59a9-40fb-b0c5-b08c10ef8097") : secret "infra-operator-webhook-server-cert" not found Nov 26 07:02:34 crc kubenswrapper[4492]: I1126 07:02:34.253148 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-mw4rx"] Nov 26 07:02:34 crc kubenswrapper[4492]: W1126 07:02:34.265279 4492 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0b086d5f_ee5c_48d2_bf56_29bd8b3b6ba8.slice/crio-12c2eb539eb4cd1b8b4ccdc4e6174e05b344b473561e63839c73bae4947f8d43 WatchSource:0}: Error finding container 12c2eb539eb4cd1b8b4ccdc4e6174e05b344b473561e63839c73bae4947f8d43: Status 404 returned error can't find the container with id 12c2eb539eb4cd1b8b4ccdc4e6174e05b344b473561e63839c73bae4947f8d43 Nov 26 07:02:34 crc kubenswrapper[4492]: I1126 07:02:34.276326 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-589cbd6b5b-47cqn"] Nov 26 07:02:34 crc kubenswrapper[4492]: I1126 07:02:34.287235 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-6b7f75547b-ttzjh"] Nov 26 07:02:34 crc kubenswrapper[4492]: I1126 07:02:34.291234 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-6fdcddb789-p9wsx"] Nov 26 07:02:34 crc kubenswrapper[4492]: I1126 07:02:34.307357 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/a2a736ce-f708-47dc-a121-75bc30b70b1e-cert\") pod \"openstack-baremetal-operator-controller-manager-6ffd77ccd-x5xwx\" (UID: \"a2a736ce-f708-47dc-a121-75bc30b70b1e\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6ffd77ccd-x5xwx" Nov 26 07:02:34 crc kubenswrapper[4492]: E1126 07:02:34.307568 4492 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 26 07:02:34 crc kubenswrapper[4492]: E1126 07:02:34.307611 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a2a736ce-f708-47dc-a121-75bc30b70b1e-cert podName:a2a736ce-f708-47dc-a121-75bc30b70b1e nodeName:}" failed. No retries permitted until 2025-11-26 07:02:36.307598959 +0000 UTC m=+852.191487257 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/a2a736ce-f708-47dc-a121-75bc30b70b1e-cert") pod "openstack-baremetal-operator-controller-manager-6ffd77ccd-x5xwx" (UID: "a2a736ce-f708-47dc-a121-75bc30b70b1e") : secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 26 07:02:34 crc kubenswrapper[4492]: I1126 07:02:34.319160 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-d77b94747-d6mh8"] Nov 26 07:02:34 crc kubenswrapper[4492]: I1126 07:02:34.332855 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-7b4567c7cf-wtbpv"] Nov 26 07:02:34 crc kubenswrapper[4492]: I1126 07:02:34.338913 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-7b64f4fb85-jh4gx"] Nov 26 07:02:34 crc kubenswrapper[4492]: W1126 07:02:34.390129 4492 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4bb97d9a_923e_4292_9cdb_4e764b2e90d6.slice/crio-cae66c7d2d79d410c1e0c839fb87ee224999aaa248cd42ed7c2471309d0bef96 WatchSource:0}: Error finding container cae66c7d2d79d410c1e0c839fb87ee224999aaa248cd42ed7c2471309d0bef96: Status 404 returned error can't find the container with id cae66c7d2d79d410c1e0c839fb87ee224999aaa248cd42ed7c2471309d0bef96 Nov 26 07:02:34 crc kubenswrapper[4492]: I1126 07:02:34.511116 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/3f02f890-8b9a-48b0-9b9d-7c8280f4639b-metrics-certs\") pod \"openstack-operator-controller-manager-56868586f6-kchvg\" (UID: \"3f02f890-8b9a-48b0-9b9d-7c8280f4639b\") " pod="openstack-operators/openstack-operator-controller-manager-56868586f6-kchvg" Nov 26 07:02:34 crc kubenswrapper[4492]: I1126 07:02:34.511235 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/3f02f890-8b9a-48b0-9b9d-7c8280f4639b-webhook-certs\") pod \"openstack-operator-controller-manager-56868586f6-kchvg\" (UID: \"3f02f890-8b9a-48b0-9b9d-7c8280f4639b\") " pod="openstack-operators/openstack-operator-controller-manager-56868586f6-kchvg" Nov 26 07:02:34 crc kubenswrapper[4492]: E1126 07:02:34.511410 4492 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Nov 26 07:02:34 crc kubenswrapper[4492]: E1126 07:02:34.511452 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3f02f890-8b9a-48b0-9b9d-7c8280f4639b-webhook-certs podName:3f02f890-8b9a-48b0-9b9d-7c8280f4639b nodeName:}" failed. No retries permitted until 2025-11-26 07:02:36.511438301 +0000 UTC m=+852.395326600 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/3f02f890-8b9a-48b0-9b9d-7c8280f4639b-webhook-certs") pod "openstack-operator-controller-manager-56868586f6-kchvg" (UID: "3f02f890-8b9a-48b0-9b9d-7c8280f4639b") : secret "webhook-server-cert" not found Nov 26 07:02:34 crc kubenswrapper[4492]: E1126 07:02:34.511780 4492 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Nov 26 07:02:34 crc kubenswrapper[4492]: E1126 07:02:34.511909 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3f02f890-8b9a-48b0-9b9d-7c8280f4639b-metrics-certs podName:3f02f890-8b9a-48b0-9b9d-7c8280f4639b nodeName:}" failed. No retries permitted until 2025-11-26 07:02:36.511881494 +0000 UTC m=+852.395769793 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/3f02f890-8b9a-48b0-9b9d-7c8280f4639b-metrics-certs") pod "openstack-operator-controller-manager-56868586f6-kchvg" (UID: "3f02f890-8b9a-48b0-9b9d-7c8280f4639b") : secret "metrics-server-cert" not found Nov 26 07:02:34 crc kubenswrapper[4492]: I1126 07:02:34.702579 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-xlj7m"] Nov 26 07:02:34 crc kubenswrapper[4492]: I1126 07:02:34.728820 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-56897c768d-5g6s2"] Nov 26 07:02:34 crc kubenswrapper[4492]: W1126 07:02:34.738288 4492 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda80f2e93_10b0_4168_9de5_8116b78676b2.slice/crio-c3d07154ae987376fa0f7003e45e08335e94325bc5a11d270f39998f11974a55 WatchSource:0}: Error finding container c3d07154ae987376fa0f7003e45e08335e94325bc5a11d270f39998f11974a55: Status 404 returned error can't find the container with id c3d07154ae987376fa0f7003e45e08335e94325bc5a11d270f39998f11974a55 Nov 26 07:02:34 crc kubenswrapper[4492]: E1126 07:02:34.740514 4492 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/watcher-operator@sha256:6bed55b172b9ee8ccc3952cbfc543d8bd44e2690f6db94348a754152fd78f4cf,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-zj9n9,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-operator-controller-manager-656dcb59d4-b88tl_openstack-operators(3a857a3d-1748-4fda-b8c6-4eb5ba0ffd02): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 26 07:02:34 crc kubenswrapper[4492]: E1126 07:02:34.750240 4492 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-zj9n9,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-operator-controller-manager-656dcb59d4-b88tl_openstack-operators(3a857a3d-1748-4fda-b8c6-4eb5ba0ffd02): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 26 07:02:34 crc kubenswrapper[4492]: E1126 07:02:34.750475 4492 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/octavia-operator@sha256:ddc8a82f05930db8ee7a8d6d189b5a66373060656e4baf71ac302f89c477da4c,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 
--metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-qlktm,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod octavia-operator-controller-manager-64cdc6ff96-2bvr2_openstack-operators(3939ad46-695a-430c-bffb-380a366540ab): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 26 07:02:34 crc kubenswrapper[4492]: E1126 07:02:34.750702 4492 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/nova-operator@sha256:c053e34316044f14929e16e4f0d97f9f1b24cb68b5e22b925ca74c66aaaed0a7,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-mvz46,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod nova-operator-controller-manager-79556f57fc-rxz2r_openstack-operators(9dc0fcb4-c9c5-48c7-ace0-1758df8292ef): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 26 07:02:34 crc kubenswrapper[4492]: E1126 07:02:34.751989 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-b88tl" podUID="3a857a3d-1748-4fda-b8c6-4eb5ba0ffd02" Nov 26 07:02:34 crc kubenswrapper[4492]: I1126 07:02:34.752656 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-5cd6c7f4c8-vv4f8"] Nov 26 07:02:34 crc kubenswrapper[4492]: E1126 07:02:34.768471 4492 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-mvz46,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod 
nova-operator-controller-manager-79556f57fc-rxz2r_openstack-operators(9dc0fcb4-c9c5-48c7-ace0-1758df8292ef): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 26 07:02:34 crc kubenswrapper[4492]: E1126 07:02:34.768490 4492 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-qlktm,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod octavia-operator-controller-manager-64cdc6ff96-2bvr2_openstack-operators(3939ad46-695a-430c-bffb-380a366540ab): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 26 07:02:34 crc kubenswrapper[4492]: E1126 07:02:34.769708 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-2bvr2" podUID="3939ad46-695a-430c-bffb-380a366540ab" Nov 26 07:02:34 crc kubenswrapper[4492]: E1126 07:02:34.769773 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rxz2r" podUID="9dc0fcb4-c9c5-48c7-ace0-1758df8292ef" Nov 26 07:02:34 crc kubenswrapper[4492]: E1126 07:02:34.770370 4492 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/telemetry-operator@sha256:7d66757c0af67104f0389e851a7cc0daa44443ad202d157417bd86bbb57cc385,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: 
{{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-chv8m,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod telemetry-operator-controller-manager-76cc84c6bb-dvbx8_openstack-operators(a80f2e93-10b0-4168-9de5-8116b78676b2): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 26 07:02:34 crc kubenswrapper[4492]: I1126 07:02:34.773220 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-q7sh4"] Nov 26 07:02:34 crc kubenswrapper[4492]: E1126 07:02:34.777585 4492 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-chv8m,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod 
telemetry-operator-controller-manager-76cc84c6bb-dvbx8_openstack-operators(a80f2e93-10b0-4168-9de5-8116b78676b2): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 26 07:02:34 crc kubenswrapper[4492]: I1126 07:02:34.777661 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-57988cc5b5-bwwkk"] Nov 26 07:02:34 crc kubenswrapper[4492]: E1126 07:02:34.778658 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-dvbx8" podUID="a80f2e93-10b0-4168-9de5-8116b78676b2" Nov 26 07:02:34 crc kubenswrapper[4492]: E1126 07:02:34.787697 4492 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/manila-operator@sha256:89910bc3ecceb7590d3207ac294eb7354de358cf39ef03c72323b26c598e50e6,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-vkbf6,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod manila-operator-controller-manager-5d499bf58b-vp7gp_openstack-operators(d6eb1e59-0edd-42de-98e6-42c9b95359e2): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 26 07:02:34 crc kubenswrapper[4492]: I1126 07:02:34.792309 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-dvbx8"] 
Nov 26 07:02:34 crc kubenswrapper[4492]: E1126 07:02:34.793016 4492 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-vkbf6,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod manila-operator-controller-manager-5d499bf58b-vp7gp_openstack-operators(d6eb1e59-0edd-42de-98e6-42c9b95359e2): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Nov 26 07:02:34 crc kubenswrapper[4492]: E1126 07:02:34.794411 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-vp7gp" podUID="d6eb1e59-0edd-42de-98e6-42c9b95359e2"
Nov 26 07:02:34 crc kubenswrapper[4492]: I1126 07:02:34.796214 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-656dcb59d4-b88tl"]
Nov 26 07:02:34 crc kubenswrapper[4492]: I1126 07:02:34.801105 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-64cdc6ff96-2bvr2"]
Nov 26 07:02:34 crc kubenswrapper[4492]: I1126 07:02:34.806079 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-5d499bf58b-vp7gp"]
Nov 26 07:02:34 crc kubenswrapper[4492]: I1126 07:02:34.809520 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-79556f57fc-rxz2r"]
Nov 26 07:02:34 crc kubenswrapper[4492]: I1126 07:02:34.816462 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-p9wsx" event={"ID":"9a52fd58-9281-424a-b211-975b007e5f38","Type":"ContainerStarted","Data":"92250929a61cfcf26de4e4129f720db60008d7b969087cd21ee80376f54a20cb"}
Nov 26 07:02:34 crc kubenswrapper[4492]: I1126 07:02:34.817951 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-vp7gp" event={"ID":"d6eb1e59-0edd-42de-98e6-42c9b95359e2","Type":"ContainerStarted","Data":"b78aefbaa1ed7abc07e9bc5f3ca9af0fb39288675a15d267c00910560d659a18"}
Nov 26 07:02:34 crc kubenswrapper[4492]: E1126 07:02:34.827278 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/manila-operator@sha256:89910bc3ecceb7590d3207ac294eb7354de358cf39ef03c72323b26c598e50e6\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-vp7gp" podUID="d6eb1e59-0edd-42de-98e6-42c9b95359e2"
Nov 26 07:02:34 crc kubenswrapper[4492]: I1126 07:02:34.828814 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-q7sh4" event={"ID":"72a74618-0600-42e3-8125-fd1be684497c","Type":"ContainerStarted","Data":"96a78c7554c0d252b937911b0fd0844b3a7085e5d9246fb248343db6b39b040c"}
Nov 26 07:02:34 crc kubenswrapper[4492]: I1126 07:02:34.836809 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-wtbpv" event={"ID":"783351b0-3a7d-4857-b972-ab027165e675","Type":"ContainerStarted","Data":"1533e7e5b55ca3ceeb0233448378d8d3690f149c9b955ced07ce89d1d5597536"}
Nov 26 07:02:34 crc kubenswrapper[4492]: I1126 07:02:34.849022 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-2bvr2" event={"ID":"3939ad46-695a-430c-bffb-380a366540ab","Type":"ContainerStarted","Data":"4f569a933c61a8b15e844d7362b8bab1b2e411b804ee9d65a8d7c1719c95bfb7"}
Nov 26 07:02:34 crc kubenswrapper[4492]: I1126 07:02:34.851944 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-jh4gx" event={"ID":"4bb97d9a-923e-4292-9cdb-4e764b2e90d6","Type":"ContainerStarted","Data":"cae66c7d2d79d410c1e0c839fb87ee224999aaa248cd42ed7c2471309d0bef96"}
Nov 26 07:02:34 crc kubenswrapper[4492]: E1126 07:02:34.853454 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/octavia-operator@sha256:ddc8a82f05930db8ee7a8d6d189b5a66373060656e4baf71ac302f89c477da4c\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-2bvr2" podUID="3939ad46-695a-430c-bffb-380a366540ab"
Nov 26 07:02:34 crc kubenswrapper[4492]: I1126 07:02:34.854252 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-b88tl" event={"ID":"3a857a3d-1748-4fda-b8c6-4eb5ba0ffd02","Type":"ContainerStarted","Data":"a843dddfd7ff41be9bd9f9b684a543483f51b2c553b2404f930d4f72c8e71301"}
Nov 26 07:02:34 crc kubenswrapper[4492]: E1126 07:02:34.858275 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/watcher-operator@sha256:6bed55b172b9ee8ccc3952cbfc543d8bd44e2690f6db94348a754152fd78f4cf\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-b88tl" podUID="3a857a3d-1748-4fda-b8c6-4eb5ba0ffd02"
Nov 26 07:02:34 crc kubenswrapper[4492]: I1126 07:02:34.861684 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-vv4f8" event={"ID":"fdb12da6-30c3-43f1-adbd-942cad0de886","Type":"ContainerStarted","Data":"3ee47aa04042a41df1073bafa6bef2e5b7139dc5372af27c09a6c5d4d14c4a04"}
Nov 26 07:02:34 crc kubenswrapper[4492]: I1126 07:02:34.862727 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-ttzjh" event={"ID":"3fb06526-065c-48bb-987d-02406113a06b","Type":"ContainerStarted","Data":"2ef4a45f8230bb0f0b58a749ddf8330c119cfcb312fcbc602951a59d5db1e0da"}
Nov 26 07:02:34 crc kubenswrapper[4492]: I1126 07:02:34.863829 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-47cqn" event={"ID":"7d85a216-d74b-4b58-94a9-2ccd5bfff7d4","Type":"ContainerStarted","Data":"43cb29fee217234b6c8c524cd961498c46cad9e849afb6b68f01bd0a4cdd5e40"}
Nov 26 07:02:34 crc kubenswrapper[4492]: I1126 07:02:34.867206 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-bwwkk" event={"ID":"1ece66ff-2fba-4ff2-b3fc-35c105b9915e","Type":"ContainerStarted","Data":"21dfc23dd03a883af7aa1d3fb9719d685efb4e2a0dc7653952259c4ee4a1b517"}
Nov 26 07:02:34 crc kubenswrapper[4492]: I1126 07:02:34.868457 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-dvbx8" event={"ID":"a80f2e93-10b0-4168-9de5-8116b78676b2","Type":"ContainerStarted","Data":"c3d07154ae987376fa0f7003e45e08335e94325bc5a11d270f39998f11974a55"}
Nov 26 07:02:34 crc kubenswrapper[4492]: E1126 07:02:34.874527 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/telemetry-operator@sha256:7d66757c0af67104f0389e851a7cc0daa44443ad202d157417bd86bbb57cc385\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-dvbx8" podUID="a80f2e93-10b0-4168-9de5-8116b78676b2"
Nov 26 07:02:34 crc kubenswrapper[4492]: I1126 07:02:34.884687 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-xlj7m" event={"ID":"0c48ee47-de9c-455c-a366-48c296180ff9","Type":"ContainerStarted","Data":"e86cb1a56ebf82a9f6d723618107346635b8051c977bb1b2c0dbc0b7fabdf2d7"}
Nov 26 07:02:34 crc kubenswrapper[4492]: I1126 07:02:34.886475 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-5g6s2" event={"ID":"095328fd-0dc7-49a2-86fa-53c5da397363","Type":"ContainerStarted","Data":"6ef5c8deeb761cbafa081b2406bd80bfcfbd13347fb52d2e90405d56838a2642"}
Nov 26 07:02:34 crc kubenswrapper[4492]: I1126 07:02:34.887445 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-d77b94747-d6mh8" event={"ID":"f99ecfd3-ef8b-4a69-bd29-e55b31465022","Type":"ContainerStarted","Data":"c3163cc3de373422337b13f3cffbe2cb5b450b5b663e7cc692c2879f04afbb09"}
Nov 26 07:02:34 crc kubenswrapper[4492]: I1126 07:02:34.889719 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-mw4rx" event={"ID":"0b086d5f-ee5c-48d2-bf56-29bd8b3b6ba8","Type":"ContainerStarted","Data":"12c2eb539eb4cd1b8b4ccdc4e6174e05b344b473561e63839c73bae4947f8d43"}
Nov 26 07:02:34 crc kubenswrapper[4492]: I1126 07:02:34.893354 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rxz2r" event={"ID":"9dc0fcb4-c9c5-48c7-ace0-1758df8292ef","Type":"ContainerStarted","Data":"c00d84c5117d50a302b4b003666adf8a008a2b467bdbe9d7136ac7205740edb6"}
Nov 26 07:02:34 crc kubenswrapper[4492]: E1126 07:02:34.897078 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/nova-operator@sha256:c053e34316044f14929e16e4f0d97f9f1b24cb68b5e22b925ca74c66aaaed0a7\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rxz2r" podUID="9dc0fcb4-c9c5-48c7-ace0-1758df8292ef"
Nov 26 07:02:35 crc kubenswrapper[4492]: E1126 07:02:35.966910 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/watcher-operator@sha256:6bed55b172b9ee8ccc3952cbfc543d8bd44e2690f6db94348a754152fd78f4cf\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-b88tl" podUID="3a857a3d-1748-4fda-b8c6-4eb5ba0ffd02"
Nov 26 07:02:35 crc kubenswrapper[4492]: E1126 07:02:35.967732 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/manila-operator@sha256:89910bc3ecceb7590d3207ac294eb7354de358cf39ef03c72323b26c598e50e6\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-vp7gp" podUID="d6eb1e59-0edd-42de-98e6-42c9b95359e2"
Nov 26 07:02:35 crc kubenswrapper[4492]: E1126 07:02:35.967847 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/nova-operator@sha256:c053e34316044f14929e16e4f0d97f9f1b24cb68b5e22b925ca74c66aaaed0a7\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rxz2r" podUID="9dc0fcb4-c9c5-48c7-ace0-1758df8292ef"
Nov 26 07:02:35 crc kubenswrapper[4492]: E1126 07:02:35.967861 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/telemetry-operator@sha256:7d66757c0af67104f0389e851a7cc0daa44443ad202d157417bd86bbb57cc385\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-dvbx8" podUID="a80f2e93-10b0-4168-9de5-8116b78676b2"
Nov 26 07:02:35 crc kubenswrapper[4492]: E1126 07:02:35.967976 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/octavia-operator@sha256:ddc8a82f05930db8ee7a8d6d189b5a66373060656e4baf71ac302f89c477da4c\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-2bvr2" podUID="3939ad46-695a-430c-bffb-380a366540ab"
Nov 26 07:02:36 crc kubenswrapper[4492]: I1126 07:02:36.079060 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/909cbb6d-59a9-40fb-b0c5-b08c10ef8097-cert\") pod \"infra-operator-controller-manager-57548d458d-bbzk8\" (UID: \"909cbb6d-59a9-40fb-b0c5-b08c10ef8097\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-bbzk8"
Nov 26 07:02:36 crc kubenswrapper[4492]: E1126 07:02:36.079227 4492 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found
Nov 26 07:02:36 crc kubenswrapper[4492]: E1126 07:02:36.079289 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/909cbb6d-59a9-40fb-b0c5-b08c10ef8097-cert podName:909cbb6d-59a9-40fb-b0c5-b08c10ef8097 nodeName:}" failed. No retries permitted until 2025-11-26 07:02:40.079271113 +0000 UTC m=+855.963159411 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/909cbb6d-59a9-40fb-b0c5-b08c10ef8097-cert") pod "infra-operator-controller-manager-57548d458d-bbzk8" (UID: "909cbb6d-59a9-40fb-b0c5-b08c10ef8097") : secret "infra-operator-webhook-server-cert" not found
Nov 26 07:02:36 crc kubenswrapper[4492]: I1126 07:02:36.384520 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/a2a736ce-f708-47dc-a121-75bc30b70b1e-cert\") pod \"openstack-baremetal-operator-controller-manager-6ffd77ccd-x5xwx\" (UID: \"a2a736ce-f708-47dc-a121-75bc30b70b1e\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6ffd77ccd-x5xwx"
Nov 26 07:02:36 crc kubenswrapper[4492]: E1126 07:02:36.384720 4492 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found
Nov 26 07:02:36 crc kubenswrapper[4492]: E1126 07:02:36.384775 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a2a736ce-f708-47dc-a121-75bc30b70b1e-cert podName:a2a736ce-f708-47dc-a121-75bc30b70b1e nodeName:}" failed. No retries permitted until 2025-11-26 07:02:40.384757199 +0000 UTC m=+856.268645496 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/a2a736ce-f708-47dc-a121-75bc30b70b1e-cert") pod "openstack-baremetal-operator-controller-manager-6ffd77ccd-x5xwx" (UID: "a2a736ce-f708-47dc-a121-75bc30b70b1e") : secret "openstack-baremetal-operator-webhook-server-cert" not found
Nov 26 07:02:36 crc kubenswrapper[4492]: I1126 07:02:36.592921 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/3f02f890-8b9a-48b0-9b9d-7c8280f4639b-metrics-certs\") pod \"openstack-operator-controller-manager-56868586f6-kchvg\" (UID: \"3f02f890-8b9a-48b0-9b9d-7c8280f4639b\") " pod="openstack-operators/openstack-operator-controller-manager-56868586f6-kchvg"
Nov 26 07:02:36 crc kubenswrapper[4492]: I1126 07:02:36.593081 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/3f02f890-8b9a-48b0-9b9d-7c8280f4639b-webhook-certs\") pod \"openstack-operator-controller-manager-56868586f6-kchvg\" (UID: \"3f02f890-8b9a-48b0-9b9d-7c8280f4639b\") " pod="openstack-operators/openstack-operator-controller-manager-56868586f6-kchvg"
Nov 26 07:02:36 crc kubenswrapper[4492]: E1126 07:02:36.593137 4492 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found
Nov 26 07:02:36 crc kubenswrapper[4492]: E1126 07:02:36.593238 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3f02f890-8b9a-48b0-9b9d-7c8280f4639b-metrics-certs podName:3f02f890-8b9a-48b0-9b9d-7c8280f4639b nodeName:}" failed. No retries permitted until 2025-11-26 07:02:40.593217149 +0000 UTC m=+856.477105447 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/3f02f890-8b9a-48b0-9b9d-7c8280f4639b-metrics-certs") pod "openstack-operator-controller-manager-56868586f6-kchvg" (UID: "3f02f890-8b9a-48b0-9b9d-7c8280f4639b") : secret "metrics-server-cert" not found
Nov 26 07:02:36 crc kubenswrapper[4492]: E1126 07:02:36.593293 4492 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found
Nov 26 07:02:36 crc kubenswrapper[4492]: E1126 07:02:36.593375 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3f02f890-8b9a-48b0-9b9d-7c8280f4639b-webhook-certs podName:3f02f890-8b9a-48b0-9b9d-7c8280f4639b nodeName:}" failed. No retries permitted until 2025-11-26 07:02:40.593355579 +0000 UTC m=+856.477243877 (durationBeforeRetry 4s).
Nov 26 07:02:40 crc kubenswrapper[4492]: I1126 07:02:40.152717 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/909cbb6d-59a9-40fb-b0c5-b08c10ef8097-cert\") pod \"infra-operator-controller-manager-57548d458d-bbzk8\" (UID: \"909cbb6d-59a9-40fb-b0c5-b08c10ef8097\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-bbzk8"
Nov 26 07:02:40 crc kubenswrapper[4492]: E1126 07:02:40.152867 4492 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found
Nov 26 07:02:40 crc kubenswrapper[4492]: E1126 07:02:40.152930 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/909cbb6d-59a9-40fb-b0c5-b08c10ef8097-cert podName:909cbb6d-59a9-40fb-b0c5-b08c10ef8097 nodeName:}" failed. No retries permitted until 2025-11-26 07:02:48.152914834 +0000 UTC m=+864.036803132 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/909cbb6d-59a9-40fb-b0c5-b08c10ef8097-cert") pod "infra-operator-controller-manager-57548d458d-bbzk8" (UID: "909cbb6d-59a9-40fb-b0c5-b08c10ef8097") : secret "infra-operator-webhook-server-cert" not found
Nov 26 07:02:40 crc kubenswrapper[4492]: I1126 07:02:40.455719 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/a2a736ce-f708-47dc-a121-75bc30b70b1e-cert\") pod \"openstack-baremetal-operator-controller-manager-6ffd77ccd-x5xwx\" (UID: \"a2a736ce-f708-47dc-a121-75bc30b70b1e\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6ffd77ccd-x5xwx"
Nov 26 07:02:40 crc kubenswrapper[4492]: E1126 07:02:40.455877 4492 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found
Nov 26 07:02:40 crc kubenswrapper[4492]: E1126 07:02:40.455930 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a2a736ce-f708-47dc-a121-75bc30b70b1e-cert podName:a2a736ce-f708-47dc-a121-75bc30b70b1e nodeName:}" failed. No retries permitted until 2025-11-26 07:02:48.455915396 +0000 UTC m=+864.339803694 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/a2a736ce-f708-47dc-a121-75bc30b70b1e-cert") pod "openstack-baremetal-operator-controller-manager-6ffd77ccd-x5xwx" (UID: "a2a736ce-f708-47dc-a121-75bc30b70b1e") : secret "openstack-baremetal-operator-webhook-server-cert" not found
Nov 26 07:02:40 crc kubenswrapper[4492]: I1126 07:02:40.658746 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/3f02f890-8b9a-48b0-9b9d-7c8280f4639b-metrics-certs\") pod \"openstack-operator-controller-manager-56868586f6-kchvg\" (UID: \"3f02f890-8b9a-48b0-9b9d-7c8280f4639b\") " pod="openstack-operators/openstack-operator-controller-manager-56868586f6-kchvg"
Nov 26 07:02:40 crc kubenswrapper[4492]: I1126 07:02:40.658851 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/3f02f890-8b9a-48b0-9b9d-7c8280f4639b-webhook-certs\") pod \"openstack-operator-controller-manager-56868586f6-kchvg\" (UID: \"3f02f890-8b9a-48b0-9b9d-7c8280f4639b\") " pod="openstack-operators/openstack-operator-controller-manager-56868586f6-kchvg"
Nov 26 07:02:40 crc kubenswrapper[4492]: E1126 07:02:40.658933 4492 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found
Nov 26 07:02:40 crc kubenswrapper[4492]: E1126 07:02:40.659018 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3f02f890-8b9a-48b0-9b9d-7c8280f4639b-metrics-certs podName:3f02f890-8b9a-48b0-9b9d-7c8280f4639b nodeName:}" failed. No retries permitted until 2025-11-26 07:02:48.658988197 +0000 UTC m=+864.542876495 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/3f02f890-8b9a-48b0-9b9d-7c8280f4639b-metrics-certs") pod "openstack-operator-controller-manager-56868586f6-kchvg" (UID: "3f02f890-8b9a-48b0-9b9d-7c8280f4639b") : secret "metrics-server-cert" not found
Nov 26 07:02:40 crc kubenswrapper[4492]: E1126 07:02:40.659041 4492 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found
Nov 26 07:02:40 crc kubenswrapper[4492]: E1126 07:02:40.659103 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3f02f890-8b9a-48b0-9b9d-7c8280f4639b-webhook-certs podName:3f02f890-8b9a-48b0-9b9d-7c8280f4639b nodeName:}" failed. No retries permitted until 2025-11-26 07:02:48.659087273 +0000 UTC m=+864.542975561 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/3f02f890-8b9a-48b0-9b9d-7c8280f4639b-webhook-certs") pod "openstack-operator-controller-manager-56868586f6-kchvg" (UID: "3f02f890-8b9a-48b0-9b9d-7c8280f4639b") : secret "webhook-server-cert" not found
Nov 26 07:02:47 crc kubenswrapper[4492]: E1126 07:02:47.738224 4492 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/test-operator@sha256:210517b918e30df1c95fc7d961c8e57e9a9d1cc2b9fe7eb4dad2034dd53a90aa"
Nov 26 07:02:47 crc kubenswrapper[4492]: E1126 07:02:47.740205 4492 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/test-operator@sha256:210517b918e30df1c95fc7d961c8e57e9a9d1cc2b9fe7eb4dad2034dd53a90aa,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-77rqd,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod test-operator-controller-manager-5cd6c7f4c8-vv4f8_openstack-operators(fdb12da6-30c3-43f1-adbd-942cad0de886): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 26 07:02:48 crc kubenswrapper[4492]: I1126 07:02:48.178165 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/909cbb6d-59a9-40fb-b0c5-b08c10ef8097-cert\") pod \"infra-operator-controller-manager-57548d458d-bbzk8\" (UID: \"909cbb6d-59a9-40fb-b0c5-b08c10ef8097\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-bbzk8"
Nov 26 07:02:48 crc kubenswrapper[4492]: I1126 07:02:48.194753 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/909cbb6d-59a9-40fb-b0c5-b08c10ef8097-cert\") pod \"infra-operator-controller-manager-57548d458d-bbzk8\" (UID: \"909cbb6d-59a9-40fb-b0c5-b08c10ef8097\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-bbzk8"
Nov 26 07:02:48 crc kubenswrapper[4492]: E1126 07:02:48.284017 4492 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/barbican-operator@sha256:3dbf9fd9dce75f1fb250ee4c4097ad77d2f34110b61d85e37abd9c472e022e6c"
Nov 26 07:02:48 crc kubenswrapper[4492]: E1126 07:02:48.284250 4492 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/barbican-operator@sha256:3dbf9fd9dce75f1fb250ee4c4097ad77d2f34110b61d85e37abd9c472e022e6c,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-grqck,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod barbican-operator-controller-manager-7b64f4fb85-jh4gx_openstack-operators(4bb97d9a-923e-4292-9cdb-4e764b2e90d6): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 26 07:02:48 crc kubenswrapper[4492]: I1126 07:02:48.421371 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-57548d458d-bbzk8"
Nov 26 07:02:48 crc kubenswrapper[4492]: I1126 07:02:48.482465 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/a2a736ce-f708-47dc-a121-75bc30b70b1e-cert\") pod \"openstack-baremetal-operator-controller-manager-6ffd77ccd-x5xwx\" (UID: \"a2a736ce-f708-47dc-a121-75bc30b70b1e\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6ffd77ccd-x5xwx"
Nov 26 07:02:48 crc kubenswrapper[4492]: I1126 07:02:48.486706 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/a2a736ce-f708-47dc-a121-75bc30b70b1e-cert\") pod \"openstack-baremetal-operator-controller-manager-6ffd77ccd-x5xwx\" (UID: \"a2a736ce-f708-47dc-a121-75bc30b70b1e\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6ffd77ccd-x5xwx"
Nov 26 07:02:48 crc kubenswrapper[4492]: I1126 07:02:48.549480 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6ffd77ccd-x5xwx"
Nov 26 07:02:48 crc kubenswrapper[4492]: I1126 07:02:48.689527 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/3f02f890-8b9a-48b0-9b9d-7c8280f4639b-metrics-certs\") pod \"openstack-operator-controller-manager-56868586f6-kchvg\" (UID: \"3f02f890-8b9a-48b0-9b9d-7c8280f4639b\") " pod="openstack-operators/openstack-operator-controller-manager-56868586f6-kchvg"
Nov 26 07:02:48 crc kubenswrapper[4492]: I1126 07:02:48.689609 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/3f02f890-8b9a-48b0-9b9d-7c8280f4639b-webhook-certs\") pod \"openstack-operator-controller-manager-56868586f6-kchvg\" (UID: \"3f02f890-8b9a-48b0-9b9d-7c8280f4639b\") " pod="openstack-operators/openstack-operator-controller-manager-56868586f6-kchvg"
Nov 26 07:02:48 crc kubenswrapper[4492]: I1126 07:02:48.695682 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/3f02f890-8b9a-48b0-9b9d-7c8280f4639b-webhook-certs\") pod \"openstack-operator-controller-manager-56868586f6-kchvg\" (UID: \"3f02f890-8b9a-48b0-9b9d-7c8280f4639b\") " pod="openstack-operators/openstack-operator-controller-manager-56868586f6-kchvg"
Nov 26 07:02:48 crc kubenswrapper[4492]: I1126 07:02:48.714059 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/3f02f890-8b9a-48b0-9b9d-7c8280f4639b-metrics-certs\") pod \"openstack-operator-controller-manager-56868586f6-kchvg\" (UID: \"3f02f890-8b9a-48b0-9b9d-7c8280f4639b\") " pod="openstack-operators/openstack-operator-controller-manager-56868586f6-kchvg"
Nov 26 07:02:48 crc kubenswrapper[4492]: I1126 07:02:48.814057 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-56868586f6-kchvg"
Nov 26 07:02:49 crc kubenswrapper[4492]: E1126 07:02:49.341958 4492 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/designate-operator@sha256:ec4e5c911c1d0f1ea211a04b251a9d2e95b69d141c1caf07a0381693b2d6368b"
Nov 26 07:02:49 crc kubenswrapper[4492]: E1126 07:02:49.342300 4492 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/designate-operator@sha256:ec4e5c911c1d0f1ea211a04b251a9d2e95b69d141c1caf07a0381693b2d6368b,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-m2zvk,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod designate-operator-controller-manager-955677c94-727xv_openstack-operators(bb3706c6-2488-48fd-82f0-902371c46441): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 26 07:02:49 crc kubenswrapper[4492]: E1126 07:02:49.933514 4492 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/mariadb-operator@sha256:888edf6f432e52eaa5fc3caeae616fe38a3302b006bbba0e38885b2beba9f0f2"
Nov 26 07:02:49 crc kubenswrapper[4492]: E1126 07:02:49.933784 4492 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/mariadb-operator@sha256:888edf6f432e52eaa5fc3caeae616fe38a3302b006bbba0e38885b2beba9f0f2,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-khhzh,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod mariadb-operator-controller-manager-66f4dd4bc7-mw4rx_openstack-operators(0b086d5f-ee5c-48d2-bf56-29bd8b3b6ba8): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 26 07:02:50 crc kubenswrapper[4492]: E1126 07:02:50.805500 4492 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/horizon-operator@sha256:9413ed1bc2ae1a6bd28c59b1c7f7e91e1638de7b2a7d4729ed3fa2135182465d"
Nov 26 07:02:50 crc kubenswrapper[4492]: E1126 07:02:50.806277 4492 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/horizon-operator@sha256:9413ed1bc2ae1a6bd28c59b1c7f7e91e1638de7b2a7d4729ed3fa2135182465d,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-jjnfp,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod horizon-operator-controller-manager-5d494799bf-jffbf_openstack-operators(9dce4823-5319-44c6-aa25-bc5082014598): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 26 07:02:53 crc kubenswrapper[4492]: E1126 07:02:53.053122 4492 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/ironic-operator@sha256:d65dbfc956e9cf376f3c48fc3a0942cb7306b5164f898c40d1efca106df81db7"
Nov 26 07:02:53 crc kubenswrapper[4492]: E1126 07:02:53.054116 4492 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/ironic-operator@sha256:d65dbfc956e9cf376f3c48fc3a0942cb7306b5164f898c40d1efca106df81db7,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-nfrqn,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ironic-operator-controller-manager-67cb4dc6d4-q7sh4_openstack-operators(72a74618-0600-42e3-8125-fd1be684497c): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 26 07:02:53 crc kubenswrapper[4492]: E1126 07:02:53.602372 4492 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/keystone-operator@sha256:25faa5b0e4801d4d3b01a28b877ed3188eee71f33ad66f3c2e86b7921758e711"
Nov 26 07:02:53 crc kubenswrapper[4492]: E1126 07:02:53.602507 4492 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/keystone-operator@sha256:25faa5b0e4801d4d3b01a28b877ed3188eee71f33ad66f3c2e86b7921758e711,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-nw4qz,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod keystone-operator-controller-manager-7b4567c7cf-wtbpv_openstack-operators(783351b0-3a7d-4857-b972-ab027165e675): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 26 07:02:55 crc kubenswrapper[4492]: E1126 07:02:55.729573 4492 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2"
Nov 26 07:02:55 crc kubenswrapper[4492]: E1126 07:02:55.730446 4492 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:operator,Image:quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2,Command:[/manager],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:metrics,HostPort:0,ContainerPort:9782,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:OPERATOR_NAMESPACE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{200 -3} {} 200m DecimalSI},memory: {{524288000 0} {} 500Mi BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-pts9g,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cluster-operator-manager-668c99d594-xlj7m_openstack-operators(0c48ee47-de9c-455c-a366-48c296180ff9): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 26 07:02:55 crc kubenswrapper[4492]: E1126 07:02:55.731670 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-xlj7m" podUID="0c48ee47-de9c-455c-a366-48c296180ff9"
Nov 26 07:02:56 crc kubenswrapper[4492]: E1126 07:02:56.083325 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-xlj7m" podUID="0c48ee47-de9c-455c-a366-48c296180ff9"
Nov 26 07:02:59 crc kubenswrapper[4492]: I1126 07:02:59.084414 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-56868586f6-kchvg"]
Nov 26 07:02:59 crc kubenswrapper[4492]: I1126 07:02:59.137233 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-6ffd77ccd-x5xwx"]
Nov 26 07:02:59 crc kubenswrapper[4492]: I1126 07:02:59.166240 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-57548d458d-bbzk8"]
Nov 26 07:02:59 crc kubenswrapper[4492]: W1126 07:02:59.466383 4492 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3f02f890_8b9a_48b0_9b9d_7c8280f4639b.slice/crio-e7d9e3dbd98d6471cfb0cd3dd85f78f4dd735ff2f2b83074caf87caa356338dd WatchSource:0}: Error finding container e7d9e3dbd98d6471cfb0cd3dd85f78f4dd735ff2f2b83074caf87caa356338dd: Status 404 returned error can't find the container with id e7d9e3dbd98d6471cfb0cd3dd85f78f4dd735ff2f2b83074caf87caa356338dd
Nov 26 07:02:59 crc kubenswrapper[4492]: W1126 07:02:59.470581 4492 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod909cbb6d_59a9_40fb_b0c5_b08c10ef8097.slice/crio-f5391b044e7d3766230a17c27f8d4fb6d080e0ddf3d41195584ac0f3cef8d39a WatchSource:0}: Error finding container f5391b044e7d3766230a17c27f8d4fb6d080e0ddf3d41195584ac0f3cef8d39a: Status 404 returned error can't find the container with id f5391b044e7d3766230a17c27f8d4fb6d080e0ddf3d41195584ac0f3cef8d39a
Nov 26 07:03:00 crc kubenswrapper[4492]: I1126 07:03:00.174123 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-5g6s2" event={"ID":"095328fd-0dc7-49a2-86fa-53c5da397363","Type":"ContainerStarted","Data":"7a0184729887c9baee5eff9a7e78d29ba339fbf0ec972e89e524f7618a2d7172"}
Nov 26 07:03:00 crc kubenswrapper[4492]: I1126 07:03:00.182384 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6ffd77ccd-x5xwx" event={"ID":"a2a736ce-f708-47dc-a121-75bc30b70b1e","Type":"ContainerStarted","Data":"8c64d6964001c1ed867a6d354b8f51213b62093e4c1c227d06444eb11b5ac986"}
Nov 26 07:03:00 crc kubenswrapper[4492]: I1126 07:03:00.184078 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-d77b94747-d6mh8" event={"ID":"f99ecfd3-ef8b-4a69-bd29-e55b31465022","Type":"ContainerStarted","Data":"2e77bec0311899310b16a8b545b1adfadf74a6baa1535758c2165b4839aca7b6"}
Nov 26 07:03:00 crc kubenswrapper[4492]: I1126 07:03:00.190333 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-bwwkk" event={"ID":"1ece66ff-2fba-4ff2-b3fc-35c105b9915e","Type":"ContainerStarted","Data":"eafce4b01d708ff6dc5c015d2cc2ab79106d620febaadb55def2fb9da5724253"}
Nov 26 07:03:00 crc kubenswrapper[4492]: I1126 07:03:00.202293 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-56868586f6-kchvg" event={"ID":"3f02f890-8b9a-48b0-9b9d-7c8280f4639b","Type":"ContainerStarted","Data":"e7d9e3dbd98d6471cfb0cd3dd85f78f4dd735ff2f2b83074caf87caa356338dd"}
Nov 26 07:03:00 crc kubenswrapper[4492]: I1126 07:03:00.206803 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-ttzjh" event={"ID":"3fb06526-065c-48bb-987d-02406113a06b","Type":"ContainerStarted","Data":"43a3087a7e8712b9876ab91d36bc188fe9552481dc0924cc3a1124dc4133e63b"}
Nov 26 07:03:00 crc kubenswrapper[4492]: I1126 07:03:00.219037 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-47cqn" event={"ID":"7d85a216-d74b-4b58-94a9-2ccd5bfff7d4","Type":"ContainerStarted","Data":"47bb44ce589c7ff09b838c6dd8be82f6743575d3c51211d6f92c22ba19c7bf50"}
Nov 26 07:03:00 crc kubenswrapper[4492]: I1126 07:03:00.226924 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-57548d458d-bbzk8" event={"ID":"909cbb6d-59a9-40fb-b0c5-b08c10ef8097","Type":"ContainerStarted","Data":"f5391b044e7d3766230a17c27f8d4fb6d080e0ddf3d41195584ac0f3cef8d39a"}
Nov 26 07:03:00 crc kubenswrapper[4492]: I1126 07:03:00.253286 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-pvxlk" event={"ID":"6a3be658-cdd4-45bb-b0ba-e7e99253e0d9","Type":"ContainerStarted","Data":"f178cc29fb4e1992958d19a6e96033d566e9a4006ec25b5b663792e343097ea9"}
Nov 26 07:03:01 crc kubenswrapper[4492]: I1126 07:03:01.270803 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-2bvr2" event={"ID":"3939ad46-695a-430c-bffb-380a366540ab","Type":"ContainerStarted","Data":"19870798ebcfd4fb08078a136e35207f7269d66513ca7845be0f9a4c7c8c94ee"}
Nov 26 07:03:01 crc kubenswrapper[4492]: I1126 07:03:01.272620 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-56868586f6-kchvg" event={"ID":"3f02f890-8b9a-48b0-9b9d-7c8280f4639b","Type":"ContainerStarted","Data":"c93405702335aa5d75ed1afe4d7335113a1861c165a46268f79037b9d5a00e20"}
Nov 26 07:03:01 crc kubenswrapper[4492]: I1126 07:03:01.272780 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-manager-56868586f6-kchvg"
Nov 26 07:03:01 crc kubenswrapper[4492]: I1126 07:03:01.274816 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-p9wsx" event={"ID":"9a52fd58-9281-424a-b211-975b007e5f38","Type":"ContainerStarted","Data":"aa9333292412f34b3cfb2080714a6f954692257f2a4f3fce46a6694fd68335a0"}
Nov 26 07:03:01 crc kubenswrapper[4492]: I1126 07:03:01.278083 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rxz2r" event={"ID":"9dc0fcb4-c9c5-48c7-ace0-1758df8292ef","Type":"ContainerStarted","Data":"b6eff2257991041368483fe14e0610410fe3e2d72d3519cc57c8bb00a8fb09e2"}
Nov 26 07:03:02 crc kubenswrapper[4492]: I1126 07:03:02.286151 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-dvbx8" event={"ID":"a80f2e93-10b0-4168-9de5-8116b78676b2","Type":"ContainerStarted","Data":"31e601c8518cb00a57bf192801212321b59586a41c1f3242a96e45776941ccd3"}
Nov 26 07:03:03 crc kubenswrapper[4492]: I1126 07:03:03.298264 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-b88tl" event={"ID":"3a857a3d-1748-4fda-b8c6-4eb5ba0ffd02","Type":"ContainerStarted","Data":"c5cc2445becf5533ce5648f2c255cb1b1ab02fc68e2055817440c35a499fade9"}
Nov 26 07:03:03 crc kubenswrapper[4492]: I1126 07:03:03.304342 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-vp7gp" event={"ID":"d6eb1e59-0edd-42de-98e6-42c9b95359e2","Type":"ContainerStarted","Data":"b90f40ad167eced1338d4ac29bfc98fedf28c01389d0c14213fcffbc1bd9a15f"}
Nov 26 07:03:04 crc kubenswrapper[4492]: E1126 07:03:04.177461 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-vv4f8" podUID="fdb12da6-30c3-43f1-adbd-942cad0de886"
Nov 26 07:03:04 crc kubenswrapper[4492]: E1126 07:03:04.253976 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-jh4gx" podUID="4bb97d9a-923e-4292-9cdb-4e764b2e90d6"
Nov 26 07:03:04 crc kubenswrapper[4492]: E1126 07:03:04.384443 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-wtbpv" podUID="783351b0-3a7d-4857-b972-ab027165e675"
Nov 26 07:03:04 crc kubenswrapper[4492]: I1126 07:03:04.387881 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-dvbx8" event={"ID":"a80f2e93-10b0-4168-9de5-8116b78676b2","Type":"ContainerStarted","Data":"09604a1c324b44e1e009ab222d6aef5867f4ce5c0fbe25f6ef8f39fd7b8bdab2"}
Nov 26 07:03:04 crc kubenswrapper[4492]: I1126 07:03:04.388287 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-dvbx8"
Nov 26 07:03:04 crc kubenswrapper[4492]: I1126 07:03:04.395832 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-vv4f8" event={"ID":"fdb12da6-30c3-43f1-adbd-942cad0de886","Type":"ContainerStarted","Data":"6b4e66fa11b52444f5e4acfc4e55b46bb151952ee830281cd2abd606cd5444b4"}
Nov 26 07:03:04 crc kubenswrapper[4492]: I1126 07:03:04.403390 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6ffd77ccd-x5xwx" event={"ID":"a2a736ce-f708-47dc-a121-75bc30b70b1e","Type":"ContainerStarted","Data":"db3b9ddf262c74f415c7e121689cb1abac8e6a16c72a6140b4c0de47ecd00795"}
Nov 26 07:03:04 crc kubenswrapper[4492]: I1126 07:03:04.403921 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-manager-56868586f6-kchvg" podStartSLOduration=32.403910724 podStartE2EDuration="32.403910724s" podCreationTimestamp="2025-11-26 07:02:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:03:01.298296844 +0000 UTC m=+877.182185142" watchObservedRunningTime="2025-11-26 07:03:04.403910724 +0000 UTC m=+880.287799022"
Nov 26 07:03:04 crc kubenswrapper[4492]: I1126 07:03:04.404367 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-dvbx8" podStartSLOduration=3.347007744 podStartE2EDuration="32.404359247s" podCreationTimestamp="2025-11-26 07:02:32 +0000 UTC" firstStartedPulling="2025-11-26 07:02:34.770292872 +0000 UTC m=+850.654181170" lastFinishedPulling="2025-11-26 07:03:03.827644375 +0000 UTC m=+879.711532673" observedRunningTime="2025-11-26 07:03:04.402304995 +0000 UTC m=+880.286193293" watchObservedRunningTime="2025-11-26 07:03:04.404359247 +0000 UTC m=+880.288247545"
Nov 26 07:03:04 crc kubenswrapper[4492]: I1126 07:03:04.411689 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-57548d458d-bbzk8" event={"ID":"909cbb6d-59a9-40fb-b0c5-b08c10ef8097","Type":"ContainerStarted","Data":"b80d8f6f4230f8a783b6c50bd9df57a84920383a43a6e8487a677ef053f5c6f5"}
Nov 26 07:03:04 crc kubenswrapper[4492]: I1126 07:03:04.428381 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-vp7gp" event={"ID":"d6eb1e59-0edd-42de-98e6-42c9b95359e2","Type":"ContainerStarted","Data":"7e1b68634c8208ea0d7f8504561fc4e2b8d93808aea24dee220fe97f5d78f5ae"}
Nov 26 07:03:04 crc kubenswrapper[4492]: I1126 07:03:04.428456 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-vp7gp"
Nov 26 07:03:04 crc kubenswrapper[4492]: I1126 07:03:04.451509 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-vp7gp" podStartSLOduration=7.800985417 podStartE2EDuration="32.45150008s" podCreationTimestamp="2025-11-26 07:02:32 +0000 UTC" firstStartedPulling="2025-11-26 07:02:34.787605613 +0000 UTC m=+850.671493911" lastFinishedPulling="2025-11-26 07:02:59.438120276 +0000 UTC m=+875.322008574" observedRunningTime="2025-11-26 07:03:04.450851951 +0000 UTC m=+880.334740249" watchObservedRunningTime="2025-11-26 07:03:04.45150008 +0000 UTC m=+880.335388378"
Nov 26 07:03:04 crc kubenswrapper[4492]: I1126 07:03:04.456134 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-jh4gx" event={"ID":"4bb97d9a-923e-4292-9cdb-4e764b2e90d6","Type":"ContainerStarted","Data":"afd9d8157c93eab2fa6d667e2e101a4371384ceb9f855c4aff14d7a7787fad47"}
Nov 26 07:03:04 crc kubenswrapper[4492]: I1126 07:03:04.465214 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-b88tl" event={"ID":"3a857a3d-1748-4fda-b8c6-4eb5ba0ffd02","Type":"ContainerStarted","Data":"239a5ac4d01593d2311c7eaa55d6fb6bb2a62e1b424dda062213a5edb141b2c9"}
Nov 26 07:03:04 crc kubenswrapper[4492]: I1126 07:03:04.465252 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-b88tl"
Nov 26 07:03:04 crc kubenswrapper[4492]: I1126 07:03:04.472656 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-ttzjh" event={"ID":"3fb06526-065c-48bb-987d-02406113a06b","Type":"ContainerStarted","Data":"2a2d7920938f0f88c1642e4c5cc0b39c332273fd10ac859ed87c7b088f3ec8b6"}
Nov 26 07:03:04 crc kubenswrapper[4492]: I1126 07:03:04.472698 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-ttzjh"
Nov 26 07:03:04 crc kubenswrapper[4492]: I1126 07:03:04.489250 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-ttzjh"
Nov 26 07:03:04 crc kubenswrapper[4492]: E1126 07:03:04.608477 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-q7sh4" podUID="72a74618-0600-42e3-8125-fd1be684497c"
Nov 26 07:03:04 crc kubenswrapper[4492]: I1126 07:03:04.623201 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-b88tl" podStartSLOduration=8.488822617 podStartE2EDuration="32.623186269s" podCreationTimestamp="2025-11-26 07:02:32 +0000 UTC" firstStartedPulling="2025-11-26 07:02:34.740396823 +0000 UTC m=+850.624285121" lastFinishedPulling="2025-11-26 07:02:58.874760484 +0000 UTC m=+874.758648773" observedRunningTime="2025-11-26 07:03:04.61126767 +0000 UTC m=+880.495155968" watchObservedRunningTime="2025-11-26 07:03:04.623186269 +0000 UTC m=+880.507074566"
Nov 26 07:03:04 crc kubenswrapper[4492]: E1126 07:03:04.894460 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-jffbf" podUID="9dce4823-5319-44c6-aa25-bc5082014598"
Nov 26 07:03:05 crc kubenswrapper[4492]: E1126 07:03:05.112672 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-mw4rx" podUID="0b086d5f-ee5c-48d2-bf56-29bd8b3b6ba8"
Nov 26 07:03:05 crc kubenswrapper[4492]: E1126 07:03:05.117858 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/designate-operator-controller-manager-955677c94-727xv" podUID="bb3706c6-2488-48fd-82f0-902371c46441"
Nov 26 07:03:05 crc kubenswrapper[4492]: I1126 07:03:05.499491 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-pvxlk" event={"ID":"6a3be658-cdd4-45bb-b0ba-e7e99253e0d9","Type":"ContainerStarted","Data":"45fb90f7ee8060efa59625ec7dea709e0e2cb07e94d9bae8ff73fd7af5c939ca"}
Nov 26 07:03:05 crc kubenswrapper[4492]: I1126 07:03:05.499971 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-pvxlk"
Nov 26 07:03:05 crc kubenswrapper[4492]: I1126 07:03:05.502071 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-pvxlk"
Nov 26 07:03:05 crc kubenswrapper[4492]: I1126 07:03:05.502878 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-d77b94747-d6mh8" event={"ID":"f99ecfd3-ef8b-4a69-bd29-e55b31465022","Type":"ContainerStarted","Data":"59cb3e8129b9f4d15e1a9822ba5aca752b744bcf2f2cf382c5eef6e7c15eeca7"}
Nov 26 07:03:05 crc kubenswrapper[4492]: I1126 07:03:05.503140 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-d77b94747-d6mh8"
Nov 26 07:03:05 crc kubenswrapper[4492]: I1126 07:03:05.506728 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/swift-operator-controller-manager-d77b94747-d6mh8"
Nov 26 07:03:05 crc kubenswrapper[4492]: I1126 07:03:05.508054 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-bwwkk" event={"ID":"1ece66ff-2fba-4ff2-b3fc-35c105b9915e","Type":"ContainerStarted","Data":"948e985ed17b271ae0b6f396ce855fed317ebb4061d00c4479f67967082f8830"}
Nov 26 07:03:05 crc kubenswrapper[4492]: I1126 07:03:05.510627 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-wtbpv" event={"ID":"783351b0-3a7d-4857-b972-ab027165e675","Type":"ContainerStarted","Data":"ce59893cca331cba937bc68f74b6448ac5e97014e5bfabdd1ad559b5d6200818"}
Nov 26 07:03:05 crc kubenswrapper[4492]: I1126 07:03:05.512501 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-57548d458d-bbzk8" event={"ID":"909cbb6d-59a9-40fb-b0c5-b08c10ef8097","Type":"ContainerStarted","Data":"b382d3069accb175cf80b1295adfebd2350eb4e03faa3b4e53b980e02f4fb719"}
Nov 26 07:03:05 crc kubenswrapper[4492]: I1126 07:03:05.514188 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-q7sh4" event={"ID":"72a74618-0600-42e3-8125-fd1be684497c","Type":"ContainerStarted","Data":"1a7629f9b16fafdb3495e531e2d01005e710082efb66918ae835a53f56480713"}
Nov 26 07:03:05 crc kubenswrapper[4492]: I1126 07:03:05.515803 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-955677c94-727xv" event={"ID":"bb3706c6-2488-48fd-82f0-902371c46441","Type":"ContainerStarted","Data":"ebe0a4e14fe2318c6cb37e6d7f2f42174bcb5c5e6ee04b14641ba664e2622512"}
Nov 26 07:03:05 crc kubenswrapper[4492]: I1126 07:03:05.532605 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-mw4rx" event={"ID":"0b086d5f-ee5c-48d2-bf56-29bd8b3b6ba8","Type":"ContainerStarted","Data":"7eb4ca196cfc1c6d117e53d3c9bfe7a2562543b6111a4c1bd391d875b39b33fc"}
Nov 26 07:03:05 crc kubenswrapper[4492]: I1126 07:03:05.534577 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-ttzjh" podStartSLOduration=5.070230104 podStartE2EDuration="34.534565446s" podCreationTimestamp="2025-11-26 07:02:31 +0000 UTC" firstStartedPulling="2025-11-26 07:02:34.390110585 +0000 UTC m=+850.273998883" lastFinishedPulling="2025-11-26 07:03:03.854445936 +0000 UTC m=+879.738334225" observedRunningTime="2025-11-26 07:03:04.662302745 +0000 UTC m=+880.546191043" watchObservedRunningTime="2025-11-26 07:03:05.534565446 +0000 UTC m=+881.418453745"
07:03:05.534577 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-ttzjh" podStartSLOduration=5.070230104 podStartE2EDuration="34.534565446s" podCreationTimestamp="2025-11-26 07:02:31 +0000 UTC" firstStartedPulling="2025-11-26 07:02:34.390110585 +0000 UTC m=+850.273998883" lastFinishedPulling="2025-11-26 07:03:03.854445936 +0000 UTC m=+879.738334225" observedRunningTime="2025-11-26 07:03:04.662302745 +0000 UTC m=+880.546191043" watchObservedRunningTime="2025-11-26 07:03:05.534565446 +0000 UTC m=+881.418453745"
Nov 26 07:03:05 crc kubenswrapper[4492]: I1126 07:03:05.545271 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-pvxlk" podStartSLOduration=3.795356074 podStartE2EDuration="34.537767506s" podCreationTimestamp="2025-11-26 07:02:31 +0000 UTC" firstStartedPulling="2025-11-26 07:02:33.085049319 +0000 UTC m=+848.968937606" lastFinishedPulling="2025-11-26 07:03:03.82746074 +0000 UTC m=+879.711349038" observedRunningTime="2025-11-26 07:03:05.532937625 +0000 UTC m=+881.416825923" watchObservedRunningTime="2025-11-26 07:03:05.537767506 +0000 UTC m=+881.421655805"
Nov 26 07:03:05 crc kubenswrapper[4492]: I1126 07:03:05.566276 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-p9wsx" event={"ID":"9a52fd58-9281-424a-b211-975b007e5f38","Type":"ContainerStarted","Data":"05eae281d2ba1bde911dc5e33d25db025de15e2aa6763bd500397748099b3166"}
Nov 26 07:03:05 crc kubenswrapper[4492]: I1126 07:03:05.567107 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-p9wsx"
Nov 26 07:03:05 crc kubenswrapper[4492]: I1126 07:03:05.569622 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-p9wsx"
Nov 26 07:03:05 crc kubenswrapper[4492]: I1126 07:03:05.598570 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-5g6s2" event={"ID":"095328fd-0dc7-49a2-86fa-53c5da397363","Type":"ContainerStarted","Data":"d6813e3c175d11f8be0d6112c586cbd6529412d41d7ed6d3df2bdf331a3219ae"}
Nov 26 07:03:05 crc kubenswrapper[4492]: I1126 07:03:05.599025 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-5g6s2"
Nov 26 07:03:05 crc kubenswrapper[4492]: I1126 07:03:05.614375 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-5g6s2"
Nov 26 07:03:05 crc kubenswrapper[4492]: I1126 07:03:05.650908 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/swift-operator-controller-manager-d77b94747-d6mh8" podStartSLOduration=4.212802314 podStartE2EDuration="33.650866652s" podCreationTimestamp="2025-11-26 07:02:32 +0000 UTC" firstStartedPulling="2025-11-26 07:02:34.390234027 +0000 UTC m=+850.274122325" lastFinishedPulling="2025-11-26 07:03:03.828298365 +0000 UTC m=+879.712186663" observedRunningTime="2025-11-26 07:03:05.613829777 +0000 UTC m=+881.497718075" watchObservedRunningTime="2025-11-26 07:03:05.650866652 +0000 UTC m=+881.534754950"
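The pod_startup_latency_tracker records above encode a simple relationship: podStartE2EDuration is watchObservedRunningTime minus podCreationTimestamp, and podStartSLOduration is that same interval with the image-pull window (firstStartedPulling to lastFinishedPulling) subtracted, i.e. startup time excluding pulls. A minimal Go sketch reproducing the cinder-operator record's numbers; this is illustrative arithmetic over values copied from the log, not kubelet source:

    package main

    import (
    	"fmt"
    	"time"
    )

    const layout = "2006-01-02 15:04:05.999999999 -0700 MST" // matches the log's timestamp format

    func mustParse(s string) time.Time {
    	t, err := time.Parse(layout, s)
    	if err != nil {
    		panic(err)
    	}
    	return t
    }

    func main() {
    	created := mustParse("2025-11-26 07:02:31 +0000 UTC")            // podCreationTimestamp
    	observed := mustParse("2025-11-26 07:03:05.534565446 +0000 UTC") // watchObservedRunningTime

    	e2e := observed.Sub(created) // 34.534565446s = podStartE2EDuration
    	// Image-pull window taken from the monotonic m=+... offsets in the same record:
    	pull := 879.738334225 - 850.273998883 // lastFinishedPulling - firstStartedPulling, seconds
    	slo := e2e.Seconds() - pull           // 5.070230104 = podStartSLOduration
    	fmt.Printf("e2e=%v sloSeconds=%.9f\n", e2e, slo)
    }

The same identity holds for the other records here, e.g. swift: 33.650866652 − (879.712186663 − 850.274122325) = 4.212802314.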
(PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-jffbf" event={"ID":"9dce4823-5319-44c6-aa25-bc5082014598","Type":"ContainerStarted","Data":"0807a52ba690efd802444166fd0712659b5c34b11a48418bcffbf69d734a2010"} Nov 26 07:03:05 crc kubenswrapper[4492]: I1126 07:03:05.681192 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-47cqn" event={"ID":"7d85a216-d74b-4b58-94a9-2ccd5bfff7d4","Type":"ContainerStarted","Data":"c18c2717adae7c96c84823360f8df860c0a1ae45a6f7b56be7b73025e4e4061c"} Nov 26 07:03:05 crc kubenswrapper[4492]: I1126 07:03:05.681750 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-47cqn" Nov 26 07:03:05 crc kubenswrapper[4492]: I1126 07:03:05.683879 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-47cqn" Nov 26 07:03:05 crc kubenswrapper[4492]: I1126 07:03:05.694982 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-2bvr2" event={"ID":"3939ad46-695a-430c-bffb-380a366540ab","Type":"ContainerStarted","Data":"56fc54b9b54752df265f9ac0187c127b13276d340705d87b78e4c2e56ba8e14a"} Nov 26 07:03:05 crc kubenswrapper[4492]: I1126 07:03:05.702714 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-2bvr2" Nov 26 07:03:05 crc kubenswrapper[4492]: I1126 07:03:05.723021 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/infra-operator-controller-manager-57548d458d-bbzk8" podStartSLOduration=29.417153365 podStartE2EDuration="33.723003832s" podCreationTimestamp="2025-11-26 07:02:32 +0000 UTC" firstStartedPulling="2025-11-26 07:02:59.489249738 +0000 UTC m=+875.373138037" lastFinishedPulling="2025-11-26 07:03:03.795100207 +0000 UTC m=+879.678988504" observedRunningTime="2025-11-26 07:03:05.694255061 +0000 UTC m=+881.578143358" watchObservedRunningTime="2025-11-26 07:03:05.723003832 +0000 UTC m=+881.606892130" Nov 26 07:03:05 crc kubenswrapper[4492]: I1126 07:03:05.724551 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-jh4gx" event={"ID":"4bb97d9a-923e-4292-9cdb-4e764b2e90d6","Type":"ContainerStarted","Data":"6a5b3f16a97e4a6076fa19efa5774ae44e41db4d35e07ad60f3f49d4659c5322"} Nov 26 07:03:05 crc kubenswrapper[4492]: I1126 07:03:05.724912 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-jh4gx" Nov 26 07:03:05 crc kubenswrapper[4492]: I1126 07:03:05.736792 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rxz2r" event={"ID":"9dc0fcb4-c9c5-48c7-ace0-1758df8292ef","Type":"ContainerStarted","Data":"e3c3fd696c910fb2e5cc64f6e005447f7739806615aece942de6fc3d232032b2"} Nov 26 07:03:05 crc kubenswrapper[4492]: I1126 07:03:05.740098 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rxz2r" Nov 26 07:03:05 crc kubenswrapper[4492]: I1126 07:03:05.741964 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rxz2r" Nov 26 07:03:05 crc kubenswrapper[4492]: I1126 07:03:05.757418 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-bwwkk" podStartSLOduration=4.628418013 podStartE2EDuration="33.757404491s" podCreationTimestamp="2025-11-26 07:02:32 +0000 UTC" firstStartedPulling="2025-11-26 07:02:34.739807404 +0000 UTC m=+850.623695693" lastFinishedPulling="2025-11-26 07:03:03.868793873 +0000 UTC m=+879.752682171" observedRunningTime="2025-11-26 07:03:05.755911645 +0000 UTC m=+881.639799943" watchObservedRunningTime="2025-11-26 07:03:05.757404491 +0000 UTC m=+881.641292790" Nov 26 07:03:05 crc kubenswrapper[4492]: I1126 07:03:05.763248 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6ffd77ccd-x5xwx" event={"ID":"a2a736ce-f708-47dc-a121-75bc30b70b1e","Type":"ContainerStarted","Data":"b37589ea99f0418d3f70f986758042a8028a68d89fd74901d141c632254123c2"} Nov 26 07:03:05 crc kubenswrapper[4492]: I1126 07:03:05.765946 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6ffd77ccd-x5xwx" Nov 26 07:03:05 crc kubenswrapper[4492]: I1126 07:03:05.841747 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-2bvr2" podStartSLOduration=4.673814064 podStartE2EDuration="33.841727663s" podCreationTimestamp="2025-11-26 07:02:32 +0000 UTC" firstStartedPulling="2025-11-26 07:02:34.750388889 +0000 UTC m=+850.634277187" lastFinishedPulling="2025-11-26 07:03:03.918302489 +0000 UTC m=+879.802190786" observedRunningTime="2025-11-26 07:03:05.81979157 +0000 UTC m=+881.703679868" watchObservedRunningTime="2025-11-26 07:03:05.841727663 +0000 UTC m=+881.725615961" Nov 26 07:03:05 crc kubenswrapper[4492]: I1126 07:03:05.843159 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rxz2r" podStartSLOduration=4.722608867 podStartE2EDuration="33.843150739s" podCreationTimestamp="2025-11-26 07:02:32 +0000 UTC" firstStartedPulling="2025-11-26 07:02:34.750602701 +0000 UTC m=+850.634490999" lastFinishedPulling="2025-11-26 07:03:03.871144573 +0000 UTC m=+879.755032871" observedRunningTime="2025-11-26 07:03:05.843040672 +0000 UTC m=+881.726928970" watchObservedRunningTime="2025-11-26 07:03:05.843150739 +0000 UTC m=+881.727039037" Nov 26 07:03:05 crc kubenswrapper[4492]: I1126 07:03:05.949447 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6ffd77ccd-x5xwx" podStartSLOduration=29.694408731 podStartE2EDuration="33.949426636s" podCreationTimestamp="2025-11-26 07:02:32 +0000 UTC" firstStartedPulling="2025-11-26 07:02:59.486782289 +0000 UTC m=+875.370670587" lastFinishedPulling="2025-11-26 07:03:03.741800204 +0000 UTC m=+879.625688492" observedRunningTime="2025-11-26 07:03:05.916892065 +0000 UTC m=+881.800780383" watchObservedRunningTime="2025-11-26 07:03:05.949426636 +0000 UTC m=+881.833314934" Nov 26 07:03:05 crc kubenswrapper[4492]: I1126 07:03:05.989194 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-5g6s2" podStartSLOduration=4.8214145980000005 
podStartE2EDuration="33.98915968s" podCreationTimestamp="2025-11-26 07:02:32 +0000 UTC" firstStartedPulling="2025-11-26 07:02:34.706065664 +0000 UTC m=+850.589953962" lastFinishedPulling="2025-11-26 07:03:03.873810746 +0000 UTC m=+879.757699044" observedRunningTime="2025-11-26 07:03:05.947287784 +0000 UTC m=+881.831176082" watchObservedRunningTime="2025-11-26 07:03:05.98915968 +0000 UTC m=+881.873047979" Nov 26 07:03:06 crc kubenswrapper[4492]: I1126 07:03:06.003658 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-47cqn" podStartSLOduration=4.530110426 podStartE2EDuration="34.003643572s" podCreationTimestamp="2025-11-26 07:02:32 +0000 UTC" firstStartedPulling="2025-11-26 07:02:34.38024184 +0000 UTC m=+850.264130128" lastFinishedPulling="2025-11-26 07:03:03.853774975 +0000 UTC m=+879.737663274" observedRunningTime="2025-11-26 07:03:05.986982497 +0000 UTC m=+881.870870795" watchObservedRunningTime="2025-11-26 07:03:06.003643572 +0000 UTC m=+881.887531871" Nov 26 07:03:06 crc kubenswrapper[4492]: I1126 07:03:06.064750 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-jh4gx" podStartSLOduration=4.287526279 podStartE2EDuration="35.06473256s" podCreationTimestamp="2025-11-26 07:02:31 +0000 UTC" firstStartedPulling="2025-11-26 07:02:34.39324087 +0000 UTC m=+850.277129169" lastFinishedPulling="2025-11-26 07:03:05.170447152 +0000 UTC m=+881.054335450" observedRunningTime="2025-11-26 07:03:06.061454538 +0000 UTC m=+881.945342835" watchObservedRunningTime="2025-11-26 07:03:06.06473256 +0000 UTC m=+881.948620858" Nov 26 07:03:06 crc kubenswrapper[4492]: I1126 07:03:06.104483 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-p9wsx" podStartSLOduration=4.630566032 podStartE2EDuration="34.104469062s" podCreationTimestamp="2025-11-26 07:02:32 +0000 UTC" firstStartedPulling="2025-11-26 07:02:34.38063036 +0000 UTC m=+850.264518659" lastFinishedPulling="2025-11-26 07:03:03.854533392 +0000 UTC m=+879.738421689" observedRunningTime="2025-11-26 07:03:06.097051576 +0000 UTC m=+881.980939874" watchObservedRunningTime="2025-11-26 07:03:06.104469062 +0000 UTC m=+881.988357360" Nov 26 07:03:06 crc kubenswrapper[4492]: I1126 07:03:06.773232 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-vv4f8" event={"ID":"fdb12da6-30c3-43f1-adbd-942cad0de886","Type":"ContainerStarted","Data":"2a682d74c13f281cb56f6f521e7e74e407be2695d458b9a18dda4d1461d17794"} Nov 26 07:03:06 crc kubenswrapper[4492]: I1126 07:03:06.773652 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-vv4f8" Nov 26 07:03:06 crc kubenswrapper[4492]: I1126 07:03:06.776617 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-jffbf" event={"ID":"9dce4823-5319-44c6-aa25-bc5082014598","Type":"ContainerStarted","Data":"6514b977fc94257b3df533ce6889d8533aac36dded7f1c1ae5e604b95235e568"} Nov 26 07:03:06 crc kubenswrapper[4492]: I1126 07:03:06.777358 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-jffbf" Nov 26 07:03:06 crc kubenswrapper[4492]: I1126 07:03:06.779529 
4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-q7sh4" event={"ID":"72a74618-0600-42e3-8125-fd1be684497c","Type":"ContainerStarted","Data":"5d239f8bda7595c114696dc9eaced58c7a34cf2885c57df03b9968fc4eecd275"}
Nov 26 07:03:06 crc kubenswrapper[4492]: I1126 07:03:06.779739 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-q7sh4"
Nov 26 07:03:06 crc kubenswrapper[4492]: I1126 07:03:06.781819 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-wtbpv" event={"ID":"783351b0-3a7d-4857-b972-ab027165e675","Type":"ContainerStarted","Data":"6671319fc09d96d529cec533a4a6d1a94d25bd28ed6b3909ea212af794206627"}
Nov 26 07:03:06 crc kubenswrapper[4492]: I1126 07:03:06.781932 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-wtbpv"
Nov 26 07:03:06 crc kubenswrapper[4492]: I1126 07:03:06.784400 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-955677c94-727xv" event={"ID":"bb3706c6-2488-48fd-82f0-902371c46441","Type":"ContainerStarted","Data":"9e3b4274bb65c8438163b575e59e1e48a47d69ef334051267018d28ebf9ac0c2"}
Nov 26 07:03:06 crc kubenswrapper[4492]: I1126 07:03:06.784514 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/designate-operator-controller-manager-955677c94-727xv"
Nov 26 07:03:06 crc kubenswrapper[4492]: I1126 07:03:06.786342 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-mw4rx" event={"ID":"0b086d5f-ee5c-48d2-bf56-29bd8b3b6ba8","Type":"ContainerStarted","Data":"3d3d13147a7fad840f698ee216c9b24f1fb9c24ea1dc6f7cb1a082fd8369a0ea"}
Nov 26 07:03:06 crc kubenswrapper[4492]: I1126 07:03:06.786368 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-mw4rx"
Nov 26 07:03:06 crc kubenswrapper[4492]: I1126 07:03:06.790486 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-bwwkk"
Nov 26 07:03:06 crc kubenswrapper[4492]: I1126 07:03:06.790512 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-57548d458d-bbzk8"
Nov 26 07:03:06 crc kubenswrapper[4492]: I1126 07:03:06.793668 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-vv4f8" podStartSLOduration=4.326602808 podStartE2EDuration="34.793657151s" podCreationTimestamp="2025-11-26 07:02:32 +0000 UTC" firstStartedPulling="2025-11-26 07:02:34.706797751 +0000 UTC m=+850.590686048" lastFinishedPulling="2025-11-26 07:03:05.173852094 +0000 UTC m=+881.057740391" observedRunningTime="2025-11-26 07:03:06.79219847 +0000 UTC m=+882.676086768" watchObservedRunningTime="2025-11-26 07:03:06.793657151 +0000 UTC m=+882.677545450"
Nov 26 07:03:06 crc kubenswrapper[4492]: I1126 07:03:06.799412 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-bwwkk"
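Each "SyncLoop (PLEG): event for pod" record above carries an event={...} payload from the pod lifecycle event generator: the pod's UID, an event type (here always ContainerStarted), and, for started events, the new container or sandbox ID. A hedged sketch that decodes one payload copied verbatim from the ironic-operator record above; the struct mirrors the printed shape, while kubelet's own PodLifecycleEvent type uses a types.UID and an untyped Data field:

    package main

    import (
    	"encoding/json"
    	"fmt"
    )

    // Approximation of the payload shape printed in the PLEG records above
    // (kubelet's internal type differs slightly; see the note above).
    type podLifecycleEvent struct {
    	ID   string `json:"ID"`   // pod UID
    	Type string `json:"Type"` // e.g. "ContainerStarted"
    	Data string `json:"Data"` // container/sandbox ID for started events
    }

    func main() {
    	// Payload copied verbatim from the ironic-operator record above.
    	raw := `{"ID":"72a74618-0600-42e3-8125-fd1be684497c","Type":"ContainerStarted","Data":"5d239f8bda7595c114696dc9eaced58c7a34cf2885c57df03b9968fc4eecd275"}`
    	var ev podLifecycleEvent
    	if err := json.Unmarshal([]byte(raw), &ev); err != nil {
    		panic(err)
    	}
    	fmt.Println(ev.Type, "pod", ev.ID, "container", ev.Data)
    }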
kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-2bvr2" Nov 26 07:03:06 crc kubenswrapper[4492]: I1126 07:03:06.840190 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-wtbpv" podStartSLOduration=3.914523018 podStartE2EDuration="34.840167028s" podCreationTimestamp="2025-11-26 07:02:32 +0000 UTC" firstStartedPulling="2025-11-26 07:02:34.411391737 +0000 UTC m=+850.295280035" lastFinishedPulling="2025-11-26 07:03:05.337035747 +0000 UTC m=+881.220924045" observedRunningTime="2025-11-26 07:03:06.81436745 +0000 UTC m=+882.698255748" watchObservedRunningTime="2025-11-26 07:03:06.840167028 +0000 UTC m=+882.724055326" Nov 26 07:03:06 crc kubenswrapper[4492]: I1126 07:03:06.843212 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-mw4rx" podStartSLOduration=2.946830803 podStartE2EDuration="34.843206062s" podCreationTimestamp="2025-11-26 07:02:32 +0000 UTC" firstStartedPulling="2025-11-26 07:02:34.294145054 +0000 UTC m=+850.178033351" lastFinishedPulling="2025-11-26 07:03:06.190520312 +0000 UTC m=+882.074408610" observedRunningTime="2025-11-26 07:03:06.839744073 +0000 UTC m=+882.723632371" watchObservedRunningTime="2025-11-26 07:03:06.843206062 +0000 UTC m=+882.727094361" Nov 26 07:03:06 crc kubenswrapper[4492]: I1126 07:03:06.865890 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-jffbf" podStartSLOduration=1.886162096 podStartE2EDuration="34.865872078s" podCreationTimestamp="2025-11-26 07:02:32 +0000 UTC" firstStartedPulling="2025-11-26 07:02:33.31357222 +0000 UTC m=+849.197460518" lastFinishedPulling="2025-11-26 07:03:06.293282202 +0000 UTC m=+882.177170500" observedRunningTime="2025-11-26 07:03:06.859610255 +0000 UTC m=+882.743498553" watchObservedRunningTime="2025-11-26 07:03:06.865872078 +0000 UTC m=+882.749760377" Nov 26 07:03:06 crc kubenswrapper[4492]: I1126 07:03:06.887160 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/designate-operator-controller-manager-955677c94-727xv" podStartSLOduration=3.422805266 podStartE2EDuration="35.887146428s" podCreationTimestamp="2025-11-26 07:02:31 +0000 UTC" firstStartedPulling="2025-11-26 07:02:33.614303606 +0000 UTC m=+849.498191894" lastFinishedPulling="2025-11-26 07:03:06.078644757 +0000 UTC m=+881.962533056" observedRunningTime="2025-11-26 07:03:06.885269298 +0000 UTC m=+882.769157596" watchObservedRunningTime="2025-11-26 07:03:06.887146428 +0000 UTC m=+882.771034726" Nov 26 07:03:06 crc kubenswrapper[4492]: I1126 07:03:06.914461 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-q7sh4" podStartSLOduration=3.561705241 podStartE2EDuration="34.914442569s" podCreationTimestamp="2025-11-26 07:02:32 +0000 UTC" firstStartedPulling="2025-11-26 07:02:34.727970107 +0000 UTC m=+850.611858406" lastFinishedPulling="2025-11-26 07:03:06.080707436 +0000 UTC m=+881.964595734" observedRunningTime="2025-11-26 07:03:06.911100365 +0000 UTC m=+882.794988664" watchObservedRunningTime="2025-11-26 07:03:06.914442569 +0000 UTC m=+882.798330867" Nov 26 07:03:08 crc kubenswrapper[4492]: I1126 07:03:08.820575 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" 
status="ready" pod="openstack-operators/openstack-operator-controller-manager-56868586f6-kchvg" Nov 26 07:03:11 crc kubenswrapper[4492]: I1126 07:03:11.829404 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-xlj7m" event={"ID":"0c48ee47-de9c-455c-a366-48c296180ff9","Type":"ContainerStarted","Data":"d4ff4f9cdbcab43c53da3c9cc761d2eed034f5127815b980456e00916dce7c94"} Nov 26 07:03:11 crc kubenswrapper[4492]: I1126 07:03:11.849839 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-xlj7m" podStartSLOduration=3.492940071 podStartE2EDuration="39.849800909s" podCreationTimestamp="2025-11-26 07:02:32 +0000 UTC" firstStartedPulling="2025-11-26 07:02:34.707050134 +0000 UTC m=+850.590938423" lastFinishedPulling="2025-11-26 07:03:11.063910963 +0000 UTC m=+886.947799261" observedRunningTime="2025-11-26 07:03:11.845112213 +0000 UTC m=+887.729000511" watchObservedRunningTime="2025-11-26 07:03:11.849800909 +0000 UTC m=+887.733689197" Nov 26 07:03:12 crc kubenswrapper[4492]: I1126 07:03:12.535278 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-jffbf" Nov 26 07:03:12 crc kubenswrapper[4492]: I1126 07:03:12.648258 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-wtbpv" Nov 26 07:03:12 crc kubenswrapper[4492]: I1126 07:03:12.650770 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/designate-operator-controller-manager-955677c94-727xv" Nov 26 07:03:12 crc kubenswrapper[4492]: I1126 07:03:12.651522 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-jh4gx" Nov 26 07:03:12 crc kubenswrapper[4492]: I1126 07:03:12.660670 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-q7sh4" Nov 26 07:03:12 crc kubenswrapper[4492]: I1126 07:03:12.668436 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-vp7gp" Nov 26 07:03:12 crc kubenswrapper[4492]: I1126 07:03:12.766366 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-mw4rx" Nov 26 07:03:13 crc kubenswrapper[4492]: I1126 07:03:13.131186 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-dvbx8" Nov 26 07:03:13 crc kubenswrapper[4492]: I1126 07:03:13.163273 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-vv4f8" Nov 26 07:03:13 crc kubenswrapper[4492]: I1126 07:03:13.198774 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-b88tl" Nov 26 07:03:18 crc kubenswrapper[4492]: I1126 07:03:18.427872 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/infra-operator-controller-manager-57548d458d-bbzk8" Nov 26 07:03:18 crc kubenswrapper[4492]: I1126 07:03:18.555142 4492 
Nov 26 07:03:18 crc kubenswrapper[4492]: I1126 07:03:18.555142 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6ffd77ccd-x5xwx"
Nov 26 07:03:19 crc kubenswrapper[4492]: I1126 07:03:19.441482 4492 patch_prober.go:28] interesting pod/machine-config-daemon-6blv7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 26 07:03:19 crc kubenswrapper[4492]: I1126 07:03:19.441557 4492 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 26 07:03:30 crc kubenswrapper[4492]: I1126 07:03:30.949703 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-59899b64cc-ltrhr"]
Nov 26 07:03:30 crc kubenswrapper[4492]: I1126 07:03:30.951039 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-59899b64cc-ltrhr"
Nov 26 07:03:30 crc kubenswrapper[4492]: I1126 07:03:30.956521 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns"
Nov 26 07:03:30 crc kubenswrapper[4492]: I1126 07:03:30.956525 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openshift-service-ca.crt"
Nov 26 07:03:30 crc kubenswrapper[4492]: I1126 07:03:30.956565 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dnsmasq-dns-dockercfg-thps5"
Nov 26 07:03:30 crc kubenswrapper[4492]: I1126 07:03:30.956772 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"kube-root-ca.crt"
Nov 26 07:03:30 crc kubenswrapper[4492]: I1126 07:03:30.969407 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-59899b64cc-ltrhr"]
Nov 26 07:03:31 crc kubenswrapper[4492]: I1126 07:03:31.029885 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f51b5379-fafd-41cf-aa2b-f705adb424b4-config\") pod \"dnsmasq-dns-59899b64cc-ltrhr\" (UID: \"f51b5379-fafd-41cf-aa2b-f705adb424b4\") " pod="openstack/dnsmasq-dns-59899b64cc-ltrhr"
Nov 26 07:03:31 crc kubenswrapper[4492]: I1126 07:03:31.030001 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ld7ns\" (UniqueName: \"kubernetes.io/projected/f51b5379-fafd-41cf-aa2b-f705adb424b4-kube-api-access-ld7ns\") pod \"dnsmasq-dns-59899b64cc-ltrhr\" (UID: \"f51b5379-fafd-41cf-aa2b-f705adb424b4\") " pod="openstack/dnsmasq-dns-59899b64cc-ltrhr"
Nov 26 07:03:31 crc kubenswrapper[4492]: I1126 07:03:31.036136 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7bbd9697cc-sb6h8"]
Nov 26 07:03:31 crc kubenswrapper[4492]: I1126 07:03:31.037254 4492 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openstack/dnsmasq-dns-7bbd9697cc-sb6h8" Nov 26 07:03:31 crc kubenswrapper[4492]: I1126 07:03:31.038558 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-svc" Nov 26 07:03:31 crc kubenswrapper[4492]: I1126 07:03:31.047525 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7bbd9697cc-sb6h8"] Nov 26 07:03:31 crc kubenswrapper[4492]: I1126 07:03:31.131501 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d0f38be1-2d63-4e62-8961-b52fdad1a712-dns-svc\") pod \"dnsmasq-dns-7bbd9697cc-sb6h8\" (UID: \"d0f38be1-2d63-4e62-8961-b52fdad1a712\") " pod="openstack/dnsmasq-dns-7bbd9697cc-sb6h8" Nov 26 07:03:31 crc kubenswrapper[4492]: I1126 07:03:31.131783 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d8wnd\" (UniqueName: \"kubernetes.io/projected/d0f38be1-2d63-4e62-8961-b52fdad1a712-kube-api-access-d8wnd\") pod \"dnsmasq-dns-7bbd9697cc-sb6h8\" (UID: \"d0f38be1-2d63-4e62-8961-b52fdad1a712\") " pod="openstack/dnsmasq-dns-7bbd9697cc-sb6h8" Nov 26 07:03:31 crc kubenswrapper[4492]: I1126 07:03:31.132001 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d0f38be1-2d63-4e62-8961-b52fdad1a712-config\") pod \"dnsmasq-dns-7bbd9697cc-sb6h8\" (UID: \"d0f38be1-2d63-4e62-8961-b52fdad1a712\") " pod="openstack/dnsmasq-dns-7bbd9697cc-sb6h8" Nov 26 07:03:31 crc kubenswrapper[4492]: I1126 07:03:31.132050 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ld7ns\" (UniqueName: \"kubernetes.io/projected/f51b5379-fafd-41cf-aa2b-f705adb424b4-kube-api-access-ld7ns\") pod \"dnsmasq-dns-59899b64cc-ltrhr\" (UID: \"f51b5379-fafd-41cf-aa2b-f705adb424b4\") " pod="openstack/dnsmasq-dns-59899b64cc-ltrhr" Nov 26 07:03:31 crc kubenswrapper[4492]: I1126 07:03:31.132127 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f51b5379-fafd-41cf-aa2b-f705adb424b4-config\") pod \"dnsmasq-dns-59899b64cc-ltrhr\" (UID: \"f51b5379-fafd-41cf-aa2b-f705adb424b4\") " pod="openstack/dnsmasq-dns-59899b64cc-ltrhr" Nov 26 07:03:31 crc kubenswrapper[4492]: I1126 07:03:31.133153 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f51b5379-fafd-41cf-aa2b-f705adb424b4-config\") pod \"dnsmasq-dns-59899b64cc-ltrhr\" (UID: \"f51b5379-fafd-41cf-aa2b-f705adb424b4\") " pod="openstack/dnsmasq-dns-59899b64cc-ltrhr" Nov 26 07:03:31 crc kubenswrapper[4492]: I1126 07:03:31.152832 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ld7ns\" (UniqueName: \"kubernetes.io/projected/f51b5379-fafd-41cf-aa2b-f705adb424b4-kube-api-access-ld7ns\") pod \"dnsmasq-dns-59899b64cc-ltrhr\" (UID: \"f51b5379-fafd-41cf-aa2b-f705adb424b4\") " pod="openstack/dnsmasq-dns-59899b64cc-ltrhr" Nov 26 07:03:31 crc kubenswrapper[4492]: I1126 07:03:31.233457 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d0f38be1-2d63-4e62-8961-b52fdad1a712-dns-svc\") pod \"dnsmasq-dns-7bbd9697cc-sb6h8\" (UID: \"d0f38be1-2d63-4e62-8961-b52fdad1a712\") " pod="openstack/dnsmasq-dns-7bbd9697cc-sb6h8" Nov 26 07:03:31 crc kubenswrapper[4492]: I1126 
07:03:31.233503 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d8wnd\" (UniqueName: \"kubernetes.io/projected/d0f38be1-2d63-4e62-8961-b52fdad1a712-kube-api-access-d8wnd\") pod \"dnsmasq-dns-7bbd9697cc-sb6h8\" (UID: \"d0f38be1-2d63-4e62-8961-b52fdad1a712\") " pod="openstack/dnsmasq-dns-7bbd9697cc-sb6h8"
Nov 26 07:03:31 crc kubenswrapper[4492]: I1126 07:03:31.233532 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d0f38be1-2d63-4e62-8961-b52fdad1a712-config\") pod \"dnsmasq-dns-7bbd9697cc-sb6h8\" (UID: \"d0f38be1-2d63-4e62-8961-b52fdad1a712\") " pod="openstack/dnsmasq-dns-7bbd9697cc-sb6h8"
Nov 26 07:03:31 crc kubenswrapper[4492]: I1126 07:03:31.234330 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d0f38be1-2d63-4e62-8961-b52fdad1a712-dns-svc\") pod \"dnsmasq-dns-7bbd9697cc-sb6h8\" (UID: \"d0f38be1-2d63-4e62-8961-b52fdad1a712\") " pod="openstack/dnsmasq-dns-7bbd9697cc-sb6h8"
Nov 26 07:03:31 crc kubenswrapper[4492]: I1126 07:03:31.234348 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d0f38be1-2d63-4e62-8961-b52fdad1a712-config\") pod \"dnsmasq-dns-7bbd9697cc-sb6h8\" (UID: \"d0f38be1-2d63-4e62-8961-b52fdad1a712\") " pod="openstack/dnsmasq-dns-7bbd9697cc-sb6h8"
Nov 26 07:03:31 crc kubenswrapper[4492]: I1126 07:03:31.250732 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d8wnd\" (UniqueName: \"kubernetes.io/projected/d0f38be1-2d63-4e62-8961-b52fdad1a712-kube-api-access-d8wnd\") pod \"dnsmasq-dns-7bbd9697cc-sb6h8\" (UID: \"d0f38be1-2d63-4e62-8961-b52fdad1a712\") " pod="openstack/dnsmasq-dns-7bbd9697cc-sb6h8"
Nov 26 07:03:31 crc kubenswrapper[4492]: I1126 07:03:31.275536 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-59899b64cc-ltrhr"
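The reconciler records above trace the standard volume lifecycle for the dnsmasq pods: VerifyControllerAttachedVolume, then MountVolume started, then MountVolume.SetUp succeeded for each ConfigMap-backed volume (config, dns-svc) and for the projected service-account token volume (kube-api-access-*). A hedged sketch of pod-spec volume declarations that would produce this set of mounts; the ConfigMap names follow the "dns" and "dns-svc" caches populated above, but mapping them to these volumes is an assumption:

    package main

    import (
    	"fmt"

    	corev1 "k8s.io/api/core/v1"
    )

    func main() {
    	volumes := []corev1.Volume{
    		{
    			Name: "config", // mounted in the SetUp records above
    			VolumeSource: corev1.VolumeSource{
    				ConfigMap: &corev1.ConfigMapVolumeSource{
    					LocalObjectReference: corev1.LocalObjectReference{Name: "dns"}, // assumed source
    				},
    			},
    		},
    		{
    			Name: "dns-svc",
    			VolumeSource: corev1.VolumeSource{
    				ConfigMap: &corev1.ConfigMapVolumeSource{
    					LocalObjectReference: corev1.LocalObjectReference{Name: "dns-svc"},
    				},
    			},
    		},
    		// The kube-api-access-<hash> volume is not declared in the pod spec: the
    		// API server injects it as a projected volume (token, kube-root-ca.crt,
    		// namespace), which is why it appears above under kubernetes.io/projected.
    	}
    	for _, v := range volumes {
    		fmt.Println(v.Name, "->", v.ConfigMap.Name)
    	}
    }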
Need to start a new one" pod="openstack/dnsmasq-dns-7bbd9697cc-sb6h8" Nov 26 07:03:31 crc kubenswrapper[4492]: I1126 07:03:31.672937 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-59899b64cc-ltrhr"] Nov 26 07:03:31 crc kubenswrapper[4492]: I1126 07:03:31.787631 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7bbd9697cc-sb6h8"] Nov 26 07:03:31 crc kubenswrapper[4492]: W1126 07:03:31.791741 4492 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd0f38be1_2d63_4e62_8961_b52fdad1a712.slice/crio-28c1ac2469aad1f3f0e9537077729dcf1fbe71f3e4902ea2aac2aafd033da647 WatchSource:0}: Error finding container 28c1ac2469aad1f3f0e9537077729dcf1fbe71f3e4902ea2aac2aafd033da647: Status 404 returned error can't find the container with id 28c1ac2469aad1f3f0e9537077729dcf1fbe71f3e4902ea2aac2aafd033da647 Nov 26 07:03:31 crc kubenswrapper[4492]: I1126 07:03:31.960494 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-59899b64cc-ltrhr" event={"ID":"f51b5379-fafd-41cf-aa2b-f705adb424b4","Type":"ContainerStarted","Data":"1efbefd14efe0d0e11f85e3f8411644adaf575993a1d3119826999edd08af721"} Nov 26 07:03:31 crc kubenswrapper[4492]: I1126 07:03:31.961352 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7bbd9697cc-sb6h8" event={"ID":"d0f38be1-2d63-4e62-8961-b52fdad1a712","Type":"ContainerStarted","Data":"28c1ac2469aad1f3f0e9537077729dcf1fbe71f3e4902ea2aac2aafd033da647"} Nov 26 07:03:34 crc kubenswrapper[4492]: I1126 07:03:34.079114 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-59899b64cc-ltrhr"] Nov 26 07:03:34 crc kubenswrapper[4492]: I1126 07:03:34.116349 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-85744f897f-kbhm6"] Nov 26 07:03:34 crc kubenswrapper[4492]: I1126 07:03:34.117506 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-85744f897f-kbhm6" Nov 26 07:03:34 crc kubenswrapper[4492]: I1126 07:03:34.142108 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-85744f897f-kbhm6"] Nov 26 07:03:34 crc kubenswrapper[4492]: I1126 07:03:34.171621 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ef4332f8-32f1-4ec9-a333-8b8b025151be-dns-svc\") pod \"dnsmasq-dns-85744f897f-kbhm6\" (UID: \"ef4332f8-32f1-4ec9-a333-8b8b025151be\") " pod="openstack/dnsmasq-dns-85744f897f-kbhm6" Nov 26 07:03:34 crc kubenswrapper[4492]: I1126 07:03:34.171657 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ef4332f8-32f1-4ec9-a333-8b8b025151be-config\") pod \"dnsmasq-dns-85744f897f-kbhm6\" (UID: \"ef4332f8-32f1-4ec9-a333-8b8b025151be\") " pod="openstack/dnsmasq-dns-85744f897f-kbhm6" Nov 26 07:03:34 crc kubenswrapper[4492]: I1126 07:03:34.171684 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hdbqx\" (UniqueName: \"kubernetes.io/projected/ef4332f8-32f1-4ec9-a333-8b8b025151be-kube-api-access-hdbqx\") pod \"dnsmasq-dns-85744f897f-kbhm6\" (UID: \"ef4332f8-32f1-4ec9-a333-8b8b025151be\") " pod="openstack/dnsmasq-dns-85744f897f-kbhm6" Nov 26 07:03:34 crc kubenswrapper[4492]: I1126 07:03:34.273199 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hdbqx\" (UniqueName: \"kubernetes.io/projected/ef4332f8-32f1-4ec9-a333-8b8b025151be-kube-api-access-hdbqx\") pod \"dnsmasq-dns-85744f897f-kbhm6\" (UID: \"ef4332f8-32f1-4ec9-a333-8b8b025151be\") " pod="openstack/dnsmasq-dns-85744f897f-kbhm6" Nov 26 07:03:34 crc kubenswrapper[4492]: I1126 07:03:34.273300 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ef4332f8-32f1-4ec9-a333-8b8b025151be-dns-svc\") pod \"dnsmasq-dns-85744f897f-kbhm6\" (UID: \"ef4332f8-32f1-4ec9-a333-8b8b025151be\") " pod="openstack/dnsmasq-dns-85744f897f-kbhm6" Nov 26 07:03:34 crc kubenswrapper[4492]: I1126 07:03:34.273327 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ef4332f8-32f1-4ec9-a333-8b8b025151be-config\") pod \"dnsmasq-dns-85744f897f-kbhm6\" (UID: \"ef4332f8-32f1-4ec9-a333-8b8b025151be\") " pod="openstack/dnsmasq-dns-85744f897f-kbhm6" Nov 26 07:03:34 crc kubenswrapper[4492]: I1126 07:03:34.274212 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ef4332f8-32f1-4ec9-a333-8b8b025151be-config\") pod \"dnsmasq-dns-85744f897f-kbhm6\" (UID: \"ef4332f8-32f1-4ec9-a333-8b8b025151be\") " pod="openstack/dnsmasq-dns-85744f897f-kbhm6" Nov 26 07:03:34 crc kubenswrapper[4492]: I1126 07:03:34.274937 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ef4332f8-32f1-4ec9-a333-8b8b025151be-dns-svc\") pod \"dnsmasq-dns-85744f897f-kbhm6\" (UID: \"ef4332f8-32f1-4ec9-a333-8b8b025151be\") " pod="openstack/dnsmasq-dns-85744f897f-kbhm6" Nov 26 07:03:34 crc kubenswrapper[4492]: I1126 07:03:34.299254 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hdbqx\" (UniqueName: 
\"kubernetes.io/projected/ef4332f8-32f1-4ec9-a333-8b8b025151be-kube-api-access-hdbqx\") pod \"dnsmasq-dns-85744f897f-kbhm6\" (UID: \"ef4332f8-32f1-4ec9-a333-8b8b025151be\") " pod="openstack/dnsmasq-dns-85744f897f-kbhm6" Nov 26 07:03:34 crc kubenswrapper[4492]: I1126 07:03:34.431148 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7bbd9697cc-sb6h8"] Nov 26 07:03:34 crc kubenswrapper[4492]: I1126 07:03:34.436670 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-85744f897f-kbhm6" Nov 26 07:03:34 crc kubenswrapper[4492]: I1126 07:03:34.473221 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-657d948df5-j4blw"] Nov 26 07:03:34 crc kubenswrapper[4492]: I1126 07:03:34.474408 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-657d948df5-j4blw" Nov 26 07:03:34 crc kubenswrapper[4492]: I1126 07:03:34.531521 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-657d948df5-j4blw"] Nov 26 07:03:34 crc kubenswrapper[4492]: I1126 07:03:34.583545 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-44xsd\" (UniqueName: \"kubernetes.io/projected/e068f599-17d9-4fd1-8fca-d74938c89110-kube-api-access-44xsd\") pod \"dnsmasq-dns-657d948df5-j4blw\" (UID: \"e068f599-17d9-4fd1-8fca-d74938c89110\") " pod="openstack/dnsmasq-dns-657d948df5-j4blw" Nov 26 07:03:34 crc kubenswrapper[4492]: I1126 07:03:34.583627 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e068f599-17d9-4fd1-8fca-d74938c89110-config\") pod \"dnsmasq-dns-657d948df5-j4blw\" (UID: \"e068f599-17d9-4fd1-8fca-d74938c89110\") " pod="openstack/dnsmasq-dns-657d948df5-j4blw" Nov 26 07:03:34 crc kubenswrapper[4492]: I1126 07:03:34.583697 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e068f599-17d9-4fd1-8fca-d74938c89110-dns-svc\") pod \"dnsmasq-dns-657d948df5-j4blw\" (UID: \"e068f599-17d9-4fd1-8fca-d74938c89110\") " pod="openstack/dnsmasq-dns-657d948df5-j4blw" Nov 26 07:03:34 crc kubenswrapper[4492]: I1126 07:03:34.687473 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e068f599-17d9-4fd1-8fca-d74938c89110-config\") pod \"dnsmasq-dns-657d948df5-j4blw\" (UID: \"e068f599-17d9-4fd1-8fca-d74938c89110\") " pod="openstack/dnsmasq-dns-657d948df5-j4blw" Nov 26 07:03:34 crc kubenswrapper[4492]: I1126 07:03:34.687575 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e068f599-17d9-4fd1-8fca-d74938c89110-dns-svc\") pod \"dnsmasq-dns-657d948df5-j4blw\" (UID: \"e068f599-17d9-4fd1-8fca-d74938c89110\") " pod="openstack/dnsmasq-dns-657d948df5-j4blw" Nov 26 07:03:34 crc kubenswrapper[4492]: I1126 07:03:34.687709 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-44xsd\" (UniqueName: \"kubernetes.io/projected/e068f599-17d9-4fd1-8fca-d74938c89110-kube-api-access-44xsd\") pod \"dnsmasq-dns-657d948df5-j4blw\" (UID: \"e068f599-17d9-4fd1-8fca-d74938c89110\") " pod="openstack/dnsmasq-dns-657d948df5-j4blw" Nov 26 07:03:34 crc kubenswrapper[4492]: I1126 07:03:34.689028 4492 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e068f599-17d9-4fd1-8fca-d74938c89110-config\") pod \"dnsmasq-dns-657d948df5-j4blw\" (UID: \"e068f599-17d9-4fd1-8fca-d74938c89110\") " pod="openstack/dnsmasq-dns-657d948df5-j4blw" Nov 26 07:03:34 crc kubenswrapper[4492]: I1126 07:03:34.689625 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e068f599-17d9-4fd1-8fca-d74938c89110-dns-svc\") pod \"dnsmasq-dns-657d948df5-j4blw\" (UID: \"e068f599-17d9-4fd1-8fca-d74938c89110\") " pod="openstack/dnsmasq-dns-657d948df5-j4blw" Nov 26 07:03:34 crc kubenswrapper[4492]: I1126 07:03:34.718564 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-44xsd\" (UniqueName: \"kubernetes.io/projected/e068f599-17d9-4fd1-8fca-d74938c89110-kube-api-access-44xsd\") pod \"dnsmasq-dns-657d948df5-j4blw\" (UID: \"e068f599-17d9-4fd1-8fca-d74938c89110\") " pod="openstack/dnsmasq-dns-657d948df5-j4blw" Nov 26 07:03:34 crc kubenswrapper[4492]: I1126 07:03:34.801611 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-657d948df5-j4blw" Nov 26 07:03:35 crc kubenswrapper[4492]: I1126 07:03:35.147535 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-85744f897f-kbhm6"] Nov 26 07:03:35 crc kubenswrapper[4492]: W1126 07:03:35.204166 4492 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podef4332f8_32f1_4ec9_a333_8b8b025151be.slice/crio-636b64151c1488f3e0d68f0e1bc7657715425ed4f60ce2bfbfb92c7ba3ed2990 WatchSource:0}: Error finding container 636b64151c1488f3e0d68f0e1bc7657715425ed4f60ce2bfbfb92c7ba3ed2990: Status 404 returned error can't find the container with id 636b64151c1488f3e0d68f0e1bc7657715425ed4f60ce2bfbfb92c7ba3ed2990 Nov 26 07:03:35 crc kubenswrapper[4492]: I1126 07:03:35.307486 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 26 07:03:35 crc kubenswrapper[4492]: I1126 07:03:35.310134 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 26 07:03:35 crc kubenswrapper[4492]: I1126 07:03:35.311816 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 26 07:03:35 crc kubenswrapper[4492]: I1126 07:03:35.322041 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Nov 26 07:03:35 crc kubenswrapper[4492]: I1126 07:03:35.322476 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Nov 26 07:03:35 crc kubenswrapper[4492]: I1126 07:03:35.322612 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Nov 26 07:03:35 crc kubenswrapper[4492]: I1126 07:03:35.322742 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Nov 26 07:03:35 crc kubenswrapper[4492]: I1126 07:03:35.322860 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Nov 26 07:03:35 crc kubenswrapper[4492]: I1126 07:03:35.322989 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-dd6zc" Nov 26 07:03:35 crc kubenswrapper[4492]: I1126 07:03:35.324891 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Nov 26 07:03:35 crc kubenswrapper[4492]: I1126 07:03:35.415082 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/3bb75c38-10db-46c0-947c-3d91eca8f110-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"3bb75c38-10db-46c0-947c-3d91eca8f110\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 07:03:35 crc kubenswrapper[4492]: I1126 07:03:35.415214 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f6grv\" (UniqueName: \"kubernetes.io/projected/3bb75c38-10db-46c0-947c-3d91eca8f110-kube-api-access-f6grv\") pod \"rabbitmq-cell1-server-0\" (UID: \"3bb75c38-10db-46c0-947c-3d91eca8f110\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 07:03:35 crc kubenswrapper[4492]: I1126 07:03:35.415252 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/3bb75c38-10db-46c0-947c-3d91eca8f110-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"3bb75c38-10db-46c0-947c-3d91eca8f110\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 07:03:35 crc kubenswrapper[4492]: I1126 07:03:35.415371 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/3bb75c38-10db-46c0-947c-3d91eca8f110-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"3bb75c38-10db-46c0-947c-3d91eca8f110\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 07:03:35 crc kubenswrapper[4492]: I1126 07:03:35.415407 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/3bb75c38-10db-46c0-947c-3d91eca8f110-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"3bb75c38-10db-46c0-947c-3d91eca8f110\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 07:03:35 crc kubenswrapper[4492]: I1126 07:03:35.415547 4492 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/3bb75c38-10db-46c0-947c-3d91eca8f110-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"3bb75c38-10db-46c0-947c-3d91eca8f110\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 07:03:35 crc kubenswrapper[4492]: I1126 07:03:35.415699 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/3bb75c38-10db-46c0-947c-3d91eca8f110-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"3bb75c38-10db-46c0-947c-3d91eca8f110\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 07:03:35 crc kubenswrapper[4492]: I1126 07:03:35.415762 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/3bb75c38-10db-46c0-947c-3d91eca8f110-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"3bb75c38-10db-46c0-947c-3d91eca8f110\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 07:03:35 crc kubenswrapper[4492]: I1126 07:03:35.415836 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"3bb75c38-10db-46c0-947c-3d91eca8f110\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 07:03:35 crc kubenswrapper[4492]: I1126 07:03:35.415865 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/3bb75c38-10db-46c0-947c-3d91eca8f110-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"3bb75c38-10db-46c0-947c-3d91eca8f110\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 07:03:35 crc kubenswrapper[4492]: I1126 07:03:35.415916 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/3bb75c38-10db-46c0-947c-3d91eca8f110-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"3bb75c38-10db-46c0-947c-3d91eca8f110\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 07:03:35 crc kubenswrapper[4492]: I1126 07:03:35.506070 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-657d948df5-j4blw"] Nov 26 07:03:35 crc kubenswrapper[4492]: I1126 07:03:35.521622 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/3bb75c38-10db-46c0-947c-3d91eca8f110-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"3bb75c38-10db-46c0-947c-3d91eca8f110\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 07:03:35 crc kubenswrapper[4492]: I1126 07:03:35.521677 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f6grv\" (UniqueName: \"kubernetes.io/projected/3bb75c38-10db-46c0-947c-3d91eca8f110-kube-api-access-f6grv\") pod \"rabbitmq-cell1-server-0\" (UID: \"3bb75c38-10db-46c0-947c-3d91eca8f110\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 07:03:35 crc kubenswrapper[4492]: I1126 07:03:35.521727 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/3bb75c38-10db-46c0-947c-3d91eca8f110-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"3bb75c38-10db-46c0-947c-3d91eca8f110\") " 
pod="openstack/rabbitmq-cell1-server-0" Nov 26 07:03:35 crc kubenswrapper[4492]: I1126 07:03:35.521755 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/3bb75c38-10db-46c0-947c-3d91eca8f110-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"3bb75c38-10db-46c0-947c-3d91eca8f110\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 07:03:35 crc kubenswrapper[4492]: I1126 07:03:35.521775 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/3bb75c38-10db-46c0-947c-3d91eca8f110-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"3bb75c38-10db-46c0-947c-3d91eca8f110\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 07:03:35 crc kubenswrapper[4492]: I1126 07:03:35.521801 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/3bb75c38-10db-46c0-947c-3d91eca8f110-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"3bb75c38-10db-46c0-947c-3d91eca8f110\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 07:03:35 crc kubenswrapper[4492]: I1126 07:03:35.521857 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/3bb75c38-10db-46c0-947c-3d91eca8f110-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"3bb75c38-10db-46c0-947c-3d91eca8f110\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 07:03:35 crc kubenswrapper[4492]: I1126 07:03:35.521884 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/3bb75c38-10db-46c0-947c-3d91eca8f110-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"3bb75c38-10db-46c0-947c-3d91eca8f110\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 07:03:35 crc kubenswrapper[4492]: I1126 07:03:35.521925 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"3bb75c38-10db-46c0-947c-3d91eca8f110\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 07:03:35 crc kubenswrapper[4492]: I1126 07:03:35.521962 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/3bb75c38-10db-46c0-947c-3d91eca8f110-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"3bb75c38-10db-46c0-947c-3d91eca8f110\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 07:03:35 crc kubenswrapper[4492]: I1126 07:03:35.521993 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/3bb75c38-10db-46c0-947c-3d91eca8f110-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"3bb75c38-10db-46c0-947c-3d91eca8f110\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 07:03:35 crc kubenswrapper[4492]: I1126 07:03:35.524012 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/3bb75c38-10db-46c0-947c-3d91eca8f110-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"3bb75c38-10db-46c0-947c-3d91eca8f110\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 07:03:35 crc kubenswrapper[4492]: I1126 07:03:35.524329 4492 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/3bb75c38-10db-46c0-947c-3d91eca8f110-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"3bb75c38-10db-46c0-947c-3d91eca8f110\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 26 07:03:35 crc kubenswrapper[4492]: I1126 07:03:35.525925 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/3bb75c38-10db-46c0-947c-3d91eca8f110-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"3bb75c38-10db-46c0-947c-3d91eca8f110\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 26 07:03:35 crc kubenswrapper[4492]: I1126 07:03:35.526280 4492 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"3bb75c38-10db-46c0-947c-3d91eca8f110\") device mount path \"/mnt/openstack/pv10\"" pod="openstack/rabbitmq-cell1-server-0"
Nov 26 07:03:35 crc kubenswrapper[4492]: I1126 07:03:35.527832 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/3bb75c38-10db-46c0-947c-3d91eca8f110-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"3bb75c38-10db-46c0-947c-3d91eca8f110\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 26 07:03:35 crc kubenswrapper[4492]: I1126 07:03:35.527963 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/3bb75c38-10db-46c0-947c-3d91eca8f110-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"3bb75c38-10db-46c0-947c-3d91eca8f110\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 26 07:03:35 crc kubenswrapper[4492]: W1126 07:03:35.528880 4492 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode068f599_17d9_4fd1_8fca_d74938c89110.slice/crio-9e0d611b70f013b36f31e50674522a705a0cacea7db9550447fb82c5b57142c6 WatchSource:0}: Error finding container 9e0d611b70f013b36f31e50674522a705a0cacea7db9550447fb82c5b57142c6: Status 404 returned error can't find the container with id 9e0d611b70f013b36f31e50674522a705a0cacea7db9550447fb82c5b57142c6
Nov 26 07:03:35 crc kubenswrapper[4492]: I1126 07:03:35.529868 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/3bb75c38-10db-46c0-947c-3d91eca8f110-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"3bb75c38-10db-46c0-947c-3d91eca8f110\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 26 07:03:35 crc kubenswrapper[4492]: I1126 07:03:35.532790 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/3bb75c38-10db-46c0-947c-3d91eca8f110-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"3bb75c38-10db-46c0-947c-3d91eca8f110\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 26 07:03:35 crc kubenswrapper[4492]: I1126 07:03:35.533593 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/3bb75c38-10db-46c0-947c-3d91eca8f110-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"3bb75c38-10db-46c0-947c-3d91eca8f110\") " pod="openstack/rabbitmq-cell1-server-0"
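local-storage10-crc is the one volume above that takes two steps: MountVolume.MountDevice stages the local disk at its device mount path (/mnt/openstack/pv10), and MountVolume.SetUp then makes it available to the pod, whereas the ConfigMap, Secret, projected, and empty-dir volumes go straight to SetUp. A hedged sketch of a local PersistentVolume consistent with those records; the capacity, storage class name, and hostname label are assumptions, not values read from this cluster:

    package main

    import (
    	"fmt"

    	corev1 "k8s.io/api/core/v1"
    	"k8s.io/apimachinery/pkg/api/resource"
    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    func main() {
    	pv := corev1.PersistentVolume{
    		ObjectMeta: metav1.ObjectMeta{Name: "local-storage10-crc"},
    		Spec: corev1.PersistentVolumeSpec{
    			Capacity: corev1.ResourceList{
    				corev1.ResourceStorage: resource.MustParse("10Gi"), // assumed size
    			},
    			AccessModes:      []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce},
    			StorageClassName: "local-storage", // assumed class name
    			PersistentVolumeSource: corev1.PersistentVolumeSource{
    				Local: &corev1.LocalVolumeSource{Path: "/mnt/openstack/pv10"}, // from the MountDevice record
    			},
    			// Local PVs must pin themselves to a node; "crc" matches the host in these logs.
    			NodeAffinity: &corev1.VolumeNodeAffinity{
    				Required: &corev1.NodeSelector{
    					NodeSelectorTerms: []corev1.NodeSelectorTerm{{
    						MatchExpressions: []corev1.NodeSelectorRequirement{{
    							Key:      "kubernetes.io/hostname",
    							Operator: corev1.NodeSelectorOpIn,
    							Values:   []string{"crc"},
    						}},
    					}},
    				},
    			},
    		},
    	}
    	fmt.Println(pv.Name, "->", pv.Spec.Local.Path)
    }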
\"kube-api-access-f6grv\" (UniqueName: \"kubernetes.io/projected/3bb75c38-10db-46c0-947c-3d91eca8f110-kube-api-access-f6grv\") pod \"rabbitmq-cell1-server-0\" (UID: \"3bb75c38-10db-46c0-947c-3d91eca8f110\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 07:03:35 crc kubenswrapper[4492]: I1126 07:03:35.570335 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"3bb75c38-10db-46c0-947c-3d91eca8f110\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 07:03:35 crc kubenswrapper[4492]: I1126 07:03:35.570629 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/3bb75c38-10db-46c0-947c-3d91eca8f110-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"3bb75c38-10db-46c0-947c-3d91eca8f110\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 07:03:35 crc kubenswrapper[4492]: I1126 07:03:35.598716 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Nov 26 07:03:35 crc kubenswrapper[4492]: I1126 07:03:35.600335 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 26 07:03:35 crc kubenswrapper[4492]: I1126 07:03:35.607301 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Nov 26 07:03:35 crc kubenswrapper[4492]: I1126 07:03:35.607763 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Nov 26 07:03:35 crc kubenswrapper[4492]: I1126 07:03:35.608147 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-fw95k" Nov 26 07:03:35 crc kubenswrapper[4492]: I1126 07:03:35.608303 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc" Nov 26 07:03:35 crc kubenswrapper[4492]: I1126 07:03:35.608415 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Nov 26 07:03:35 crc kubenswrapper[4492]: I1126 07:03:35.608590 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data" Nov 26 07:03:35 crc kubenswrapper[4492]: I1126 07:03:35.609130 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Nov 26 07:03:35 crc kubenswrapper[4492]: I1126 07:03:35.639225 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 26 07:03:35 crc kubenswrapper[4492]: I1126 07:03:35.655897 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 26 07:03:35 crc kubenswrapper[4492]: I1126 07:03:35.737571 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-server-0\" (UID: \"daa58280-e6a7-477f-bfdb-accd4f56ac4d\") " pod="openstack/rabbitmq-server-0" Nov 26 07:03:35 crc kubenswrapper[4492]: I1126 07:03:35.737696 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/daa58280-e6a7-477f-bfdb-accd4f56ac4d-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"daa58280-e6a7-477f-bfdb-accd4f56ac4d\") " pod="openstack/rabbitmq-server-0" Nov 26 07:03:35 crc kubenswrapper[4492]: I1126 07:03:35.737758 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/daa58280-e6a7-477f-bfdb-accd4f56ac4d-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"daa58280-e6a7-477f-bfdb-accd4f56ac4d\") " pod="openstack/rabbitmq-server-0" Nov 26 07:03:35 crc kubenswrapper[4492]: I1126 07:03:35.737828 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/daa58280-e6a7-477f-bfdb-accd4f56ac4d-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"daa58280-e6a7-477f-bfdb-accd4f56ac4d\") " pod="openstack/rabbitmq-server-0" Nov 26 07:03:35 crc kubenswrapper[4492]: I1126 07:03:35.737870 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/daa58280-e6a7-477f-bfdb-accd4f56ac4d-config-data\") pod \"rabbitmq-server-0\" (UID: \"daa58280-e6a7-477f-bfdb-accd4f56ac4d\") " pod="openstack/rabbitmq-server-0" Nov 26 07:03:35 crc kubenswrapper[4492]: I1126 07:03:35.737937 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/daa58280-e6a7-477f-bfdb-accd4f56ac4d-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"daa58280-e6a7-477f-bfdb-accd4f56ac4d\") " pod="openstack/rabbitmq-server-0" Nov 26 07:03:35 crc kubenswrapper[4492]: I1126 07:03:35.737995 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dc4k4\" (UniqueName: \"kubernetes.io/projected/daa58280-e6a7-477f-bfdb-accd4f56ac4d-kube-api-access-dc4k4\") pod \"rabbitmq-server-0\" (UID: \"daa58280-e6a7-477f-bfdb-accd4f56ac4d\") " pod="openstack/rabbitmq-server-0" Nov 26 07:03:35 crc kubenswrapper[4492]: I1126 07:03:35.738020 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/daa58280-e6a7-477f-bfdb-accd4f56ac4d-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"daa58280-e6a7-477f-bfdb-accd4f56ac4d\") " pod="openstack/rabbitmq-server-0" Nov 26 07:03:35 crc kubenswrapper[4492]: I1126 07:03:35.738320 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/daa58280-e6a7-477f-bfdb-accd4f56ac4d-server-conf\") pod \"rabbitmq-server-0\" (UID: \"daa58280-e6a7-477f-bfdb-accd4f56ac4d\") " pod="openstack/rabbitmq-server-0" Nov 26 07:03:35 
crc kubenswrapper[4492]: I1126 07:03:35.738362 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/daa58280-e6a7-477f-bfdb-accd4f56ac4d-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"daa58280-e6a7-477f-bfdb-accd4f56ac4d\") " pod="openstack/rabbitmq-server-0" Nov 26 07:03:35 crc kubenswrapper[4492]: I1126 07:03:35.738390 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/daa58280-e6a7-477f-bfdb-accd4f56ac4d-pod-info\") pod \"rabbitmq-server-0\" (UID: \"daa58280-e6a7-477f-bfdb-accd4f56ac4d\") " pod="openstack/rabbitmq-server-0" Nov 26 07:03:35 crc kubenswrapper[4492]: I1126 07:03:35.840287 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/daa58280-e6a7-477f-bfdb-accd4f56ac4d-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"daa58280-e6a7-477f-bfdb-accd4f56ac4d\") " pod="openstack/rabbitmq-server-0" Nov 26 07:03:35 crc kubenswrapper[4492]: I1126 07:03:35.840331 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dc4k4\" (UniqueName: \"kubernetes.io/projected/daa58280-e6a7-477f-bfdb-accd4f56ac4d-kube-api-access-dc4k4\") pod \"rabbitmq-server-0\" (UID: \"daa58280-e6a7-477f-bfdb-accd4f56ac4d\") " pod="openstack/rabbitmq-server-0" Nov 26 07:03:35 crc kubenswrapper[4492]: I1126 07:03:35.840358 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/daa58280-e6a7-477f-bfdb-accd4f56ac4d-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"daa58280-e6a7-477f-bfdb-accd4f56ac4d\") " pod="openstack/rabbitmq-server-0" Nov 26 07:03:35 crc kubenswrapper[4492]: I1126 07:03:35.840388 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/daa58280-e6a7-477f-bfdb-accd4f56ac4d-server-conf\") pod \"rabbitmq-server-0\" (UID: \"daa58280-e6a7-477f-bfdb-accd4f56ac4d\") " pod="openstack/rabbitmq-server-0" Nov 26 07:03:35 crc kubenswrapper[4492]: I1126 07:03:35.840411 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/daa58280-e6a7-477f-bfdb-accd4f56ac4d-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"daa58280-e6a7-477f-bfdb-accd4f56ac4d\") " pod="openstack/rabbitmq-server-0" Nov 26 07:03:35 crc kubenswrapper[4492]: I1126 07:03:35.840426 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/daa58280-e6a7-477f-bfdb-accd4f56ac4d-pod-info\") pod \"rabbitmq-server-0\" (UID: \"daa58280-e6a7-477f-bfdb-accd4f56ac4d\") " pod="openstack/rabbitmq-server-0" Nov 26 07:03:35 crc kubenswrapper[4492]: I1126 07:03:35.840450 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-server-0\" (UID: \"daa58280-e6a7-477f-bfdb-accd4f56ac4d\") " pod="openstack/rabbitmq-server-0" Nov 26 07:03:35 crc kubenswrapper[4492]: I1126 07:03:35.840476 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: 
\"kubernetes.io/empty-dir/daa58280-e6a7-477f-bfdb-accd4f56ac4d-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"daa58280-e6a7-477f-bfdb-accd4f56ac4d\") " pod="openstack/rabbitmq-server-0" Nov 26 07:03:35 crc kubenswrapper[4492]: I1126 07:03:35.840500 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/daa58280-e6a7-477f-bfdb-accd4f56ac4d-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"daa58280-e6a7-477f-bfdb-accd4f56ac4d\") " pod="openstack/rabbitmq-server-0" Nov 26 07:03:35 crc kubenswrapper[4492]: I1126 07:03:35.840522 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/daa58280-e6a7-477f-bfdb-accd4f56ac4d-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"daa58280-e6a7-477f-bfdb-accd4f56ac4d\") " pod="openstack/rabbitmq-server-0" Nov 26 07:03:35 crc kubenswrapper[4492]: I1126 07:03:35.840541 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/daa58280-e6a7-477f-bfdb-accd4f56ac4d-config-data\") pod \"rabbitmq-server-0\" (UID: \"daa58280-e6a7-477f-bfdb-accd4f56ac4d\") " pod="openstack/rabbitmq-server-0" Nov 26 07:03:35 crc kubenswrapper[4492]: I1126 07:03:35.840822 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/daa58280-e6a7-477f-bfdb-accd4f56ac4d-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"daa58280-e6a7-477f-bfdb-accd4f56ac4d\") " pod="openstack/rabbitmq-server-0" Nov 26 07:03:35 crc kubenswrapper[4492]: I1126 07:03:35.841550 4492 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-server-0\" (UID: \"daa58280-e6a7-477f-bfdb-accd4f56ac4d\") device mount path \"/mnt/openstack/pv03\"" pod="openstack/rabbitmq-server-0" Nov 26 07:03:35 crc kubenswrapper[4492]: I1126 07:03:35.842575 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/daa58280-e6a7-477f-bfdb-accd4f56ac4d-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"daa58280-e6a7-477f-bfdb-accd4f56ac4d\") " pod="openstack/rabbitmq-server-0" Nov 26 07:03:35 crc kubenswrapper[4492]: I1126 07:03:35.843200 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/daa58280-e6a7-477f-bfdb-accd4f56ac4d-config-data\") pod \"rabbitmq-server-0\" (UID: \"daa58280-e6a7-477f-bfdb-accd4f56ac4d\") " pod="openstack/rabbitmq-server-0" Nov 26 07:03:35 crc kubenswrapper[4492]: I1126 07:03:35.843438 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/daa58280-e6a7-477f-bfdb-accd4f56ac4d-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"daa58280-e6a7-477f-bfdb-accd4f56ac4d\") " pod="openstack/rabbitmq-server-0" Nov 26 07:03:35 crc kubenswrapper[4492]: I1126 07:03:35.847541 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/daa58280-e6a7-477f-bfdb-accd4f56ac4d-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"daa58280-e6a7-477f-bfdb-accd4f56ac4d\") " pod="openstack/rabbitmq-server-0" Nov 26 07:03:35 crc kubenswrapper[4492]: I1126 07:03:35.854508 4492 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/daa58280-e6a7-477f-bfdb-accd4f56ac4d-pod-info\") pod \"rabbitmq-server-0\" (UID: \"daa58280-e6a7-477f-bfdb-accd4f56ac4d\") " pod="openstack/rabbitmq-server-0" Nov 26 07:03:35 crc kubenswrapper[4492]: I1126 07:03:35.854561 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/daa58280-e6a7-477f-bfdb-accd4f56ac4d-server-conf\") pod \"rabbitmq-server-0\" (UID: \"daa58280-e6a7-477f-bfdb-accd4f56ac4d\") " pod="openstack/rabbitmq-server-0" Nov 26 07:03:35 crc kubenswrapper[4492]: I1126 07:03:35.854593 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/daa58280-e6a7-477f-bfdb-accd4f56ac4d-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"daa58280-e6a7-477f-bfdb-accd4f56ac4d\") " pod="openstack/rabbitmq-server-0" Nov 26 07:03:35 crc kubenswrapper[4492]: I1126 07:03:35.854883 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/daa58280-e6a7-477f-bfdb-accd4f56ac4d-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"daa58280-e6a7-477f-bfdb-accd4f56ac4d\") " pod="openstack/rabbitmq-server-0" Nov 26 07:03:35 crc kubenswrapper[4492]: I1126 07:03:35.878026 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-server-0\" (UID: \"daa58280-e6a7-477f-bfdb-accd4f56ac4d\") " pod="openstack/rabbitmq-server-0" Nov 26 07:03:35 crc kubenswrapper[4492]: I1126 07:03:35.879140 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dc4k4\" (UniqueName: \"kubernetes.io/projected/daa58280-e6a7-477f-bfdb-accd4f56ac4d-kube-api-access-dc4k4\") pod \"rabbitmq-server-0\" (UID: \"daa58280-e6a7-477f-bfdb-accd4f56ac4d\") " pod="openstack/rabbitmq-server-0" Nov 26 07:03:35 crc kubenswrapper[4492]: I1126 07:03:35.929076 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 26 07:03:36 crc kubenswrapper[4492]: I1126 07:03:36.086940 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-85744f897f-kbhm6" event={"ID":"ef4332f8-32f1-4ec9-a333-8b8b025151be","Type":"ContainerStarted","Data":"636b64151c1488f3e0d68f0e1bc7657715425ed4f60ce2bfbfb92c7ba3ed2990"} Nov 26 07:03:36 crc kubenswrapper[4492]: I1126 07:03:36.095589 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-657d948df5-j4blw" event={"ID":"e068f599-17d9-4fd1-8fca-d74938c89110","Type":"ContainerStarted","Data":"9e0d611b70f013b36f31e50674522a705a0cacea7db9550447fb82c5b57142c6"} Nov 26 07:03:36 crc kubenswrapper[4492]: I1126 07:03:36.171871 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 26 07:03:36 crc kubenswrapper[4492]: W1126 07:03:36.190721 4492 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3bb75c38_10db_46c0_947c_3d91eca8f110.slice/crio-9459340d60f48000ea76afd601230115cc2d298bdb618f8e5d7cc7f2814e99f7 WatchSource:0}: Error finding container 9459340d60f48000ea76afd601230115cc2d298bdb618f8e5d7cc7f2814e99f7: Status 404 returned error can't find the container with id 9459340d60f48000ea76afd601230115cc2d298bdb618f8e5d7cc7f2814e99f7 Nov 26 07:03:36 crc kubenswrapper[4492]: I1126 07:03:36.489376 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 26 07:03:36 crc kubenswrapper[4492]: I1126 07:03:36.981032 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-galera-0"] Nov 26 07:03:36 crc kubenswrapper[4492]: I1126 07:03:36.983019 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-galera-0" Nov 26 07:03:36 crc kubenswrapper[4492]: I1126 07:03:36.990204 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Nov 26 07:03:36 crc kubenswrapper[4492]: I1126 07:03:36.992291 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-dockercfg-f5zcj" Nov 26 07:03:36 crc kubenswrapper[4492]: I1126 07:03:36.992768 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-svc" Nov 26 07:03:36 crc kubenswrapper[4492]: I1126 07:03:36.992367 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config-data" Nov 26 07:03:36 crc kubenswrapper[4492]: I1126 07:03:36.996663 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"combined-ca-bundle" Nov 26 07:03:37 crc kubenswrapper[4492]: I1126 07:03:37.001756 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-scripts" Nov 26 07:03:37 crc kubenswrapper[4492]: I1126 07:03:37.073068 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/11fb7794-a2db-4320-8946-91b18bb44afa-kolla-config\") pod \"openstack-galera-0\" (UID: \"11fb7794-a2db-4320-8946-91b18bb44afa\") " pod="openstack/openstack-galera-0" Nov 26 07:03:37 crc kubenswrapper[4492]: I1126 07:03:37.073770 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/11fb7794-a2db-4320-8946-91b18bb44afa-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"11fb7794-a2db-4320-8946-91b18bb44afa\") " pod="openstack/openstack-galera-0" Nov 26 07:03:37 crc kubenswrapper[4492]: I1126 07:03:37.073881 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"openstack-galera-0\" (UID: \"11fb7794-a2db-4320-8946-91b18bb44afa\") " pod="openstack/openstack-galera-0" Nov 26 07:03:37 crc kubenswrapper[4492]: I1126 07:03:37.073994 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/11fb7794-a2db-4320-8946-91b18bb44afa-config-data-generated\") pod \"openstack-galera-0\" (UID: \"11fb7794-a2db-4320-8946-91b18bb44afa\") " pod="openstack/openstack-galera-0" Nov 26 07:03:37 crc kubenswrapper[4492]: I1126 07:03:37.074024 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/11fb7794-a2db-4320-8946-91b18bb44afa-operator-scripts\") pod \"openstack-galera-0\" (UID: \"11fb7794-a2db-4320-8946-91b18bb44afa\") " pod="openstack/openstack-galera-0" Nov 26 07:03:37 crc kubenswrapper[4492]: I1126 07:03:37.074143 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/11fb7794-a2db-4320-8946-91b18bb44afa-config-data-default\") pod \"openstack-galera-0\" (UID: \"11fb7794-a2db-4320-8946-91b18bb44afa\") " pod="openstack/openstack-galera-0" Nov 26 07:03:37 crc kubenswrapper[4492]: I1126 07:03:37.074333 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/11fb7794-a2db-4320-8946-91b18bb44afa-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"11fb7794-a2db-4320-8946-91b18bb44afa\") " pod="openstack/openstack-galera-0" Nov 26 07:03:37 crc kubenswrapper[4492]: I1126 07:03:37.074397 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pbmmh\" (UniqueName: \"kubernetes.io/projected/11fb7794-a2db-4320-8946-91b18bb44afa-kube-api-access-pbmmh\") pod \"openstack-galera-0\" (UID: \"11fb7794-a2db-4320-8946-91b18bb44afa\") " pod="openstack/openstack-galera-0" Nov 26 07:03:37 crc kubenswrapper[4492]: I1126 07:03:37.159899 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"daa58280-e6a7-477f-bfdb-accd4f56ac4d","Type":"ContainerStarted","Data":"0ac47d0bf75344d8e6895e5db6fd3abd9d9ea667f31e08dae7425d8c56cf2f8f"} Nov 26 07:03:37 crc kubenswrapper[4492]: I1126 07:03:37.162078 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"3bb75c38-10db-46c0-947c-3d91eca8f110","Type":"ContainerStarted","Data":"9459340d60f48000ea76afd601230115cc2d298bdb618f8e5d7cc7f2814e99f7"} Nov 26 07:03:37 crc kubenswrapper[4492]: I1126 07:03:37.177213 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/11fb7794-a2db-4320-8946-91b18bb44afa-kolla-config\") pod \"openstack-galera-0\" (UID: \"11fb7794-a2db-4320-8946-91b18bb44afa\") " pod="openstack/openstack-galera-0" Nov 26 07:03:37 crc kubenswrapper[4492]: I1126 07:03:37.177332 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/11fb7794-a2db-4320-8946-91b18bb44afa-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"11fb7794-a2db-4320-8946-91b18bb44afa\") " pod="openstack/openstack-galera-0" Nov 26 07:03:37 crc kubenswrapper[4492]: I1126 07:03:37.177381 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"openstack-galera-0\" (UID: \"11fb7794-a2db-4320-8946-91b18bb44afa\") " pod="openstack/openstack-galera-0" Nov 26 07:03:37 crc kubenswrapper[4492]: I1126 07:03:37.177465 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/11fb7794-a2db-4320-8946-91b18bb44afa-config-data-generated\") pod \"openstack-galera-0\" (UID: \"11fb7794-a2db-4320-8946-91b18bb44afa\") " pod="openstack/openstack-galera-0" Nov 26 07:03:37 crc kubenswrapper[4492]: I1126 07:03:37.177509 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/11fb7794-a2db-4320-8946-91b18bb44afa-operator-scripts\") pod \"openstack-galera-0\" (UID: \"11fb7794-a2db-4320-8946-91b18bb44afa\") " pod="openstack/openstack-galera-0" Nov 26 07:03:37 crc kubenswrapper[4492]: I1126 07:03:37.177602 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/11fb7794-a2db-4320-8946-91b18bb44afa-config-data-default\") pod \"openstack-galera-0\" (UID: \"11fb7794-a2db-4320-8946-91b18bb44afa\") " pod="openstack/openstack-galera-0" Nov 26 07:03:37 crc kubenswrapper[4492]: I1126 07:03:37.177644 4492 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/11fb7794-a2db-4320-8946-91b18bb44afa-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"11fb7794-a2db-4320-8946-91b18bb44afa\") " pod="openstack/openstack-galera-0" Nov 26 07:03:37 crc kubenswrapper[4492]: I1126 07:03:37.177684 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pbmmh\" (UniqueName: \"kubernetes.io/projected/11fb7794-a2db-4320-8946-91b18bb44afa-kube-api-access-pbmmh\") pod \"openstack-galera-0\" (UID: \"11fb7794-a2db-4320-8946-91b18bb44afa\") " pod="openstack/openstack-galera-0" Nov 26 07:03:37 crc kubenswrapper[4492]: I1126 07:03:37.179658 4492 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"openstack-galera-0\" (UID: \"11fb7794-a2db-4320-8946-91b18bb44afa\") device mount path \"/mnt/openstack/pv07\"" pod="openstack/openstack-galera-0" Nov 26 07:03:37 crc kubenswrapper[4492]: I1126 07:03:37.182589 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/11fb7794-a2db-4320-8946-91b18bb44afa-kolla-config\") pod \"openstack-galera-0\" (UID: \"11fb7794-a2db-4320-8946-91b18bb44afa\") " pod="openstack/openstack-galera-0" Nov 26 07:03:37 crc kubenswrapper[4492]: I1126 07:03:37.186929 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/11fb7794-a2db-4320-8946-91b18bb44afa-config-data-default\") pod \"openstack-galera-0\" (UID: \"11fb7794-a2db-4320-8946-91b18bb44afa\") " pod="openstack/openstack-galera-0" Nov 26 07:03:37 crc kubenswrapper[4492]: I1126 07:03:37.187994 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/11fb7794-a2db-4320-8946-91b18bb44afa-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"11fb7794-a2db-4320-8946-91b18bb44afa\") " pod="openstack/openstack-galera-0" Nov 26 07:03:37 crc kubenswrapper[4492]: I1126 07:03:37.189862 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/11fb7794-a2db-4320-8946-91b18bb44afa-config-data-generated\") pod \"openstack-galera-0\" (UID: \"11fb7794-a2db-4320-8946-91b18bb44afa\") " pod="openstack/openstack-galera-0" Nov 26 07:03:37 crc kubenswrapper[4492]: I1126 07:03:37.190547 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/11fb7794-a2db-4320-8946-91b18bb44afa-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"11fb7794-a2db-4320-8946-91b18bb44afa\") " pod="openstack/openstack-galera-0" Nov 26 07:03:37 crc kubenswrapper[4492]: I1126 07:03:37.197509 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pbmmh\" (UniqueName: \"kubernetes.io/projected/11fb7794-a2db-4320-8946-91b18bb44afa-kube-api-access-pbmmh\") pod \"openstack-galera-0\" (UID: \"11fb7794-a2db-4320-8946-91b18bb44afa\") " pod="openstack/openstack-galera-0" Nov 26 07:03:37 crc kubenswrapper[4492]: I1126 07:03:37.199245 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/11fb7794-a2db-4320-8946-91b18bb44afa-operator-scripts\") pod \"openstack-galera-0\" (UID: 
\"11fb7794-a2db-4320-8946-91b18bb44afa\") " pod="openstack/openstack-galera-0" Nov 26 07:03:37 crc kubenswrapper[4492]: I1126 07:03:37.210472 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"openstack-galera-0\" (UID: \"11fb7794-a2db-4320-8946-91b18bb44afa\") " pod="openstack/openstack-galera-0" Nov 26 07:03:37 crc kubenswrapper[4492]: I1126 07:03:37.311724 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0" Nov 26 07:03:38 crc kubenswrapper[4492]: I1126 07:03:38.259316 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Nov 26 07:03:38 crc kubenswrapper[4492]: W1126 07:03:38.278760 4492 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod11fb7794_a2db_4320_8946_91b18bb44afa.slice/crio-b49c4e0f4af956e729da804a7dfae9a939175c756d9a697af6c538dc6d496069 WatchSource:0}: Error finding container b49c4e0f4af956e729da804a7dfae9a939175c756d9a697af6c538dc6d496069: Status 404 returned error can't find the container with id b49c4e0f4af956e729da804a7dfae9a939175c756d9a697af6c538dc6d496069 Nov 26 07:03:38 crc kubenswrapper[4492]: I1126 07:03:38.525282 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 26 07:03:38 crc kubenswrapper[4492]: I1126 07:03:38.533155 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Nov 26 07:03:38 crc kubenswrapper[4492]: I1126 07:03:38.538340 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-cell1-dockercfg-p842w" Nov 26 07:03:38 crc kubenswrapper[4492]: I1126 07:03:38.538632 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-scripts" Nov 26 07:03:38 crc kubenswrapper[4492]: I1126 07:03:38.539035 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-cell1-svc" Nov 26 07:03:38 crc kubenswrapper[4492]: I1126 07:03:38.538465 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-config-data" Nov 26 07:03:38 crc kubenswrapper[4492]: I1126 07:03:38.570626 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 26 07:03:38 crc kubenswrapper[4492]: I1126 07:03:38.603591 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/memcached-0"] Nov 26 07:03:38 crc kubenswrapper[4492]: I1126 07:03:38.606986 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/memcached-0" Nov 26 07:03:38 crc kubenswrapper[4492]: I1126 07:03:38.612638 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"memcached-memcached-dockercfg-6fvww" Nov 26 07:03:38 crc kubenswrapper[4492]: I1126 07:03:38.612835 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-memcached-svc" Nov 26 07:03:38 crc kubenswrapper[4492]: I1126 07:03:38.615014 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"memcached-config-data" Nov 26 07:03:38 crc kubenswrapper[4492]: I1126 07:03:38.617335 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Nov 26 07:03:38 crc kubenswrapper[4492]: I1126 07:03:38.622954 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2772f93f-41fd-4817-9c9f-3932e094b0ac-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"2772f93f-41fd-4817-9c9f-3932e094b0ac\") " pod="openstack/openstack-cell1-galera-0" Nov 26 07:03:38 crc kubenswrapper[4492]: I1126 07:03:38.623069 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/2772f93f-41fd-4817-9c9f-3932e094b0ac-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"2772f93f-41fd-4817-9c9f-3932e094b0ac\") " pod="openstack/openstack-cell1-galera-0" Nov 26 07:03:38 crc kubenswrapper[4492]: I1126 07:03:38.623105 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/2772f93f-41fd-4817-9c9f-3932e094b0ac-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"2772f93f-41fd-4817-9c9f-3932e094b0ac\") " pod="openstack/openstack-cell1-galera-0" Nov 26 07:03:38 crc kubenswrapper[4492]: I1126 07:03:38.623121 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2772f93f-41fd-4817-9c9f-3932e094b0ac-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"2772f93f-41fd-4817-9c9f-3932e094b0ac\") " pod="openstack/openstack-cell1-galera-0" Nov 26 07:03:38 crc kubenswrapper[4492]: I1126 07:03:38.623140 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/2772f93f-41fd-4817-9c9f-3932e094b0ac-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"2772f93f-41fd-4817-9c9f-3932e094b0ac\") " pod="openstack/openstack-cell1-galera-0" Nov 26 07:03:38 crc kubenswrapper[4492]: I1126 07:03:38.623198 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"openstack-cell1-galera-0\" (UID: \"2772f93f-41fd-4817-9c9f-3932e094b0ac\") " pod="openstack/openstack-cell1-galera-0" Nov 26 07:03:38 crc kubenswrapper[4492]: I1126 07:03:38.623249 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m54p2\" (UniqueName: \"kubernetes.io/projected/2772f93f-41fd-4817-9c9f-3932e094b0ac-kube-api-access-m54p2\") pod \"openstack-cell1-galera-0\" (UID: \"2772f93f-41fd-4817-9c9f-3932e094b0ac\") " pod="openstack/openstack-cell1-galera-0" Nov 26 07:03:38 crc 
kubenswrapper[4492]: I1126 07:03:38.623273 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/2772f93f-41fd-4817-9c9f-3932e094b0ac-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"2772f93f-41fd-4817-9c9f-3932e094b0ac\") " pod="openstack/openstack-cell1-galera-0" Nov 26 07:03:38 crc kubenswrapper[4492]: I1126 07:03:38.724765 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/2772f93f-41fd-4817-9c9f-3932e094b0ac-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"2772f93f-41fd-4817-9c9f-3932e094b0ac\") " pod="openstack/openstack-cell1-galera-0" Nov 26 07:03:38 crc kubenswrapper[4492]: I1126 07:03:38.724830 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/2772f93f-41fd-4817-9c9f-3932e094b0ac-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"2772f93f-41fd-4817-9c9f-3932e094b0ac\") " pod="openstack/openstack-cell1-galera-0" Nov 26 07:03:38 crc kubenswrapper[4492]: I1126 07:03:38.724857 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2772f93f-41fd-4817-9c9f-3932e094b0ac-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"2772f93f-41fd-4817-9c9f-3932e094b0ac\") " pod="openstack/openstack-cell1-galera-0" Nov 26 07:03:38 crc kubenswrapper[4492]: I1126 07:03:38.725284 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/2772f93f-41fd-4817-9c9f-3932e094b0ac-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"2772f93f-41fd-4817-9c9f-3932e094b0ac\") " pod="openstack/openstack-cell1-galera-0" Nov 26 07:03:38 crc kubenswrapper[4492]: I1126 07:03:38.726522 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/2772f93f-41fd-4817-9c9f-3932e094b0ac-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"2772f93f-41fd-4817-9c9f-3932e094b0ac\") " pod="openstack/openstack-cell1-galera-0" Nov 26 07:03:38 crc kubenswrapper[4492]: I1126 07:03:38.726567 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/2772f93f-41fd-4817-9c9f-3932e094b0ac-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"2772f93f-41fd-4817-9c9f-3932e094b0ac\") " pod="openstack/openstack-cell1-galera-0" Nov 26 07:03:38 crc kubenswrapper[4492]: I1126 07:03:38.726620 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"openstack-cell1-galera-0\" (UID: \"2772f93f-41fd-4817-9c9f-3932e094b0ac\") " pod="openstack/openstack-cell1-galera-0" Nov 26 07:03:38 crc kubenswrapper[4492]: I1126 07:03:38.727137 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/0a5df877-dd0c-4f1d-90bd-07a9b738e4e3-kolla-config\") pod \"memcached-0\" (UID: \"0a5df877-dd0c-4f1d-90bd-07a9b738e4e3\") " pod="openstack/memcached-0" Nov 26 07:03:38 crc kubenswrapper[4492]: I1126 07:03:38.727197 4492 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-df5pl\" (UniqueName: \"kubernetes.io/projected/0a5df877-dd0c-4f1d-90bd-07a9b738e4e3-kube-api-access-df5pl\") pod \"memcached-0\" (UID: \"0a5df877-dd0c-4f1d-90bd-07a9b738e4e3\") " pod="openstack/memcached-0" Nov 26 07:03:38 crc kubenswrapper[4492]: I1126 07:03:38.727220 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m54p2\" (UniqueName: \"kubernetes.io/projected/2772f93f-41fd-4817-9c9f-3932e094b0ac-kube-api-access-m54p2\") pod \"openstack-cell1-galera-0\" (UID: \"2772f93f-41fd-4817-9c9f-3932e094b0ac\") " pod="openstack/openstack-cell1-galera-0" Nov 26 07:03:38 crc kubenswrapper[4492]: I1126 07:03:38.727245 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/2772f93f-41fd-4817-9c9f-3932e094b0ac-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"2772f93f-41fd-4817-9c9f-3932e094b0ac\") " pod="openstack/openstack-cell1-galera-0" Nov 26 07:03:38 crc kubenswrapper[4492]: I1126 07:03:38.727267 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a5df877-dd0c-4f1d-90bd-07a9b738e4e3-combined-ca-bundle\") pod \"memcached-0\" (UID: \"0a5df877-dd0c-4f1d-90bd-07a9b738e4e3\") " pod="openstack/memcached-0" Nov 26 07:03:38 crc kubenswrapper[4492]: I1126 07:03:38.727329 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2772f93f-41fd-4817-9c9f-3932e094b0ac-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"2772f93f-41fd-4817-9c9f-3932e094b0ac\") " pod="openstack/openstack-cell1-galera-0" Nov 26 07:03:38 crc kubenswrapper[4492]: I1126 07:03:38.727354 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/0a5df877-dd0c-4f1d-90bd-07a9b738e4e3-memcached-tls-certs\") pod \"memcached-0\" (UID: \"0a5df877-dd0c-4f1d-90bd-07a9b738e4e3\") " pod="openstack/memcached-0" Nov 26 07:03:38 crc kubenswrapper[4492]: I1126 07:03:38.727370 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/0a5df877-dd0c-4f1d-90bd-07a9b738e4e3-config-data\") pod \"memcached-0\" (UID: \"0a5df877-dd0c-4f1d-90bd-07a9b738e4e3\") " pod="openstack/memcached-0" Nov 26 07:03:38 crc kubenswrapper[4492]: I1126 07:03:38.727600 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/2772f93f-41fd-4817-9c9f-3932e094b0ac-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"2772f93f-41fd-4817-9c9f-3932e094b0ac\") " pod="openstack/openstack-cell1-galera-0" Nov 26 07:03:38 crc kubenswrapper[4492]: I1126 07:03:38.731124 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2772f93f-41fd-4817-9c9f-3932e094b0ac-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"2772f93f-41fd-4817-9c9f-3932e094b0ac\") " pod="openstack/openstack-cell1-galera-0" Nov 26 07:03:38 crc kubenswrapper[4492]: I1126 07:03:38.731399 4492 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage08-crc\" (UniqueName: 
\"kubernetes.io/local-volume/local-storage08-crc\") pod \"openstack-cell1-galera-0\" (UID: \"2772f93f-41fd-4817-9c9f-3932e094b0ac\") device mount path \"/mnt/openstack/pv08\"" pod="openstack/openstack-cell1-galera-0" Nov 26 07:03:38 crc kubenswrapper[4492]: I1126 07:03:38.731941 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/2772f93f-41fd-4817-9c9f-3932e094b0ac-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"2772f93f-41fd-4817-9c9f-3932e094b0ac\") " pod="openstack/openstack-cell1-galera-0" Nov 26 07:03:38 crc kubenswrapper[4492]: I1126 07:03:38.745486 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2772f93f-41fd-4817-9c9f-3932e094b0ac-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"2772f93f-41fd-4817-9c9f-3932e094b0ac\") " pod="openstack/openstack-cell1-galera-0" Nov 26 07:03:38 crc kubenswrapper[4492]: I1126 07:03:38.753655 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m54p2\" (UniqueName: \"kubernetes.io/projected/2772f93f-41fd-4817-9c9f-3932e094b0ac-kube-api-access-m54p2\") pod \"openstack-cell1-galera-0\" (UID: \"2772f93f-41fd-4817-9c9f-3932e094b0ac\") " pod="openstack/openstack-cell1-galera-0" Nov 26 07:03:38 crc kubenswrapper[4492]: I1126 07:03:38.765380 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"openstack-cell1-galera-0\" (UID: \"2772f93f-41fd-4817-9c9f-3932e094b0ac\") " pod="openstack/openstack-cell1-galera-0" Nov 26 07:03:38 crc kubenswrapper[4492]: I1126 07:03:38.829916 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/0a5df877-dd0c-4f1d-90bd-07a9b738e4e3-kolla-config\") pod \"memcached-0\" (UID: \"0a5df877-dd0c-4f1d-90bd-07a9b738e4e3\") " pod="openstack/memcached-0" Nov 26 07:03:38 crc kubenswrapper[4492]: I1126 07:03:38.830102 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/0a5df877-dd0c-4f1d-90bd-07a9b738e4e3-kolla-config\") pod \"memcached-0\" (UID: \"0a5df877-dd0c-4f1d-90bd-07a9b738e4e3\") " pod="openstack/memcached-0" Nov 26 07:03:38 crc kubenswrapper[4492]: I1126 07:03:38.830150 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-df5pl\" (UniqueName: \"kubernetes.io/projected/0a5df877-dd0c-4f1d-90bd-07a9b738e4e3-kube-api-access-df5pl\") pod \"memcached-0\" (UID: \"0a5df877-dd0c-4f1d-90bd-07a9b738e4e3\") " pod="openstack/memcached-0" Nov 26 07:03:38 crc kubenswrapper[4492]: I1126 07:03:38.832742 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a5df877-dd0c-4f1d-90bd-07a9b738e4e3-combined-ca-bundle\") pod \"memcached-0\" (UID: \"0a5df877-dd0c-4f1d-90bd-07a9b738e4e3\") " pod="openstack/memcached-0" Nov 26 07:03:38 crc kubenswrapper[4492]: I1126 07:03:38.832842 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/0a5df877-dd0c-4f1d-90bd-07a9b738e4e3-config-data\") pod \"memcached-0\" (UID: \"0a5df877-dd0c-4f1d-90bd-07a9b738e4e3\") " pod="openstack/memcached-0" Nov 26 07:03:38 crc kubenswrapper[4492]: I1126 07:03:38.832857 4492 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/0a5df877-dd0c-4f1d-90bd-07a9b738e4e3-memcached-tls-certs\") pod \"memcached-0\" (UID: \"0a5df877-dd0c-4f1d-90bd-07a9b738e4e3\") " pod="openstack/memcached-0" Nov 26 07:03:38 crc kubenswrapper[4492]: I1126 07:03:38.833663 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/0a5df877-dd0c-4f1d-90bd-07a9b738e4e3-config-data\") pod \"memcached-0\" (UID: \"0a5df877-dd0c-4f1d-90bd-07a9b738e4e3\") " pod="openstack/memcached-0" Nov 26 07:03:38 crc kubenswrapper[4492]: I1126 07:03:38.838647 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/0a5df877-dd0c-4f1d-90bd-07a9b738e4e3-memcached-tls-certs\") pod \"memcached-0\" (UID: \"0a5df877-dd0c-4f1d-90bd-07a9b738e4e3\") " pod="openstack/memcached-0" Nov 26 07:03:38 crc kubenswrapper[4492]: I1126 07:03:38.841481 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a5df877-dd0c-4f1d-90bd-07a9b738e4e3-combined-ca-bundle\") pod \"memcached-0\" (UID: \"0a5df877-dd0c-4f1d-90bd-07a9b738e4e3\") " pod="openstack/memcached-0" Nov 26 07:03:38 crc kubenswrapper[4492]: I1126 07:03:38.853516 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Nov 26 07:03:38 crc kubenswrapper[4492]: I1126 07:03:38.860628 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-df5pl\" (UniqueName: \"kubernetes.io/projected/0a5df877-dd0c-4f1d-90bd-07a9b738e4e3-kube-api-access-df5pl\") pod \"memcached-0\" (UID: \"0a5df877-dd0c-4f1d-90bd-07a9b738e4e3\") " pod="openstack/memcached-0" Nov 26 07:03:38 crc kubenswrapper[4492]: I1126 07:03:38.949608 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Nov 26 07:03:39 crc kubenswrapper[4492]: I1126 07:03:39.200795 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"11fb7794-a2db-4320-8946-91b18bb44afa","Type":"ContainerStarted","Data":"b49c4e0f4af956e729da804a7dfae9a939175c756d9a697af6c538dc6d496069"} Nov 26 07:03:39 crc kubenswrapper[4492]: I1126 07:03:39.408801 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Nov 26 07:03:39 crc kubenswrapper[4492]: I1126 07:03:39.600894 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 26 07:03:40 crc kubenswrapper[4492]: I1126 07:03:40.225972 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"2772f93f-41fd-4817-9c9f-3932e094b0ac","Type":"ContainerStarted","Data":"04b19cecd2f5d153c3e01e3565d08dff0cb86e72619a92bfabe33c52a4f22c1b"} Nov 26 07:03:40 crc kubenswrapper[4492]: I1126 07:03:40.235845 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"0a5df877-dd0c-4f1d-90bd-07a9b738e4e3","Type":"ContainerStarted","Data":"debe4fc27c9fd36b397cd1410bd6fd6f8e8a3d34b4db6b8437a3ff40c077a9e2"} Nov 26 07:03:40 crc kubenswrapper[4492]: I1126 07:03:40.526468 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Nov 26 07:03:40 crc kubenswrapper[4492]: I1126 07:03:40.528333 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 26 07:03:40 crc kubenswrapper[4492]: I1126 07:03:40.531650 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pjsxl\" (UniqueName: \"kubernetes.io/projected/40ff3c7f-2bdf-42be-bcde-659ad3f15ca5-kube-api-access-pjsxl\") pod \"kube-state-metrics-0\" (UID: \"40ff3c7f-2bdf-42be-bcde-659ad3f15ca5\") " pod="openstack/kube-state-metrics-0" Nov 26 07:03:40 crc kubenswrapper[4492]: I1126 07:03:40.541292 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-ceilometer-dockercfg-dkj9n" Nov 26 07:03:40 crc kubenswrapper[4492]: I1126 07:03:40.554091 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 26 07:03:40 crc kubenswrapper[4492]: I1126 07:03:40.643429 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pjsxl\" (UniqueName: \"kubernetes.io/projected/40ff3c7f-2bdf-42be-bcde-659ad3f15ca5-kube-api-access-pjsxl\") pod \"kube-state-metrics-0\" (UID: \"40ff3c7f-2bdf-42be-bcde-659ad3f15ca5\") " pod="openstack/kube-state-metrics-0" Nov 26 07:03:40 crc kubenswrapper[4492]: I1126 07:03:40.672941 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pjsxl\" (UniqueName: \"kubernetes.io/projected/40ff3c7f-2bdf-42be-bcde-659ad3f15ca5-kube-api-access-pjsxl\") pod \"kube-state-metrics-0\" (UID: \"40ff3c7f-2bdf-42be-bcde-659ad3f15ca5\") " pod="openstack/kube-state-metrics-0" Nov 26 07:03:40 crc kubenswrapper[4492]: I1126 07:03:40.881205 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 26 07:03:41 crc kubenswrapper[4492]: I1126 07:03:41.656454 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 26 07:03:43 crc kubenswrapper[4492]: I1126 07:03:43.295201 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-576bz"] Nov 26 07:03:43 crc kubenswrapper[4492]: I1126 07:03:43.298321 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-576bz" Nov 26 07:03:43 crc kubenswrapper[4492]: I1126 07:03:43.308937 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-576bz"] Nov 26 07:03:43 crc kubenswrapper[4492]: I1126 07:03:43.364049 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2bs59\" (UniqueName: \"kubernetes.io/projected/da5f9543-4f48-47b1-9164-d2e736a1bc6f-kube-api-access-2bs59\") pod \"redhat-marketplace-576bz\" (UID: \"da5f9543-4f48-47b1-9164-d2e736a1bc6f\") " pod="openshift-marketplace/redhat-marketplace-576bz" Nov 26 07:03:43 crc kubenswrapper[4492]: I1126 07:03:43.364256 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/da5f9543-4f48-47b1-9164-d2e736a1bc6f-utilities\") pod \"redhat-marketplace-576bz\" (UID: \"da5f9543-4f48-47b1-9164-d2e736a1bc6f\") " pod="openshift-marketplace/redhat-marketplace-576bz" Nov 26 07:03:43 crc kubenswrapper[4492]: I1126 07:03:43.364316 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/da5f9543-4f48-47b1-9164-d2e736a1bc6f-catalog-content\") pod \"redhat-marketplace-576bz\" (UID: \"da5f9543-4f48-47b1-9164-d2e736a1bc6f\") " pod="openshift-marketplace/redhat-marketplace-576bz" Nov 26 07:03:43 crc kubenswrapper[4492]: I1126 07:03:43.467092 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2bs59\" (UniqueName: \"kubernetes.io/projected/da5f9543-4f48-47b1-9164-d2e736a1bc6f-kube-api-access-2bs59\") pod \"redhat-marketplace-576bz\" (UID: \"da5f9543-4f48-47b1-9164-d2e736a1bc6f\") " pod="openshift-marketplace/redhat-marketplace-576bz" Nov 26 07:03:43 crc kubenswrapper[4492]: I1126 07:03:43.467220 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/da5f9543-4f48-47b1-9164-d2e736a1bc6f-utilities\") pod \"redhat-marketplace-576bz\" (UID: \"da5f9543-4f48-47b1-9164-d2e736a1bc6f\") " pod="openshift-marketplace/redhat-marketplace-576bz" Nov 26 07:03:43 crc kubenswrapper[4492]: I1126 07:03:43.467255 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/da5f9543-4f48-47b1-9164-d2e736a1bc6f-catalog-content\") pod \"redhat-marketplace-576bz\" (UID: \"da5f9543-4f48-47b1-9164-d2e736a1bc6f\") " pod="openshift-marketplace/redhat-marketplace-576bz" Nov 26 07:03:43 crc kubenswrapper[4492]: I1126 07:03:43.467711 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/da5f9543-4f48-47b1-9164-d2e736a1bc6f-catalog-content\") pod \"redhat-marketplace-576bz\" (UID: \"da5f9543-4f48-47b1-9164-d2e736a1bc6f\") " pod="openshift-marketplace/redhat-marketplace-576bz" Nov 26 07:03:43 crc kubenswrapper[4492]: I1126 07:03:43.468278 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/da5f9543-4f48-47b1-9164-d2e736a1bc6f-utilities\") pod \"redhat-marketplace-576bz\" (UID: \"da5f9543-4f48-47b1-9164-d2e736a1bc6f\") " pod="openshift-marketplace/redhat-marketplace-576bz" Nov 26 07:03:43 crc kubenswrapper[4492]: I1126 07:03:43.511295 4492 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-2bs59\" (UniqueName: \"kubernetes.io/projected/da5f9543-4f48-47b1-9164-d2e736a1bc6f-kube-api-access-2bs59\") pod \"redhat-marketplace-576bz\" (UID: \"da5f9543-4f48-47b1-9164-d2e736a1bc6f\") " pod="openshift-marketplace/redhat-marketplace-576bz" Nov 26 07:03:43 crc kubenswrapper[4492]: I1126 07:03:43.657362 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-576bz" Nov 26 07:03:44 crc kubenswrapper[4492]: I1126 07:03:44.377026 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 26 07:03:44 crc kubenswrapper[4492]: I1126 07:03:44.378835 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0" Nov 26 07:03:44 crc kubenswrapper[4492]: I1126 07:03:44.390326 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovn-metrics" Nov 26 07:03:44 crc kubenswrapper[4492]: I1126 07:03:44.390563 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-nb-dockercfg-zcpwd" Nov 26 07:03:44 crc kubenswrapper[4492]: I1126 07:03:44.390715 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-config" Nov 26 07:03:44 crc kubenswrapper[4492]: I1126 07:03:44.390822 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-nb-ovndbs" Nov 26 07:03:44 crc kubenswrapper[4492]: I1126 07:03:44.391467 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-scripts" Nov 26 07:03:44 crc kubenswrapper[4492]: I1126 07:03:44.393408 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 26 07:03:44 crc kubenswrapper[4492]: I1126 07:03:44.496643 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e87fc594-8585-4df8-97e6-2abda24b3fcc-config\") pod \"ovsdbserver-nb-0\" (UID: \"e87fc594-8585-4df8-97e6-2abda24b3fcc\") " pod="openstack/ovsdbserver-nb-0" Nov 26 07:03:44 crc kubenswrapper[4492]: I1126 07:03:44.496920 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/e87fc594-8585-4df8-97e6-2abda24b3fcc-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"e87fc594-8585-4df8-97e6-2abda24b3fcc\") " pod="openstack/ovsdbserver-nb-0" Nov 26 07:03:44 crc kubenswrapper[4492]: I1126 07:03:44.497010 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qsqst\" (UniqueName: \"kubernetes.io/projected/e87fc594-8585-4df8-97e6-2abda24b3fcc-kube-api-access-qsqst\") pod \"ovsdbserver-nb-0\" (UID: \"e87fc594-8585-4df8-97e6-2abda24b3fcc\") " pod="openstack/ovsdbserver-nb-0" Nov 26 07:03:44 crc kubenswrapper[4492]: I1126 07:03:44.497030 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"ovsdbserver-nb-0\" (UID: \"e87fc594-8585-4df8-97e6-2abda24b3fcc\") " pod="openstack/ovsdbserver-nb-0" Nov 26 07:03:44 crc kubenswrapper[4492]: I1126 07:03:44.497049 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/e87fc594-8585-4df8-97e6-2abda24b3fcc-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"e87fc594-8585-4df8-97e6-2abda24b3fcc\") " pod="openstack/ovsdbserver-nb-0" Nov 26 07:03:44 crc kubenswrapper[4492]: I1126 07:03:44.497065 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/e87fc594-8585-4df8-97e6-2abda24b3fcc-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"e87fc594-8585-4df8-97e6-2abda24b3fcc\") " pod="openstack/ovsdbserver-nb-0" Nov 26 07:03:44 crc kubenswrapper[4492]: I1126 07:03:44.497083 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e87fc594-8585-4df8-97e6-2abda24b3fcc-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"e87fc594-8585-4df8-97e6-2abda24b3fcc\") " pod="openstack/ovsdbserver-nb-0" Nov 26 07:03:44 crc kubenswrapper[4492]: I1126 07:03:44.497102 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e87fc594-8585-4df8-97e6-2abda24b3fcc-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"e87fc594-8585-4df8-97e6-2abda24b3fcc\") " pod="openstack/ovsdbserver-nb-0" Nov 26 07:03:44 crc kubenswrapper[4492]: I1126 07:03:44.600022 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e87fc594-8585-4df8-97e6-2abda24b3fcc-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"e87fc594-8585-4df8-97e6-2abda24b3fcc\") " pod="openstack/ovsdbserver-nb-0" Nov 26 07:03:44 crc kubenswrapper[4492]: I1126 07:03:44.600105 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e87fc594-8585-4df8-97e6-2abda24b3fcc-config\") pod \"ovsdbserver-nb-0\" (UID: \"e87fc594-8585-4df8-97e6-2abda24b3fcc\") " pod="openstack/ovsdbserver-nb-0" Nov 26 07:03:44 crc kubenswrapper[4492]: I1126 07:03:44.600183 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/e87fc594-8585-4df8-97e6-2abda24b3fcc-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"e87fc594-8585-4df8-97e6-2abda24b3fcc\") " pod="openstack/ovsdbserver-nb-0" Nov 26 07:03:44 crc kubenswrapper[4492]: I1126 07:03:44.600428 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qsqst\" (UniqueName: \"kubernetes.io/projected/e87fc594-8585-4df8-97e6-2abda24b3fcc-kube-api-access-qsqst\") pod \"ovsdbserver-nb-0\" (UID: \"e87fc594-8585-4df8-97e6-2abda24b3fcc\") " pod="openstack/ovsdbserver-nb-0" Nov 26 07:03:44 crc kubenswrapper[4492]: I1126 07:03:44.600448 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"ovsdbserver-nb-0\" (UID: \"e87fc594-8585-4df8-97e6-2abda24b3fcc\") " pod="openstack/ovsdbserver-nb-0" Nov 26 07:03:44 crc kubenswrapper[4492]: I1126 07:03:44.600473 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/e87fc594-8585-4df8-97e6-2abda24b3fcc-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"e87fc594-8585-4df8-97e6-2abda24b3fcc\") " pod="openstack/ovsdbserver-nb-0" Nov 26 07:03:44 crc 
kubenswrapper[4492]: I1126 07:03:44.600489 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/e87fc594-8585-4df8-97e6-2abda24b3fcc-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"e87fc594-8585-4df8-97e6-2abda24b3fcc\") " pod="openstack/ovsdbserver-nb-0" Nov 26 07:03:44 crc kubenswrapper[4492]: I1126 07:03:44.600506 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e87fc594-8585-4df8-97e6-2abda24b3fcc-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"e87fc594-8585-4df8-97e6-2abda24b3fcc\") " pod="openstack/ovsdbserver-nb-0" Nov 26 07:03:44 crc kubenswrapper[4492]: I1126 07:03:44.601850 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e87fc594-8585-4df8-97e6-2abda24b3fcc-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"e87fc594-8585-4df8-97e6-2abda24b3fcc\") " pod="openstack/ovsdbserver-nb-0" Nov 26 07:03:44 crc kubenswrapper[4492]: I1126 07:03:44.603363 4492 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"ovsdbserver-nb-0\" (UID: \"e87fc594-8585-4df8-97e6-2abda24b3fcc\") device mount path \"/mnt/openstack/pv09\"" pod="openstack/ovsdbserver-nb-0" Nov 26 07:03:44 crc kubenswrapper[4492]: I1126 07:03:44.603839 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/e87fc594-8585-4df8-97e6-2abda24b3fcc-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"e87fc594-8585-4df8-97e6-2abda24b3fcc\") " pod="openstack/ovsdbserver-nb-0" Nov 26 07:03:44 crc kubenswrapper[4492]: I1126 07:03:44.605836 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e87fc594-8585-4df8-97e6-2abda24b3fcc-config\") pod \"ovsdbserver-nb-0\" (UID: \"e87fc594-8585-4df8-97e6-2abda24b3fcc\") " pod="openstack/ovsdbserver-nb-0" Nov 26 07:03:44 crc kubenswrapper[4492]: I1126 07:03:44.613785 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e87fc594-8585-4df8-97e6-2abda24b3fcc-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"e87fc594-8585-4df8-97e6-2abda24b3fcc\") " pod="openstack/ovsdbserver-nb-0" Nov 26 07:03:44 crc kubenswrapper[4492]: I1126 07:03:44.614865 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/e87fc594-8585-4df8-97e6-2abda24b3fcc-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"e87fc594-8585-4df8-97e6-2abda24b3fcc\") " pod="openstack/ovsdbserver-nb-0" Nov 26 07:03:44 crc kubenswrapper[4492]: I1126 07:03:44.629528 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/e87fc594-8585-4df8-97e6-2abda24b3fcc-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"e87fc594-8585-4df8-97e6-2abda24b3fcc\") " pod="openstack/ovsdbserver-nb-0" Nov 26 07:03:44 crc kubenswrapper[4492]: I1126 07:03:44.652878 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qsqst\" (UniqueName: \"kubernetes.io/projected/e87fc594-8585-4df8-97e6-2abda24b3fcc-kube-api-access-qsqst\") pod \"ovsdbserver-nb-0\" (UID: \"e87fc594-8585-4df8-97e6-2abda24b3fcc\") " 
pod="openstack/ovsdbserver-nb-0" Nov 26 07:03:44 crc kubenswrapper[4492]: I1126 07:03:44.681120 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"ovsdbserver-nb-0\" (UID: \"e87fc594-8585-4df8-97e6-2abda24b3fcc\") " pod="openstack/ovsdbserver-nb-0" Nov 26 07:03:44 crc kubenswrapper[4492]: I1126 07:03:44.718326 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0" Nov 26 07:03:45 crc kubenswrapper[4492]: I1126 07:03:45.346005 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-djlnq"] Nov 26 07:03:45 crc kubenswrapper[4492]: I1126 07:03:45.347635 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-djlnq" Nov 26 07:03:45 crc kubenswrapper[4492]: I1126 07:03:45.350630 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncontroller-ovncontroller-dockercfg-554mv" Nov 26 07:03:45 crc kubenswrapper[4492]: I1126 07:03:45.351204 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovncontroller-ovndbs" Nov 26 07:03:45 crc kubenswrapper[4492]: I1126 07:03:45.351275 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-scripts" Nov 26 07:03:45 crc kubenswrapper[4492]: I1126 07:03:45.362014 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-djlnq"] Nov 26 07:03:45 crc kubenswrapper[4492]: I1126 07:03:45.432491 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/cd517ac8-3f42-4406-8bb2-dd7f1b87daf7-var-run\") pod \"ovn-controller-djlnq\" (UID: \"cd517ac8-3f42-4406-8bb2-dd7f1b87daf7\") " pod="openstack/ovn-controller-djlnq" Nov 26 07:03:45 crc kubenswrapper[4492]: I1126 07:03:45.432548 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/cd517ac8-3f42-4406-8bb2-dd7f1b87daf7-scripts\") pod \"ovn-controller-djlnq\" (UID: \"cd517ac8-3f42-4406-8bb2-dd7f1b87daf7\") " pod="openstack/ovn-controller-djlnq" Nov 26 07:03:45 crc kubenswrapper[4492]: I1126 07:03:45.432601 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cd517ac8-3f42-4406-8bb2-dd7f1b87daf7-combined-ca-bundle\") pod \"ovn-controller-djlnq\" (UID: \"cd517ac8-3f42-4406-8bb2-dd7f1b87daf7\") " pod="openstack/ovn-controller-djlnq" Nov 26 07:03:45 crc kubenswrapper[4492]: I1126 07:03:45.434270 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-ovs-hrhlg"] Nov 26 07:03:45 crc kubenswrapper[4492]: I1126 07:03:45.436009 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-ovs-hrhlg" Nov 26 07:03:45 crc kubenswrapper[4492]: I1126 07:03:45.438123 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/cd517ac8-3f42-4406-8bb2-dd7f1b87daf7-var-run-ovn\") pod \"ovn-controller-djlnq\" (UID: \"cd517ac8-3f42-4406-8bb2-dd7f1b87daf7\") " pod="openstack/ovn-controller-djlnq" Nov 26 07:03:45 crc kubenswrapper[4492]: I1126 07:03:45.438229 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/cd517ac8-3f42-4406-8bb2-dd7f1b87daf7-var-log-ovn\") pod \"ovn-controller-djlnq\" (UID: \"cd517ac8-3f42-4406-8bb2-dd7f1b87daf7\") " pod="openstack/ovn-controller-djlnq" Nov 26 07:03:45 crc kubenswrapper[4492]: I1126 07:03:45.438329 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4qcv4\" (UniqueName: \"kubernetes.io/projected/cd517ac8-3f42-4406-8bb2-dd7f1b87daf7-kube-api-access-4qcv4\") pod \"ovn-controller-djlnq\" (UID: \"cd517ac8-3f42-4406-8bb2-dd7f1b87daf7\") " pod="openstack/ovn-controller-djlnq" Nov 26 07:03:45 crc kubenswrapper[4492]: I1126 07:03:45.438360 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/cd517ac8-3f42-4406-8bb2-dd7f1b87daf7-ovn-controller-tls-certs\") pod \"ovn-controller-djlnq\" (UID: \"cd517ac8-3f42-4406-8bb2-dd7f1b87daf7\") " pod="openstack/ovn-controller-djlnq" Nov 26 07:03:45 crc kubenswrapper[4492]: I1126 07:03:45.442495 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-hrhlg"] Nov 26 07:03:45 crc kubenswrapper[4492]: I1126 07:03:45.539707 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/cd517ac8-3f42-4406-8bb2-dd7f1b87daf7-ovn-controller-tls-certs\") pod \"ovn-controller-djlnq\" (UID: \"cd517ac8-3f42-4406-8bb2-dd7f1b87daf7\") " pod="openstack/ovn-controller-djlnq" Nov 26 07:03:45 crc kubenswrapper[4492]: I1126 07:03:45.539783 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/68e83779-7285-47ce-927b-e3f862af6367-var-lib\") pod \"ovn-controller-ovs-hrhlg\" (UID: \"68e83779-7285-47ce-927b-e3f862af6367\") " pod="openstack/ovn-controller-ovs-hrhlg" Nov 26 07:03:45 crc kubenswrapper[4492]: I1126 07:03:45.539828 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/cd517ac8-3f42-4406-8bb2-dd7f1b87daf7-var-run\") pod \"ovn-controller-djlnq\" (UID: \"cd517ac8-3f42-4406-8bb2-dd7f1b87daf7\") " pod="openstack/ovn-controller-djlnq" Nov 26 07:03:45 crc kubenswrapper[4492]: I1126 07:03:45.539846 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/68e83779-7285-47ce-927b-e3f862af6367-etc-ovs\") pod \"ovn-controller-ovs-hrhlg\" (UID: \"68e83779-7285-47ce-927b-e3f862af6367\") " pod="openstack/ovn-controller-ovs-hrhlg" Nov 26 07:03:45 crc kubenswrapper[4492]: I1126 07:03:45.539886 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/configmap/68e83779-7285-47ce-927b-e3f862af6367-scripts\") pod \"ovn-controller-ovs-hrhlg\" (UID: \"68e83779-7285-47ce-927b-e3f862af6367\") " pod="openstack/ovn-controller-ovs-hrhlg" Nov 26 07:03:45 crc kubenswrapper[4492]: I1126 07:03:45.539913 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/cd517ac8-3f42-4406-8bb2-dd7f1b87daf7-scripts\") pod \"ovn-controller-djlnq\" (UID: \"cd517ac8-3f42-4406-8bb2-dd7f1b87daf7\") " pod="openstack/ovn-controller-djlnq" Nov 26 07:03:45 crc kubenswrapper[4492]: I1126 07:03:45.539938 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/68e83779-7285-47ce-927b-e3f862af6367-var-run\") pod \"ovn-controller-ovs-hrhlg\" (UID: \"68e83779-7285-47ce-927b-e3f862af6367\") " pod="openstack/ovn-controller-ovs-hrhlg" Nov 26 07:03:45 crc kubenswrapper[4492]: I1126 07:03:45.540151 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cd517ac8-3f42-4406-8bb2-dd7f1b87daf7-combined-ca-bundle\") pod \"ovn-controller-djlnq\" (UID: \"cd517ac8-3f42-4406-8bb2-dd7f1b87daf7\") " pod="openstack/ovn-controller-djlnq" Nov 26 07:03:45 crc kubenswrapper[4492]: I1126 07:03:45.540301 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hkfjh\" (UniqueName: \"kubernetes.io/projected/68e83779-7285-47ce-927b-e3f862af6367-kube-api-access-hkfjh\") pod \"ovn-controller-ovs-hrhlg\" (UID: \"68e83779-7285-47ce-927b-e3f862af6367\") " pod="openstack/ovn-controller-ovs-hrhlg" Nov 26 07:03:45 crc kubenswrapper[4492]: I1126 07:03:45.540339 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/cd517ac8-3f42-4406-8bb2-dd7f1b87daf7-var-run\") pod \"ovn-controller-djlnq\" (UID: \"cd517ac8-3f42-4406-8bb2-dd7f1b87daf7\") " pod="openstack/ovn-controller-djlnq" Nov 26 07:03:45 crc kubenswrapper[4492]: I1126 07:03:45.540404 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/68e83779-7285-47ce-927b-e3f862af6367-var-log\") pod \"ovn-controller-ovs-hrhlg\" (UID: \"68e83779-7285-47ce-927b-e3f862af6367\") " pod="openstack/ovn-controller-ovs-hrhlg" Nov 26 07:03:45 crc kubenswrapper[4492]: I1126 07:03:45.540456 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/cd517ac8-3f42-4406-8bb2-dd7f1b87daf7-var-run-ovn\") pod \"ovn-controller-djlnq\" (UID: \"cd517ac8-3f42-4406-8bb2-dd7f1b87daf7\") " pod="openstack/ovn-controller-djlnq" Nov 26 07:03:45 crc kubenswrapper[4492]: I1126 07:03:45.540496 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/cd517ac8-3f42-4406-8bb2-dd7f1b87daf7-var-log-ovn\") pod \"ovn-controller-djlnq\" (UID: \"cd517ac8-3f42-4406-8bb2-dd7f1b87daf7\") " pod="openstack/ovn-controller-djlnq" Nov 26 07:03:45 crc kubenswrapper[4492]: I1126 07:03:45.540701 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4qcv4\" (UniqueName: \"kubernetes.io/projected/cd517ac8-3f42-4406-8bb2-dd7f1b87daf7-kube-api-access-4qcv4\") pod \"ovn-controller-djlnq\" (UID: \"cd517ac8-3f42-4406-8bb2-dd7f1b87daf7\") 
" pod="openstack/ovn-controller-djlnq" Nov 26 07:03:45 crc kubenswrapper[4492]: I1126 07:03:45.540846 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/cd517ac8-3f42-4406-8bb2-dd7f1b87daf7-var-run-ovn\") pod \"ovn-controller-djlnq\" (UID: \"cd517ac8-3f42-4406-8bb2-dd7f1b87daf7\") " pod="openstack/ovn-controller-djlnq" Nov 26 07:03:45 crc kubenswrapper[4492]: I1126 07:03:45.542299 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/cd517ac8-3f42-4406-8bb2-dd7f1b87daf7-var-log-ovn\") pod \"ovn-controller-djlnq\" (UID: \"cd517ac8-3f42-4406-8bb2-dd7f1b87daf7\") " pod="openstack/ovn-controller-djlnq" Nov 26 07:03:45 crc kubenswrapper[4492]: I1126 07:03:45.542647 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/cd517ac8-3f42-4406-8bb2-dd7f1b87daf7-scripts\") pod \"ovn-controller-djlnq\" (UID: \"cd517ac8-3f42-4406-8bb2-dd7f1b87daf7\") " pod="openstack/ovn-controller-djlnq" Nov 26 07:03:45 crc kubenswrapper[4492]: I1126 07:03:45.546439 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/cd517ac8-3f42-4406-8bb2-dd7f1b87daf7-ovn-controller-tls-certs\") pod \"ovn-controller-djlnq\" (UID: \"cd517ac8-3f42-4406-8bb2-dd7f1b87daf7\") " pod="openstack/ovn-controller-djlnq" Nov 26 07:03:45 crc kubenswrapper[4492]: I1126 07:03:45.547128 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cd517ac8-3f42-4406-8bb2-dd7f1b87daf7-combined-ca-bundle\") pod \"ovn-controller-djlnq\" (UID: \"cd517ac8-3f42-4406-8bb2-dd7f1b87daf7\") " pod="openstack/ovn-controller-djlnq" Nov 26 07:03:45 crc kubenswrapper[4492]: I1126 07:03:45.554939 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4qcv4\" (UniqueName: \"kubernetes.io/projected/cd517ac8-3f42-4406-8bb2-dd7f1b87daf7-kube-api-access-4qcv4\") pod \"ovn-controller-djlnq\" (UID: \"cd517ac8-3f42-4406-8bb2-dd7f1b87daf7\") " pod="openstack/ovn-controller-djlnq" Nov 26 07:03:45 crc kubenswrapper[4492]: I1126 07:03:45.642880 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/68e83779-7285-47ce-927b-e3f862af6367-scripts\") pod \"ovn-controller-ovs-hrhlg\" (UID: \"68e83779-7285-47ce-927b-e3f862af6367\") " pod="openstack/ovn-controller-ovs-hrhlg" Nov 26 07:03:45 crc kubenswrapper[4492]: I1126 07:03:45.642967 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/68e83779-7285-47ce-927b-e3f862af6367-var-run\") pod \"ovn-controller-ovs-hrhlg\" (UID: \"68e83779-7285-47ce-927b-e3f862af6367\") " pod="openstack/ovn-controller-ovs-hrhlg" Nov 26 07:03:45 crc kubenswrapper[4492]: I1126 07:03:45.643035 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hkfjh\" (UniqueName: \"kubernetes.io/projected/68e83779-7285-47ce-927b-e3f862af6367-kube-api-access-hkfjh\") pod \"ovn-controller-ovs-hrhlg\" (UID: \"68e83779-7285-47ce-927b-e3f862af6367\") " pod="openstack/ovn-controller-ovs-hrhlg" Nov 26 07:03:45 crc kubenswrapper[4492]: I1126 07:03:45.643091 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: 
\"kubernetes.io/host-path/68e83779-7285-47ce-927b-e3f862af6367-var-log\") pod \"ovn-controller-ovs-hrhlg\" (UID: \"68e83779-7285-47ce-927b-e3f862af6367\") " pod="openstack/ovn-controller-ovs-hrhlg" Nov 26 07:03:45 crc kubenswrapper[4492]: I1126 07:03:45.643274 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/68e83779-7285-47ce-927b-e3f862af6367-var-lib\") pod \"ovn-controller-ovs-hrhlg\" (UID: \"68e83779-7285-47ce-927b-e3f862af6367\") " pod="openstack/ovn-controller-ovs-hrhlg" Nov 26 07:03:45 crc kubenswrapper[4492]: I1126 07:03:45.643321 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/68e83779-7285-47ce-927b-e3f862af6367-etc-ovs\") pod \"ovn-controller-ovs-hrhlg\" (UID: \"68e83779-7285-47ce-927b-e3f862af6367\") " pod="openstack/ovn-controller-ovs-hrhlg" Nov 26 07:03:45 crc kubenswrapper[4492]: I1126 07:03:45.644129 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/68e83779-7285-47ce-927b-e3f862af6367-etc-ovs\") pod \"ovn-controller-ovs-hrhlg\" (UID: \"68e83779-7285-47ce-927b-e3f862af6367\") " pod="openstack/ovn-controller-ovs-hrhlg" Nov 26 07:03:45 crc kubenswrapper[4492]: I1126 07:03:45.647472 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/68e83779-7285-47ce-927b-e3f862af6367-var-run\") pod \"ovn-controller-ovs-hrhlg\" (UID: \"68e83779-7285-47ce-927b-e3f862af6367\") " pod="openstack/ovn-controller-ovs-hrhlg" Nov 26 07:03:45 crc kubenswrapper[4492]: I1126 07:03:45.647510 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/68e83779-7285-47ce-927b-e3f862af6367-var-lib\") pod \"ovn-controller-ovs-hrhlg\" (UID: \"68e83779-7285-47ce-927b-e3f862af6367\") " pod="openstack/ovn-controller-ovs-hrhlg" Nov 26 07:03:45 crc kubenswrapper[4492]: I1126 07:03:45.647671 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/68e83779-7285-47ce-927b-e3f862af6367-var-log\") pod \"ovn-controller-ovs-hrhlg\" (UID: \"68e83779-7285-47ce-927b-e3f862af6367\") " pod="openstack/ovn-controller-ovs-hrhlg" Nov 26 07:03:45 crc kubenswrapper[4492]: I1126 07:03:45.647735 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/68e83779-7285-47ce-927b-e3f862af6367-scripts\") pod \"ovn-controller-ovs-hrhlg\" (UID: \"68e83779-7285-47ce-927b-e3f862af6367\") " pod="openstack/ovn-controller-ovs-hrhlg" Nov 26 07:03:45 crc kubenswrapper[4492]: I1126 07:03:45.679625 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hkfjh\" (UniqueName: \"kubernetes.io/projected/68e83779-7285-47ce-927b-e3f862af6367-kube-api-access-hkfjh\") pod \"ovn-controller-ovs-hrhlg\" (UID: \"68e83779-7285-47ce-927b-e3f862af6367\") " pod="openstack/ovn-controller-ovs-hrhlg" Nov 26 07:03:45 crc kubenswrapper[4492]: I1126 07:03:45.685675 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-djlnq" Nov 26 07:03:45 crc kubenswrapper[4492]: I1126 07:03:45.765734 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-ovs-hrhlg" Nov 26 07:03:46 crc kubenswrapper[4492]: I1126 07:03:46.661628 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-c4tj9"] Nov 26 07:03:46 crc kubenswrapper[4492]: I1126 07:03:46.674280 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-c4tj9" Nov 26 07:03:46 crc kubenswrapper[4492]: I1126 07:03:46.684564 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-c4tj9"] Nov 26 07:03:46 crc kubenswrapper[4492]: I1126 07:03:46.775964 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a883fb65-c766-4a13-bac0-177d4ffe2de2-utilities\") pod \"certified-operators-c4tj9\" (UID: \"a883fb65-c766-4a13-bac0-177d4ffe2de2\") " pod="openshift-marketplace/certified-operators-c4tj9" Nov 26 07:03:46 crc kubenswrapper[4492]: I1126 07:03:46.776037 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4wfjp\" (UniqueName: \"kubernetes.io/projected/a883fb65-c766-4a13-bac0-177d4ffe2de2-kube-api-access-4wfjp\") pod \"certified-operators-c4tj9\" (UID: \"a883fb65-c766-4a13-bac0-177d4ffe2de2\") " pod="openshift-marketplace/certified-operators-c4tj9" Nov 26 07:03:46 crc kubenswrapper[4492]: I1126 07:03:46.776119 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a883fb65-c766-4a13-bac0-177d4ffe2de2-catalog-content\") pod \"certified-operators-c4tj9\" (UID: \"a883fb65-c766-4a13-bac0-177d4ffe2de2\") " pod="openshift-marketplace/certified-operators-c4tj9" Nov 26 07:03:46 crc kubenswrapper[4492]: I1126 07:03:46.879025 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a883fb65-c766-4a13-bac0-177d4ffe2de2-catalog-content\") pod \"certified-operators-c4tj9\" (UID: \"a883fb65-c766-4a13-bac0-177d4ffe2de2\") " pod="openshift-marketplace/certified-operators-c4tj9" Nov 26 07:03:46 crc kubenswrapper[4492]: I1126 07:03:46.879150 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a883fb65-c766-4a13-bac0-177d4ffe2de2-utilities\") pod \"certified-operators-c4tj9\" (UID: \"a883fb65-c766-4a13-bac0-177d4ffe2de2\") " pod="openshift-marketplace/certified-operators-c4tj9" Nov 26 07:03:46 crc kubenswrapper[4492]: I1126 07:03:46.879186 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4wfjp\" (UniqueName: \"kubernetes.io/projected/a883fb65-c766-4a13-bac0-177d4ffe2de2-kube-api-access-4wfjp\") pod \"certified-operators-c4tj9\" (UID: \"a883fb65-c766-4a13-bac0-177d4ffe2de2\") " pod="openshift-marketplace/certified-operators-c4tj9" Nov 26 07:03:46 crc kubenswrapper[4492]: I1126 07:03:46.879760 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a883fb65-c766-4a13-bac0-177d4ffe2de2-catalog-content\") pod \"certified-operators-c4tj9\" (UID: \"a883fb65-c766-4a13-bac0-177d4ffe2de2\") " pod="openshift-marketplace/certified-operators-c4tj9" Nov 26 07:03:46 crc kubenswrapper[4492]: I1126 07:03:46.880153 4492 operation_generator.go:637] "MountVolume.SetUp succeeded 
for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a883fb65-c766-4a13-bac0-177d4ffe2de2-utilities\") pod \"certified-operators-c4tj9\" (UID: \"a883fb65-c766-4a13-bac0-177d4ffe2de2\") " pod="openshift-marketplace/certified-operators-c4tj9" Nov 26 07:03:46 crc kubenswrapper[4492]: I1126 07:03:46.909096 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4wfjp\" (UniqueName: \"kubernetes.io/projected/a883fb65-c766-4a13-bac0-177d4ffe2de2-kube-api-access-4wfjp\") pod \"certified-operators-c4tj9\" (UID: \"a883fb65-c766-4a13-bac0-177d4ffe2de2\") " pod="openshift-marketplace/certified-operators-c4tj9" Nov 26 07:03:47 crc kubenswrapper[4492]: I1126 07:03:47.007687 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-c4tj9" Nov 26 07:03:47 crc kubenswrapper[4492]: I1126 07:03:47.993409 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 26 07:03:47 crc kubenswrapper[4492]: I1126 07:03:47.995291 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Nov 26 07:03:48 crc kubenswrapper[4492]: I1126 07:03:47.998148 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-sb-dockercfg-9mgml" Nov 26 07:03:48 crc kubenswrapper[4492]: I1126 07:03:47.998919 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-config" Nov 26 07:03:48 crc kubenswrapper[4492]: I1126 07:03:48.001461 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-scripts" Nov 26 07:03:48 crc kubenswrapper[4492]: I1126 07:03:48.001695 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-sb-ovndbs" Nov 26 07:03:48 crc kubenswrapper[4492]: I1126 07:03:48.025874 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 26 07:03:48 crc kubenswrapper[4492]: I1126 07:03:48.116368 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/40545f10-cf57-45a2-b500-33811a633cc8-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"40545f10-cf57-45a2-b500-33811a633cc8\") " pod="openstack/ovsdbserver-sb-0" Nov 26 07:03:48 crc kubenswrapper[4492]: I1126 07:03:48.116505 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dlcth\" (UniqueName: \"kubernetes.io/projected/40545f10-cf57-45a2-b500-33811a633cc8-kube-api-access-dlcth\") pod \"ovsdbserver-sb-0\" (UID: \"40545f10-cf57-45a2-b500-33811a633cc8\") " pod="openstack/ovsdbserver-sb-0" Nov 26 07:03:48 crc kubenswrapper[4492]: I1126 07:03:48.116725 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/40545f10-cf57-45a2-b500-33811a633cc8-config\") pod \"ovsdbserver-sb-0\" (UID: \"40545f10-cf57-45a2-b500-33811a633cc8\") " pod="openstack/ovsdbserver-sb-0" Nov 26 07:03:48 crc kubenswrapper[4492]: I1126 07:03:48.116789 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/40545f10-cf57-45a2-b500-33811a633cc8-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"40545f10-cf57-45a2-b500-33811a633cc8\") " 
pod="openstack/ovsdbserver-sb-0" Nov 26 07:03:48 crc kubenswrapper[4492]: I1126 07:03:48.116923 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"ovsdbserver-sb-0\" (UID: \"40545f10-cf57-45a2-b500-33811a633cc8\") " pod="openstack/ovsdbserver-sb-0" Nov 26 07:03:48 crc kubenswrapper[4492]: I1126 07:03:48.117000 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/40545f10-cf57-45a2-b500-33811a633cc8-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"40545f10-cf57-45a2-b500-33811a633cc8\") " pod="openstack/ovsdbserver-sb-0" Nov 26 07:03:48 crc kubenswrapper[4492]: I1126 07:03:48.117034 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/40545f10-cf57-45a2-b500-33811a633cc8-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"40545f10-cf57-45a2-b500-33811a633cc8\") " pod="openstack/ovsdbserver-sb-0" Nov 26 07:03:48 crc kubenswrapper[4492]: I1126 07:03:48.117070 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/40545f10-cf57-45a2-b500-33811a633cc8-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"40545f10-cf57-45a2-b500-33811a633cc8\") " pod="openstack/ovsdbserver-sb-0" Nov 26 07:03:48 crc kubenswrapper[4492]: I1126 07:03:48.218534 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/40545f10-cf57-45a2-b500-33811a633cc8-config\") pod \"ovsdbserver-sb-0\" (UID: \"40545f10-cf57-45a2-b500-33811a633cc8\") " pod="openstack/ovsdbserver-sb-0" Nov 26 07:03:48 crc kubenswrapper[4492]: I1126 07:03:48.218606 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/40545f10-cf57-45a2-b500-33811a633cc8-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"40545f10-cf57-45a2-b500-33811a633cc8\") " pod="openstack/ovsdbserver-sb-0" Nov 26 07:03:48 crc kubenswrapper[4492]: I1126 07:03:48.218641 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"ovsdbserver-sb-0\" (UID: \"40545f10-cf57-45a2-b500-33811a633cc8\") " pod="openstack/ovsdbserver-sb-0" Nov 26 07:03:48 crc kubenswrapper[4492]: I1126 07:03:48.218673 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/40545f10-cf57-45a2-b500-33811a633cc8-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"40545f10-cf57-45a2-b500-33811a633cc8\") " pod="openstack/ovsdbserver-sb-0" Nov 26 07:03:48 crc kubenswrapper[4492]: I1126 07:03:48.218699 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/40545f10-cf57-45a2-b500-33811a633cc8-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"40545f10-cf57-45a2-b500-33811a633cc8\") " pod="openstack/ovsdbserver-sb-0" Nov 26 07:03:48 crc kubenswrapper[4492]: I1126 07:03:48.218725 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/configmap/40545f10-cf57-45a2-b500-33811a633cc8-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"40545f10-cf57-45a2-b500-33811a633cc8\") " pod="openstack/ovsdbserver-sb-0" Nov 26 07:03:48 crc kubenswrapper[4492]: I1126 07:03:48.218779 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/40545f10-cf57-45a2-b500-33811a633cc8-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"40545f10-cf57-45a2-b500-33811a633cc8\") " pod="openstack/ovsdbserver-sb-0" Nov 26 07:03:48 crc kubenswrapper[4492]: I1126 07:03:48.218802 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dlcth\" (UniqueName: \"kubernetes.io/projected/40545f10-cf57-45a2-b500-33811a633cc8-kube-api-access-dlcth\") pod \"ovsdbserver-sb-0\" (UID: \"40545f10-cf57-45a2-b500-33811a633cc8\") " pod="openstack/ovsdbserver-sb-0" Nov 26 07:03:48 crc kubenswrapper[4492]: I1126 07:03:48.219658 4492 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"ovsdbserver-sb-0\" (UID: \"40545f10-cf57-45a2-b500-33811a633cc8\") device mount path \"/mnt/openstack/pv01\"" pod="openstack/ovsdbserver-sb-0" Nov 26 07:03:48 crc kubenswrapper[4492]: I1126 07:03:48.228740 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/40545f10-cf57-45a2-b500-33811a633cc8-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"40545f10-cf57-45a2-b500-33811a633cc8\") " pod="openstack/ovsdbserver-sb-0" Nov 26 07:03:48 crc kubenswrapper[4492]: I1126 07:03:48.229160 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/40545f10-cf57-45a2-b500-33811a633cc8-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"40545f10-cf57-45a2-b500-33811a633cc8\") " pod="openstack/ovsdbserver-sb-0" Nov 26 07:03:48 crc kubenswrapper[4492]: I1126 07:03:48.229306 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/40545f10-cf57-45a2-b500-33811a633cc8-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"40545f10-cf57-45a2-b500-33811a633cc8\") " pod="openstack/ovsdbserver-sb-0" Nov 26 07:03:48 crc kubenswrapper[4492]: I1126 07:03:48.229522 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/40545f10-cf57-45a2-b500-33811a633cc8-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"40545f10-cf57-45a2-b500-33811a633cc8\") " pod="openstack/ovsdbserver-sb-0" Nov 26 07:03:48 crc kubenswrapper[4492]: I1126 07:03:48.229851 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/40545f10-cf57-45a2-b500-33811a633cc8-config\") pod \"ovsdbserver-sb-0\" (UID: \"40545f10-cf57-45a2-b500-33811a633cc8\") " pod="openstack/ovsdbserver-sb-0" Nov 26 07:03:48 crc kubenswrapper[4492]: I1126 07:03:48.235247 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dlcth\" (UniqueName: \"kubernetes.io/projected/40545f10-cf57-45a2-b500-33811a633cc8-kube-api-access-dlcth\") pod \"ovsdbserver-sb-0\" (UID: \"40545f10-cf57-45a2-b500-33811a633cc8\") " pod="openstack/ovsdbserver-sb-0" Nov 26 07:03:48 crc kubenswrapper[4492]: I1126 07:03:48.238876 4492 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/40545f10-cf57-45a2-b500-33811a633cc8-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"40545f10-cf57-45a2-b500-33811a633cc8\") " pod="openstack/ovsdbserver-sb-0" Nov 26 07:03:48 crc kubenswrapper[4492]: I1126 07:03:48.259494 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"ovsdbserver-sb-0\" (UID: \"40545f10-cf57-45a2-b500-33811a633cc8\") " pod="openstack/ovsdbserver-sb-0" Nov 26 07:03:48 crc kubenswrapper[4492]: I1126 07:03:48.312507 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Nov 26 07:03:49 crc kubenswrapper[4492]: I1126 07:03:49.442498 4492 patch_prober.go:28] interesting pod/machine-config-daemon-6blv7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 07:03:49 crc kubenswrapper[4492]: I1126 07:03:49.443043 4492 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 07:03:52 crc kubenswrapper[4492]: I1126 07:03:52.693412 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"40ff3c7f-2bdf-42be-bcde-659ad3f15ca5","Type":"ContainerStarted","Data":"97755a404ec6ec5660768195301d3ec736371bff2f40f1e29193dca68bf14e09"} Nov 26 07:04:00 crc kubenswrapper[4492]: I1126 07:04:00.378411 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 26 07:04:00 crc kubenswrapper[4492]: E1126 07:04:00.598021 4492 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-antelope-centos9/openstack-neutron-server:1f5c0439f2433cb462b222a5bb23e629" Nov 26 07:04:00 crc kubenswrapper[4492]: E1126 07:04:00.598091 4492 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-antelope-centos9/openstack-neutron-server:1f5c0439f2433cb462b222a5bb23e629" Nov 26 07:04:00 crc kubenswrapper[4492]: E1126 07:04:00.598234 4492 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.rdoproject.org/podified-antelope-centos9/openstack-neutron-server:1f5c0439f2433cb462b222a5bb23e629,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries 
--test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nffh5bdhf4h5f8h79h55h77h58fh56dh7bh6fh578hbch55dh68h56bhd9h65dh57ch658hc9h566h666h688h58h65dh684h5d7h6ch575h5d6h88q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-ld7ns,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-59899b64cc-ltrhr_openstack(f51b5379-fafd-41cf-aa2b-f705adb424b4): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 26 07:04:00 crc kubenswrapper[4492]: E1126 07:04:00.599618 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-59899b64cc-ltrhr" podUID="f51b5379-fafd-41cf-aa2b-f705adb424b4" Nov 26 07:04:03 crc kubenswrapper[4492]: I1126 07:04:03.652015 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-2nmf5"] Nov 26 07:04:03 crc kubenswrapper[4492]: I1126 07:04:03.654014 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-2nmf5" Nov 26 07:04:03 crc kubenswrapper[4492]: I1126 07:04:03.662627 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-2nmf5"] Nov 26 07:04:03 crc kubenswrapper[4492]: I1126 07:04:03.838225 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0fd9ab99-d364-4442-a203-ce8a7b838cf3-catalog-content\") pod \"redhat-operators-2nmf5\" (UID: \"0fd9ab99-d364-4442-a203-ce8a7b838cf3\") " pod="openshift-marketplace/redhat-operators-2nmf5" Nov 26 07:04:03 crc kubenswrapper[4492]: I1126 07:04:03.838262 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d49m6\" (UniqueName: \"kubernetes.io/projected/0fd9ab99-d364-4442-a203-ce8a7b838cf3-kube-api-access-d49m6\") pod \"redhat-operators-2nmf5\" (UID: \"0fd9ab99-d364-4442-a203-ce8a7b838cf3\") " pod="openshift-marketplace/redhat-operators-2nmf5" Nov 26 07:04:03 crc kubenswrapper[4492]: I1126 07:04:03.838350 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0fd9ab99-d364-4442-a203-ce8a7b838cf3-utilities\") pod \"redhat-operators-2nmf5\" (UID: \"0fd9ab99-d364-4442-a203-ce8a7b838cf3\") " pod="openshift-marketplace/redhat-operators-2nmf5" Nov 26 07:04:03 crc kubenswrapper[4492]: I1126 07:04:03.940054 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0fd9ab99-d364-4442-a203-ce8a7b838cf3-catalog-content\") pod \"redhat-operators-2nmf5\" (UID: \"0fd9ab99-d364-4442-a203-ce8a7b838cf3\") " pod="openshift-marketplace/redhat-operators-2nmf5" Nov 26 07:04:03 crc kubenswrapper[4492]: I1126 07:04:03.940117 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d49m6\" (UniqueName: \"kubernetes.io/projected/0fd9ab99-d364-4442-a203-ce8a7b838cf3-kube-api-access-d49m6\") pod \"redhat-operators-2nmf5\" (UID: \"0fd9ab99-d364-4442-a203-ce8a7b838cf3\") " pod="openshift-marketplace/redhat-operators-2nmf5" Nov 26 07:04:03 crc kubenswrapper[4492]: I1126 07:04:03.940157 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0fd9ab99-d364-4442-a203-ce8a7b838cf3-utilities\") pod \"redhat-operators-2nmf5\" (UID: \"0fd9ab99-d364-4442-a203-ce8a7b838cf3\") " pod="openshift-marketplace/redhat-operators-2nmf5" Nov 26 07:04:03 crc kubenswrapper[4492]: I1126 07:04:03.940698 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0fd9ab99-d364-4442-a203-ce8a7b838cf3-utilities\") pod \"redhat-operators-2nmf5\" (UID: \"0fd9ab99-d364-4442-a203-ce8a7b838cf3\") " pod="openshift-marketplace/redhat-operators-2nmf5" Nov 26 07:04:03 crc kubenswrapper[4492]: I1126 07:04:03.940943 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0fd9ab99-d364-4442-a203-ce8a7b838cf3-catalog-content\") pod \"redhat-operators-2nmf5\" (UID: \"0fd9ab99-d364-4442-a203-ce8a7b838cf3\") " pod="openshift-marketplace/redhat-operators-2nmf5" Nov 26 07:04:03 crc kubenswrapper[4492]: I1126 07:04:03.961055 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-d49m6\" (UniqueName: \"kubernetes.io/projected/0fd9ab99-d364-4442-a203-ce8a7b838cf3-kube-api-access-d49m6\") pod \"redhat-operators-2nmf5\" (UID: \"0fd9ab99-d364-4442-a203-ce8a7b838cf3\") " pod="openshift-marketplace/redhat-operators-2nmf5" Nov 26 07:04:03 crc kubenswrapper[4492]: I1126 07:04:03.974010 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-2nmf5" Nov 26 07:04:04 crc kubenswrapper[4492]: E1126 07:04:04.072432 4492 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-antelope-centos9/openstack-neutron-server:1f5c0439f2433cb462b222a5bb23e629" Nov 26 07:04:04 crc kubenswrapper[4492]: E1126 07:04:04.072696 4492 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-antelope-centos9/openstack-neutron-server:1f5c0439f2433cb462b222a5bb23e629" Nov 26 07:04:04 crc kubenswrapper[4492]: E1126 07:04:04.072815 4492 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.rdoproject.org/podified-antelope-centos9/openstack-neutron-server:1f5c0439f2433cb462b222a5bb23e629,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:ndfhb5h667h568h584h5f9h58dh565h664h587h597h577h64bh5c4h66fh647hbdh68ch5c5h68dh686h5f7h64hd7hc6h55fh57bh98h57fh87h5fh57fq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-d8wnd,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-7bbd9697cc-sb6h8_openstack(d0f38be1-2d63-4e62-8961-b52fdad1a712): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 26 07:04:04 crc kubenswrapper[4492]: E1126 
07:04:04.074501 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-7bbd9697cc-sb6h8" podUID="d0f38be1-2d63-4e62-8961-b52fdad1a712" Nov 26 07:04:04 crc kubenswrapper[4492]: E1126 07:04:04.091551 4492 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-antelope-centos9/openstack-mariadb:1f5c0439f2433cb462b222a5bb23e629" Nov 26 07:04:04 crc kubenswrapper[4492]: E1126 07:04:04.091603 4492 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-antelope-centos9/openstack-mariadb:1f5c0439f2433cb462b222a5bb23e629" Nov 26 07:04:04 crc kubenswrapper[4492]: E1126 07:04:04.091787 4492 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:mysql-bootstrap,Image:quay.rdoproject.org/podified-antelope-centos9/openstack-mariadb:1f5c0439f2433cb462b222a5bb23e629,Command:[bash /var/lib/operator-scripts/mysql_bootstrap.sh],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:True,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:mysql-db,ReadOnly:false,MountPath:/var/lib/mysql,SubPath:mysql,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-default,ReadOnly:true,MountPath:/var/lib/config-data/default,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-generated,ReadOnly:false,MountPath:/var/lib/config-data/generated,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:operator-scripts,ReadOnly:true,MountPath:/var/lib/operator-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kolla-config,ReadOnly:true,MountPath:/var/lib/kolla/config_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-m54p2,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod openstack-cell1-galera-0_openstack(2772f93f-41fd-4817-9c9f-3932e094b0ac): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 26 07:04:04 crc kubenswrapper[4492]: E1126 07:04:04.092957 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" 
pod="openstack/openstack-cell1-galera-0" podUID="2772f93f-41fd-4817-9c9f-3932e094b0ac" Nov 26 07:04:04 crc kubenswrapper[4492]: I1126 07:04:04.100113 4492 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 26 07:04:04 crc kubenswrapper[4492]: I1126 07:04:04.325485 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-59899b64cc-ltrhr" Nov 26 07:04:04 crc kubenswrapper[4492]: I1126 07:04:04.353047 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ld7ns\" (UniqueName: \"kubernetes.io/projected/f51b5379-fafd-41cf-aa2b-f705adb424b4-kube-api-access-ld7ns\") pod \"f51b5379-fafd-41cf-aa2b-f705adb424b4\" (UID: \"f51b5379-fafd-41cf-aa2b-f705adb424b4\") " Nov 26 07:04:04 crc kubenswrapper[4492]: I1126 07:04:04.353138 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f51b5379-fafd-41cf-aa2b-f705adb424b4-config\") pod \"f51b5379-fafd-41cf-aa2b-f705adb424b4\" (UID: \"f51b5379-fafd-41cf-aa2b-f705adb424b4\") " Nov 26 07:04:04 crc kubenswrapper[4492]: I1126 07:04:04.353706 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f51b5379-fafd-41cf-aa2b-f705adb424b4-config" (OuterVolumeSpecName: "config") pod "f51b5379-fafd-41cf-aa2b-f705adb424b4" (UID: "f51b5379-fafd-41cf-aa2b-f705adb424b4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:04:04 crc kubenswrapper[4492]: I1126 07:04:04.360626 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f51b5379-fafd-41cf-aa2b-f705adb424b4-kube-api-access-ld7ns" (OuterVolumeSpecName: "kube-api-access-ld7ns") pod "f51b5379-fafd-41cf-aa2b-f705adb424b4" (UID: "f51b5379-fafd-41cf-aa2b-f705adb424b4"). InnerVolumeSpecName "kube-api-access-ld7ns". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:04:04 crc kubenswrapper[4492]: I1126 07:04:04.455549 4492 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f51b5379-fafd-41cf-aa2b-f705adb424b4-config\") on node \"crc\" DevicePath \"\"" Nov 26 07:04:04 crc kubenswrapper[4492]: I1126 07:04:04.455575 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ld7ns\" (UniqueName: \"kubernetes.io/projected/f51b5379-fafd-41cf-aa2b-f705adb424b4-kube-api-access-ld7ns\") on node \"crc\" DevicePath \"\"" Nov 26 07:04:04 crc kubenswrapper[4492]: I1126 07:04:04.602821 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-576bz"] Nov 26 07:04:04 crc kubenswrapper[4492]: I1126 07:04:04.798702 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"40545f10-cf57-45a2-b500-33811a633cc8","Type":"ContainerStarted","Data":"17feb4389b39545ee93a8a2d9050d47a51d65881ffab021fbf462dd95dd2668f"} Nov 26 07:04:04 crc kubenswrapper[4492]: I1126 07:04:04.801642 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-59899b64cc-ltrhr" event={"ID":"f51b5379-fafd-41cf-aa2b-f705adb424b4","Type":"ContainerDied","Data":"1efbefd14efe0d0e11f85e3f8411644adaf575993a1d3119826999edd08af721"} Nov 26 07:04:04 crc kubenswrapper[4492]: I1126 07:04:04.801766 4492 util.go:48] "No ready sandbox for pod can be found. 
Nov 26 07:04:04 crc kubenswrapper[4492]: I1126 07:04:04.807720 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-576bz" event={"ID":"da5f9543-4f48-47b1-9164-d2e736a1bc6f","Type":"ContainerStarted","Data":"10dcfe2a33f1dc3cd68ac32475c8d85a6097c0fa9ec309ed29e456af66e1a8fa"}
Nov 26 07:04:04 crc kubenswrapper[4492]: I1126 07:04:04.904194 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-59899b64cc-ltrhr"]
Nov 26 07:04:04 crc kubenswrapper[4492]: I1126 07:04:04.910734 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-59899b64cc-ltrhr"]
Nov 26 07:04:04 crc kubenswrapper[4492]: I1126 07:04:04.918753 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-c4tj9"]
Nov 26 07:04:05 crc kubenswrapper[4492]: I1126 07:04:05.045878 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-djlnq"]
Nov 26 07:04:05 crc kubenswrapper[4492]: I1126 07:04:05.053120 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"]
Nov 26 07:04:05 crc kubenswrapper[4492]: I1126 07:04:05.279536 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-hrhlg"]
Nov 26 07:04:05 crc kubenswrapper[4492]: W1126 07:04:05.436761 4492 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda883fb65_c766_4a13_bac0_177d4ffe2de2.slice/crio-e722f401c80fe89a3a44571ede359e47abfaf53b8d94a436e350dbfe4afa20cb WatchSource:0}: Error finding container e722f401c80fe89a3a44571ede359e47abfaf53b8d94a436e350dbfe4afa20cb: Status 404 returned error can't find the container with id e722f401c80fe89a3a44571ede359e47abfaf53b8d94a436e350dbfe4afa20cb
Nov 26 07:04:05 crc kubenswrapper[4492]: I1126 07:04:05.553675 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7bbd9697cc-sb6h8"
Nov 26 07:04:05 crc kubenswrapper[4492]: I1126 07:04:05.706252 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d8wnd\" (UniqueName: \"kubernetes.io/projected/d0f38be1-2d63-4e62-8961-b52fdad1a712-kube-api-access-d8wnd\") pod \"d0f38be1-2d63-4e62-8961-b52fdad1a712\" (UID: \"d0f38be1-2d63-4e62-8961-b52fdad1a712\") "
Nov 26 07:04:05 crc kubenswrapper[4492]: I1126 07:04:05.706662 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d0f38be1-2d63-4e62-8961-b52fdad1a712-dns-svc\") pod \"d0f38be1-2d63-4e62-8961-b52fdad1a712\" (UID: \"d0f38be1-2d63-4e62-8961-b52fdad1a712\") "
Nov 26 07:04:05 crc kubenswrapper[4492]: I1126 07:04:05.706721 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d0f38be1-2d63-4e62-8961-b52fdad1a712-config\") pod \"d0f38be1-2d63-4e62-8961-b52fdad1a712\" (UID: \"d0f38be1-2d63-4e62-8961-b52fdad1a712\") "
Nov 26 07:04:05 crc kubenswrapper[4492]: I1126 07:04:05.707871 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d0f38be1-2d63-4e62-8961-b52fdad1a712-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "d0f38be1-2d63-4e62-8961-b52fdad1a712" (UID: "d0f38be1-2d63-4e62-8961-b52fdad1a712"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 07:04:05 crc kubenswrapper[4492]: I1126 07:04:05.708284 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d0f38be1-2d63-4e62-8961-b52fdad1a712-config" (OuterVolumeSpecName: "config") pod "d0f38be1-2d63-4e62-8961-b52fdad1a712" (UID: "d0f38be1-2d63-4e62-8961-b52fdad1a712"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 07:04:05 crc kubenswrapper[4492]: I1126 07:04:05.713365 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d0f38be1-2d63-4e62-8961-b52fdad1a712-kube-api-access-d8wnd" (OuterVolumeSpecName: "kube-api-access-d8wnd") pod "d0f38be1-2d63-4e62-8961-b52fdad1a712" (UID: "d0f38be1-2d63-4e62-8961-b52fdad1a712"). InnerVolumeSpecName "kube-api-access-d8wnd". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 07:04:05 crc kubenswrapper[4492]: I1126 07:04:05.809458 4492 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d0f38be1-2d63-4e62-8961-b52fdad1a712-dns-svc\") on node \"crc\" DevicePath \"\""
Nov 26 07:04:05 crc kubenswrapper[4492]: I1126 07:04:05.809487 4492 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d0f38be1-2d63-4e62-8961-b52fdad1a712-config\") on node \"crc\" DevicePath \"\""
Nov 26 07:04:05 crc kubenswrapper[4492]: I1126 07:04:05.809498 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d8wnd\" (UniqueName: \"kubernetes.io/projected/d0f38be1-2d63-4e62-8961-b52fdad1a712-kube-api-access-d8wnd\") on node \"crc\" DevicePath \"\""
Nov 26 07:04:05 crc kubenswrapper[4492]: I1126 07:04:05.818870 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-hrhlg" event={"ID":"68e83779-7285-47ce-927b-e3f862af6367","Type":"ContainerStarted","Data":"477f7ff75f48bba471add03cbfc6ea8d9335ab2134436dd8dc387d853163c164"}
Nov 26 07:04:05 crc kubenswrapper[4492]: I1126 07:04:05.821261 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-djlnq" event={"ID":"cd517ac8-3f42-4406-8bb2-dd7f1b87daf7","Type":"ContainerStarted","Data":"4f065ff2a058232b35b025961cd3f5f350e96ad379719e40ee9f8f8ad08634b0"}
Nov 26 07:04:05 crc kubenswrapper[4492]: I1126 07:04:05.822360 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"e87fc594-8585-4df8-97e6-2abda24b3fcc","Type":"ContainerStarted","Data":"cdd67f15c22ff5b14e883ab6129bb11ba41606975e35cb43586466454400c9c2"}
Nov 26 07:04:05 crc kubenswrapper[4492]: I1126 07:04:05.825794 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7bbd9697cc-sb6h8" event={"ID":"d0f38be1-2d63-4e62-8961-b52fdad1a712","Type":"ContainerDied","Data":"28c1ac2469aad1f3f0e9537077729dcf1fbe71f3e4902ea2aac2aafd033da647"}
Nov 26 07:04:05 crc kubenswrapper[4492]: I1126 07:04:05.825881 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7bbd9697cc-sb6h8"
Nov 26 07:04:05 crc kubenswrapper[4492]: I1126 07:04:05.839152 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c4tj9" event={"ID":"a883fb65-c766-4a13-bac0-177d4ffe2de2","Type":"ContainerStarted","Data":"e722f401c80fe89a3a44571ede359e47abfaf53b8d94a436e350dbfe4afa20cb"}
Nov 26 07:04:05 crc kubenswrapper[4492]: I1126 07:04:05.895034 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7bbd9697cc-sb6h8"]
Nov 26 07:04:05 crc kubenswrapper[4492]: I1126 07:04:05.896785 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7bbd9697cc-sb6h8"]
Nov 26 07:04:06 crc kubenswrapper[4492]: I1126 07:04:06.021566 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-2nmf5"]
Nov 26 07:04:06 crc kubenswrapper[4492]: W1126 07:04:06.149856 4492 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0fd9ab99_d364_4442_a203_ce8a7b838cf3.slice/crio-961d32c65dfc85e38bb0e078ba2bf3119c26fdbb6a049493ebe20792affaf3db WatchSource:0}: Error finding container 961d32c65dfc85e38bb0e078ba2bf3119c26fdbb6a049493ebe20792affaf3db: Status 404 returned error can't find the container with id 961d32c65dfc85e38bb0e078ba2bf3119c26fdbb6a049493ebe20792affaf3db
Nov 26 07:04:06 crc kubenswrapper[4492]: I1126 07:04:06.229366 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-kh56n"]
Nov 26 07:04:06 crc kubenswrapper[4492]: I1126 07:04:06.238256 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-kh56n"
Nov 26 07:04:06 crc kubenswrapper[4492]: I1126 07:04:06.298228 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-kh56n"]
Nov 26 07:04:06 crc kubenswrapper[4492]: I1126 07:04:06.350621 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b9ccf1b8-a1eb-4501-b96c-929efdde7c47-utilities\") pod \"community-operators-kh56n\" (UID: \"b9ccf1b8-a1eb-4501-b96c-929efdde7c47\") " pod="openshift-marketplace/community-operators-kh56n"
Nov 26 07:04:06 crc kubenswrapper[4492]: I1126 07:04:06.350728 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xrf9x\" (UniqueName: \"kubernetes.io/projected/b9ccf1b8-a1eb-4501-b96c-929efdde7c47-kube-api-access-xrf9x\") pod \"community-operators-kh56n\" (UID: \"b9ccf1b8-a1eb-4501-b96c-929efdde7c47\") " pod="openshift-marketplace/community-operators-kh56n"
Nov 26 07:04:06 crc kubenswrapper[4492]: I1126 07:04:06.350764 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b9ccf1b8-a1eb-4501-b96c-929efdde7c47-catalog-content\") pod \"community-operators-kh56n\" (UID: \"b9ccf1b8-a1eb-4501-b96c-929efdde7c47\") " pod="openshift-marketplace/community-operators-kh56n"
Nov 26 07:04:06 crc kubenswrapper[4492]: I1126 07:04:06.449679 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d0f38be1-2d63-4e62-8961-b52fdad1a712" path="/var/lib/kubelet/pods/d0f38be1-2d63-4e62-8961-b52fdad1a712/volumes"
Nov 26 07:04:06 crc kubenswrapper[4492]: I1126 07:04:06.450166 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f51b5379-fafd-41cf-aa2b-f705adb424b4" path="/var/lib/kubelet/pods/f51b5379-fafd-41cf-aa2b-f705adb424b4/volumes"
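The "Cleaned up orphaned pod volumes dir" entries refer to the on-disk layout /var/lib/kubelet/pods/<podUID>/volumes/<plugin>/<volume> that the unmount and detach lines above have just emptied. A read-only audit sketch of that layout (a hypothetical helper, not the kubelet's own cleanup path):

package main

// orphanvols.go: list per-pod volume directories under the kubelet pods root,
// using the same /var/lib/kubelet/pods/<podUID>/volumes layout named in the log.
import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	root := "/var/lib/kubelet/pods" // assumed default root; pass another as argv[1]
	if len(os.Args) > 1 {
		root = os.Args[1]
	}
	pods, err := os.ReadDir(root)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	for _, p := range pods {
		volDir := filepath.Join(root, p.Name(), "volumes")
		plugins, err := os.ReadDir(volDir)
		if err != nil {
			continue // no volumes dir left: this pod is fully cleaned up
		}
		for _, plug := range plugins {
			vols, _ := os.ReadDir(filepath.Join(volDir, plug.Name()))
			for _, v := range vols {
				fmt.Printf("pod %s  plugin %s  volume %s\n", p.Name(), plug.Name(), v.Name())
			}
		}
	}
}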
Nov 26 07:04:06 crc kubenswrapper[4492]: I1126 07:04:06.451853 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xrf9x\" (UniqueName: \"kubernetes.io/projected/b9ccf1b8-a1eb-4501-b96c-929efdde7c47-kube-api-access-xrf9x\") pod \"community-operators-kh56n\" (UID: \"b9ccf1b8-a1eb-4501-b96c-929efdde7c47\") " pod="openshift-marketplace/community-operators-kh56n"
Nov 26 07:04:06 crc kubenswrapper[4492]: I1126 07:04:06.451921 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b9ccf1b8-a1eb-4501-b96c-929efdde7c47-catalog-content\") pod \"community-operators-kh56n\" (UID: \"b9ccf1b8-a1eb-4501-b96c-929efdde7c47\") " pod="openshift-marketplace/community-operators-kh56n"
Nov 26 07:04:06 crc kubenswrapper[4492]: I1126 07:04:06.452017 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b9ccf1b8-a1eb-4501-b96c-929efdde7c47-utilities\") pod \"community-operators-kh56n\" (UID: \"b9ccf1b8-a1eb-4501-b96c-929efdde7c47\") " pod="openshift-marketplace/community-operators-kh56n"
Nov 26 07:04:06 crc kubenswrapper[4492]: I1126 07:04:06.452517 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b9ccf1b8-a1eb-4501-b96c-929efdde7c47-utilities\") pod \"community-operators-kh56n\" (UID: \"b9ccf1b8-a1eb-4501-b96c-929efdde7c47\") " pod="openshift-marketplace/community-operators-kh56n"
Nov 26 07:04:06 crc kubenswrapper[4492]: I1126 07:04:06.452576 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b9ccf1b8-a1eb-4501-b96c-929efdde7c47-catalog-content\") pod \"community-operators-kh56n\" (UID: \"b9ccf1b8-a1eb-4501-b96c-929efdde7c47\") " pod="openshift-marketplace/community-operators-kh56n"
Nov 26 07:04:06 crc kubenswrapper[4492]: I1126 07:04:06.468599 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xrf9x\" (UniqueName: \"kubernetes.io/projected/b9ccf1b8-a1eb-4501-b96c-929efdde7c47-kube-api-access-xrf9x\") pod \"community-operators-kh56n\" (UID: \"b9ccf1b8-a1eb-4501-b96c-929efdde7c47\") " pod="openshift-marketplace/community-operators-kh56n"
Nov 26 07:04:06 crc kubenswrapper[4492]: I1126 07:04:06.567768 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-kh56n"
Nov 26 07:04:06 crc kubenswrapper[4492]: I1126 07:04:06.852204 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2nmf5" event={"ID":"0fd9ab99-d364-4442-a203-ce8a7b838cf3","Type":"ContainerStarted","Data":"961d32c65dfc85e38bb0e078ba2bf3119c26fdbb6a049493ebe20792affaf3db"}
Nov 26 07:04:07 crc kubenswrapper[4492]: I1126 07:04:07.867148 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-657d948df5-j4blw" event={"ID":"e068f599-17d9-4fd1-8fca-d74938c89110","Type":"ContainerStarted","Data":"2881fdd9cc380266076fc339a53601bf207514a8b59c814a593a32c6b9f4b07e"}
Nov 26 07:04:07 crc kubenswrapper[4492]: I1126 07:04:07.872542 4492 generic.go:334] "Generic (PLEG): container finished" podID="da5f9543-4f48-47b1-9164-d2e736a1bc6f" containerID="4e8c7288cb2ecc7ccb4d4059592ac82ac794d13616d767cc6279f4ee211621f0" exitCode=0
Nov 26 07:04:07 crc kubenswrapper[4492]: I1126 07:04:07.872625 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-576bz" event={"ID":"da5f9543-4f48-47b1-9164-d2e736a1bc6f","Type":"ContainerDied","Data":"4e8c7288cb2ecc7ccb4d4059592ac82ac794d13616d767cc6279f4ee211621f0"}
Nov 26 07:04:07 crc kubenswrapper[4492]: I1126 07:04:07.874309 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c4tj9" event={"ID":"a883fb65-c766-4a13-bac0-177d4ffe2de2","Type":"ContainerStarted","Data":"8f2597a58fb32da3dfeb32a3d8dcc27d91be2a0ffb55f795e752a457797b34ee"}
Nov 26 07:04:08 crc kubenswrapper[4492]: I1126 07:04:08.003430 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-kh56n"]
Nov 26 07:04:08 crc kubenswrapper[4492]: W1126 07:04:08.095849 4492 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb9ccf1b8_a1eb_4501_b96c_929efdde7c47.slice/crio-f5559be523f7106c5547a83ae4973770c22ca85f583ac8bc3b9659bf088b5377 WatchSource:0}: Error finding container f5559be523f7106c5547a83ae4973770c22ca85f583ac8bc3b9659bf088b5377: Status 404 returned error can't find the container with id f5559be523f7106c5547a83ae4973770c22ca85f583ac8bc3b9659bf088b5377
Nov 26 07:04:08 crc kubenswrapper[4492]: I1126 07:04:08.670936 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-metrics-2whmr"]
Nov 26 07:04:08 crc kubenswrapper[4492]: I1126 07:04:08.672397 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-2whmr"
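The "SyncLoop (PLEG)" lines carry a JSON-like event={...} payload whose Type field (ContainerStarted, ContainerDied, ...) drives the sync loop. A quick tally of those events per pod, as a sketch over the line format shown above (assumed helper, not an official parser):

package main

// plegtally.go: count ContainerStarted/ContainerDied PLEG events per pod from
// a kubelet log on stdin, matching the
// `"SyncLoop (PLEG): event for pod" pod=... event={...,"Type":"...",...}` lines.
import (
	"bufio"
	"fmt"
	"os"
	"regexp"
)

var pleg = regexp.MustCompile(`"SyncLoop \(PLEG\): event for pod" pod="([^"]+)" event=\{.*?"Type":"(\w+)"`)

func main() {
	counts := map[string]map[string]int{}
	sc := bufio.NewScanner(os.Stdin)
	sc.Buffer(make([]byte, 0, 1024*1024), 1024*1024)
	for sc.Scan() {
		if m := pleg.FindStringSubmatch(sc.Text()); m != nil {
			if counts[m[1]] == nil {
				counts[m[1]] = map[string]int{}
			}
			counts[m[1]][m[2]]++
		}
	}
	for pod, byType := range counts {
		fmt.Printf("%-55s started=%d died=%d\n", pod, byType["ContainerStarted"], byType["ContainerDied"])
	}
}

For the catalog pods in this window, a Started immediately followed by a Died with exitCode=0 is the expected pattern: their extract/utility containers run to completion.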
Nov 26 07:04:08 crc kubenswrapper[4492]: I1126 07:04:08.676386 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-metrics-config"
Nov 26 07:04:08 crc kubenswrapper[4492]: I1126 07:04:08.692325 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/1b32cec9-eeea-4fcd-b20c-ad500f516fa6-ovn-rundir\") pod \"ovn-controller-metrics-2whmr\" (UID: \"1b32cec9-eeea-4fcd-b20c-ad500f516fa6\") " pod="openstack/ovn-controller-metrics-2whmr"
Nov 26 07:04:08 crc kubenswrapper[4492]: I1126 07:04:08.692387 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/1b32cec9-eeea-4fcd-b20c-ad500f516fa6-ovs-rundir\") pod \"ovn-controller-metrics-2whmr\" (UID: \"1b32cec9-eeea-4fcd-b20c-ad500f516fa6\") " pod="openstack/ovn-controller-metrics-2whmr"
Nov 26 07:04:08 crc kubenswrapper[4492]: I1126 07:04:08.692419 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1b32cec9-eeea-4fcd-b20c-ad500f516fa6-config\") pod \"ovn-controller-metrics-2whmr\" (UID: \"1b32cec9-eeea-4fcd-b20c-ad500f516fa6\") " pod="openstack/ovn-controller-metrics-2whmr"
Nov 26 07:04:08 crc kubenswrapper[4492]: I1126 07:04:08.692435 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/1b32cec9-eeea-4fcd-b20c-ad500f516fa6-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-2whmr\" (UID: \"1b32cec9-eeea-4fcd-b20c-ad500f516fa6\") " pod="openstack/ovn-controller-metrics-2whmr"
Nov 26 07:04:08 crc kubenswrapper[4492]: I1126 07:04:08.692454 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1b32cec9-eeea-4fcd-b20c-ad500f516fa6-combined-ca-bundle\") pod \"ovn-controller-metrics-2whmr\" (UID: \"1b32cec9-eeea-4fcd-b20c-ad500f516fa6\") " pod="openstack/ovn-controller-metrics-2whmr"
Nov 26 07:04:08 crc kubenswrapper[4492]: I1126 07:04:08.692643 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tppzh\" (UniqueName: \"kubernetes.io/projected/1b32cec9-eeea-4fcd-b20c-ad500f516fa6-kube-api-access-tppzh\") pod \"ovn-controller-metrics-2whmr\" (UID: \"1b32cec9-eeea-4fcd-b20c-ad500f516fa6\") " pod="openstack/ovn-controller-metrics-2whmr"
Nov 26 07:04:08 crc kubenswrapper[4492]: I1126 07:04:08.701672 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-2whmr"]
Nov 26 07:04:08 crc kubenswrapper[4492]: I1126 07:04:08.795053 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tppzh\" (UniqueName: \"kubernetes.io/projected/1b32cec9-eeea-4fcd-b20c-ad500f516fa6-kube-api-access-tppzh\") pod \"ovn-controller-metrics-2whmr\" (UID: \"1b32cec9-eeea-4fcd-b20c-ad500f516fa6\") " pod="openstack/ovn-controller-metrics-2whmr"
Nov 26 07:04:08 crc kubenswrapper[4492]: I1126 07:04:08.795121 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/1b32cec9-eeea-4fcd-b20c-ad500f516fa6-ovn-rundir\") pod \"ovn-controller-metrics-2whmr\" (UID: \"1b32cec9-eeea-4fcd-b20c-ad500f516fa6\") " pod="openstack/ovn-controller-metrics-2whmr"
Nov 26 07:04:08 crc kubenswrapper[4492]: I1126 07:04:08.795184 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/1b32cec9-eeea-4fcd-b20c-ad500f516fa6-ovs-rundir\") pod \"ovn-controller-metrics-2whmr\" (UID: \"1b32cec9-eeea-4fcd-b20c-ad500f516fa6\") " pod="openstack/ovn-controller-metrics-2whmr"
Nov 26 07:04:08 crc kubenswrapper[4492]: I1126 07:04:08.795204 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1b32cec9-eeea-4fcd-b20c-ad500f516fa6-config\") pod \"ovn-controller-metrics-2whmr\" (UID: \"1b32cec9-eeea-4fcd-b20c-ad500f516fa6\") " pod="openstack/ovn-controller-metrics-2whmr"
Nov 26 07:04:08 crc kubenswrapper[4492]: I1126 07:04:08.795219 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/1b32cec9-eeea-4fcd-b20c-ad500f516fa6-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-2whmr\" (UID: \"1b32cec9-eeea-4fcd-b20c-ad500f516fa6\") " pod="openstack/ovn-controller-metrics-2whmr"
Nov 26 07:04:08 crc kubenswrapper[4492]: I1126 07:04:08.795238 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1b32cec9-eeea-4fcd-b20c-ad500f516fa6-combined-ca-bundle\") pod \"ovn-controller-metrics-2whmr\" (UID: \"1b32cec9-eeea-4fcd-b20c-ad500f516fa6\") " pod="openstack/ovn-controller-metrics-2whmr"
Nov 26 07:04:08 crc kubenswrapper[4492]: I1126 07:04:08.796559 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/1b32cec9-eeea-4fcd-b20c-ad500f516fa6-ovs-rundir\") pod \"ovn-controller-metrics-2whmr\" (UID: \"1b32cec9-eeea-4fcd-b20c-ad500f516fa6\") " pod="openstack/ovn-controller-metrics-2whmr"
Nov 26 07:04:08 crc kubenswrapper[4492]: I1126 07:04:08.796994 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/1b32cec9-eeea-4fcd-b20c-ad500f516fa6-ovn-rundir\") pod \"ovn-controller-metrics-2whmr\" (UID: \"1b32cec9-eeea-4fcd-b20c-ad500f516fa6\") " pod="openstack/ovn-controller-metrics-2whmr"
Nov 26 07:04:08 crc kubenswrapper[4492]: I1126 07:04:08.797759 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1b32cec9-eeea-4fcd-b20c-ad500f516fa6-config\") pod \"ovn-controller-metrics-2whmr\" (UID: \"1b32cec9-eeea-4fcd-b20c-ad500f516fa6\") " pod="openstack/ovn-controller-metrics-2whmr"
Nov 26 07:04:08 crc kubenswrapper[4492]: I1126 07:04:08.803422 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1b32cec9-eeea-4fcd-b20c-ad500f516fa6-combined-ca-bundle\") pod \"ovn-controller-metrics-2whmr\" (UID: \"1b32cec9-eeea-4fcd-b20c-ad500f516fa6\") " pod="openstack/ovn-controller-metrics-2whmr"
Nov 26 07:04:08 crc kubenswrapper[4492]: I1126 07:04:08.818267 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/1b32cec9-eeea-4fcd-b20c-ad500f516fa6-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-2whmr\" (UID: \"1b32cec9-eeea-4fcd-b20c-ad500f516fa6\") " pod="openstack/ovn-controller-metrics-2whmr"
Nov 26 07:04:08 crc kubenswrapper[4492]: I1126 07:04:08.828662 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tppzh\" (UniqueName: \"kubernetes.io/projected/1b32cec9-eeea-4fcd-b20c-ad500f516fa6-kube-api-access-tppzh\") pod \"ovn-controller-metrics-2whmr\" (UID: \"1b32cec9-eeea-4fcd-b20c-ad500f516fa6\") " pod="openstack/ovn-controller-metrics-2whmr"
Nov 26 07:04:08 crc kubenswrapper[4492]: I1126 07:04:08.887556 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"40ff3c7f-2bdf-42be-bcde-659ad3f15ca5","Type":"ContainerStarted","Data":"cf76ef8746e897a754e3bb429b3630689f062ed5210357362a117677199e0e4d"}
Nov 26 07:04:08 crc kubenswrapper[4492]: I1126 07:04:08.887650 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0"
Nov 26 07:04:08 crc kubenswrapper[4492]: I1126 07:04:08.891924 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"3bb75c38-10db-46c0-947c-3d91eca8f110","Type":"ContainerStarted","Data":"3fbd9cbd29985b4fcb600b83dde33b18c78aefc029c9d5ef728f9406cbc8ad4b"}
Nov 26 07:04:08 crc kubenswrapper[4492]: I1126 07:04:08.893630 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"11fb7794-a2db-4320-8946-91b18bb44afa","Type":"ContainerStarted","Data":"5c2c77394f97727de82c7b33516c1fe0e747fe0783e4bdce1e1269298f902bdb"}
Nov 26 07:04:08 crc kubenswrapper[4492]: I1126 07:04:08.901756 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-657d948df5-j4blw"]
Nov 26 07:04:08 crc kubenswrapper[4492]: I1126 07:04:08.904120 4492 generic.go:334] "Generic (PLEG): container finished" podID="ef4332f8-32f1-4ec9-a333-8b8b025151be" containerID="a496251bbb669b4b6dc85628df2d6cd35432e9f92ded0f1dc7e9b57eb9a0d280" exitCode=0
Nov 26 07:04:08 crc kubenswrapper[4492]: I1126 07:04:08.904165 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-85744f897f-kbhm6" event={"ID":"ef4332f8-32f1-4ec9-a333-8b8b025151be","Type":"ContainerDied","Data":"a496251bbb669b4b6dc85628df2d6cd35432e9f92ded0f1dc7e9b57eb9a0d280"}
Nov 26 07:04:08 crc kubenswrapper[4492]: I1126 07:04:08.911403 4492 generic.go:334] "Generic (PLEG): container finished" podID="b9ccf1b8-a1eb-4501-b96c-929efdde7c47" containerID="2dd03be46eb8fb90ed6fc6a22a4ed1bd99a099ae388f17b75b01084b532f3929" exitCode=0
Nov 26 07:04:08 crc kubenswrapper[4492]: I1126 07:04:08.911471 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kh56n" event={"ID":"b9ccf1b8-a1eb-4501-b96c-929efdde7c47","Type":"ContainerDied","Data":"2dd03be46eb8fb90ed6fc6a22a4ed1bd99a099ae388f17b75b01084b532f3929"}
Nov 26 07:04:08 crc kubenswrapper[4492]: I1126 07:04:08.911498 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kh56n" event={"ID":"b9ccf1b8-a1eb-4501-b96c-929efdde7c47","Type":"ContainerStarted","Data":"f5559be523f7106c5547a83ae4973770c22ca85f583ac8bc3b9659bf088b5377"}
Nov 26 07:04:08 crc kubenswrapper[4492]: I1126 07:04:08.914849 4492 generic.go:334] "Generic (PLEG): container finished" podID="0fd9ab99-d364-4442-a203-ce8a7b838cf3" containerID="1cc614f5739d6759766904cec4202186d0d69f06c660fb5ce7216b80d255871e" exitCode=0
Nov 26 07:04:08 crc kubenswrapper[4492]: I1126 07:04:08.915591 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2nmf5" event={"ID":"0fd9ab99-d364-4442-a203-ce8a7b838cf3","Type":"ContainerDied","Data":"1cc614f5739d6759766904cec4202186d0d69f06c660fb5ce7216b80d255871e"}
Nov 26 07:04:08 crc kubenswrapper[4492]: I1126 07:04:08.966684 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"2772f93f-41fd-4817-9c9f-3932e094b0ac","Type":"ContainerStarted","Data":"76f6721ef47097a99c3712bac699c0d079baa416a4d1de201bf205264cfbe608"}
Nov 26 07:04:08 crc kubenswrapper[4492]: I1126 07:04:08.984197 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=13.121273185 podStartE2EDuration="28.984163119s" podCreationTimestamp="2025-11-26 07:03:40 +0000 UTC" firstStartedPulling="2025-11-26 07:03:51.687374239 +0000 UTC m=+927.571262537" lastFinishedPulling="2025-11-26 07:04:07.550264174 +0000 UTC m=+943.434152471" observedRunningTime="2025-11-26 07:04:08.972416182 +0000 UTC m=+944.856304480" watchObservedRunningTime="2025-11-26 07:04:08.984163119 +0000 UTC m=+944.868051416"
Nov 26 07:04:09 crc kubenswrapper[4492]: I1126 07:04:09.005794 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-75f567bcf9-n86dv"]
Nov 26 07:04:09 crc kubenswrapper[4492]: I1126 07:04:09.013647 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-75f567bcf9-n86dv"
Nov 26 07:04:09 crc kubenswrapper[4492]: I1126 07:04:09.015227 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-nb"
Nov 26 07:04:09 crc kubenswrapper[4492]: I1126 07:04:09.017675 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-2whmr"
Nov 26 07:04:09 crc kubenswrapper[4492]: I1126 07:04:09.019958 4492 generic.go:334] "Generic (PLEG): container finished" podID="e068f599-17d9-4fd1-8fca-d74938c89110" containerID="2881fdd9cc380266076fc339a53601bf207514a8b59c814a593a32c6b9f4b07e" exitCode=0
Nov 26 07:04:09 crc kubenswrapper[4492]: I1126 07:04:09.020065 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-657d948df5-j4blw" event={"ID":"e068f599-17d9-4fd1-8fca-d74938c89110","Type":"ContainerDied","Data":"2881fdd9cc380266076fc339a53601bf207514a8b59c814a593a32c6b9f4b07e"}
Nov 26 07:04:09 crc kubenswrapper[4492]: I1126 07:04:09.044390 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"0a5df877-dd0c-4f1d-90bd-07a9b738e4e3","Type":"ContainerStarted","Data":"261ac503e60a7ec624559894bb8b5cd47cf6dc1ba8242ff72d5db16f8c66cfa2"}
Nov 26 07:04:09 crc kubenswrapper[4492]: I1126 07:04:09.044977 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/memcached-0"
Nov 26 07:04:09 crc kubenswrapper[4492]: I1126 07:04:09.052335 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"daa58280-e6a7-477f-bfdb-accd4f56ac4d","Type":"ContainerStarted","Data":"0608b29441266fad95d69d5b2720f135463681abce1d524e8aa820621905da40"}
Nov 26 07:04:09 crc kubenswrapper[4492]: I1126 07:04:09.058349 4492 generic.go:334] "Generic (PLEG): container finished" podID="a883fb65-c766-4a13-bac0-177d4ffe2de2" containerID="8f2597a58fb32da3dfeb32a3d8dcc27d91be2a0ffb55f795e752a457797b34ee" exitCode=0
Nov 26 07:04:09 crc kubenswrapper[4492]: I1126 07:04:09.058388 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c4tj9" event={"ID":"a883fb65-c766-4a13-bac0-177d4ffe2de2","Type":"ContainerDied","Data":"8f2597a58fb32da3dfeb32a3d8dcc27d91be2a0ffb55f795e752a457797b34ee"}
event={"ID":"a883fb65-c766-4a13-bac0-177d4ffe2de2","Type":"ContainerDied","Data":"8f2597a58fb32da3dfeb32a3d8dcc27d91be2a0ffb55f795e752a457797b34ee"} Nov 26 07:04:09 crc kubenswrapper[4492]: I1126 07:04:09.164779 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-75f567bcf9-n86dv"] Nov 26 07:04:09 crc kubenswrapper[4492]: I1126 07:04:09.225755 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/018929d0-9b48-4fda-8f21-1c4d4f6efc4c-ovsdbserver-nb\") pod \"dnsmasq-dns-75f567bcf9-n86dv\" (UID: \"018929d0-9b48-4fda-8f21-1c4d4f6efc4c\") " pod="openstack/dnsmasq-dns-75f567bcf9-n86dv" Nov 26 07:04:09 crc kubenswrapper[4492]: I1126 07:04:09.225829 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/018929d0-9b48-4fda-8f21-1c4d4f6efc4c-dns-svc\") pod \"dnsmasq-dns-75f567bcf9-n86dv\" (UID: \"018929d0-9b48-4fda-8f21-1c4d4f6efc4c\") " pod="openstack/dnsmasq-dns-75f567bcf9-n86dv" Nov 26 07:04:09 crc kubenswrapper[4492]: I1126 07:04:09.225861 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/018929d0-9b48-4fda-8f21-1c4d4f6efc4c-config\") pod \"dnsmasq-dns-75f567bcf9-n86dv\" (UID: \"018929d0-9b48-4fda-8f21-1c4d4f6efc4c\") " pod="openstack/dnsmasq-dns-75f567bcf9-n86dv" Nov 26 07:04:09 crc kubenswrapper[4492]: I1126 07:04:09.225913 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mtvnb\" (UniqueName: \"kubernetes.io/projected/018929d0-9b48-4fda-8f21-1c4d4f6efc4c-kube-api-access-mtvnb\") pod \"dnsmasq-dns-75f567bcf9-n86dv\" (UID: \"018929d0-9b48-4fda-8f21-1c4d4f6efc4c\") " pod="openstack/dnsmasq-dns-75f567bcf9-n86dv" Nov 26 07:04:09 crc kubenswrapper[4492]: I1126 07:04:09.337462 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/018929d0-9b48-4fda-8f21-1c4d4f6efc4c-ovsdbserver-nb\") pod \"dnsmasq-dns-75f567bcf9-n86dv\" (UID: \"018929d0-9b48-4fda-8f21-1c4d4f6efc4c\") " pod="openstack/dnsmasq-dns-75f567bcf9-n86dv" Nov 26 07:04:09 crc kubenswrapper[4492]: I1126 07:04:09.337523 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/018929d0-9b48-4fda-8f21-1c4d4f6efc4c-dns-svc\") pod \"dnsmasq-dns-75f567bcf9-n86dv\" (UID: \"018929d0-9b48-4fda-8f21-1c4d4f6efc4c\") " pod="openstack/dnsmasq-dns-75f567bcf9-n86dv" Nov 26 07:04:09 crc kubenswrapper[4492]: I1126 07:04:09.337603 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/018929d0-9b48-4fda-8f21-1c4d4f6efc4c-config\") pod \"dnsmasq-dns-75f567bcf9-n86dv\" (UID: \"018929d0-9b48-4fda-8f21-1c4d4f6efc4c\") " pod="openstack/dnsmasq-dns-75f567bcf9-n86dv" Nov 26 07:04:09 crc kubenswrapper[4492]: I1126 07:04:09.337651 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mtvnb\" (UniqueName: \"kubernetes.io/projected/018929d0-9b48-4fda-8f21-1c4d4f6efc4c-kube-api-access-mtvnb\") pod \"dnsmasq-dns-75f567bcf9-n86dv\" (UID: \"018929d0-9b48-4fda-8f21-1c4d4f6efc4c\") " pod="openstack/dnsmasq-dns-75f567bcf9-n86dv" Nov 26 07:04:09 crc kubenswrapper[4492]: I1126 07:04:09.338564 4492 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/018929d0-9b48-4fda-8f21-1c4d4f6efc4c-config\") pod \"dnsmasq-dns-75f567bcf9-n86dv\" (UID: \"018929d0-9b48-4fda-8f21-1c4d4f6efc4c\") " pod="openstack/dnsmasq-dns-75f567bcf9-n86dv" Nov 26 07:04:09 crc kubenswrapper[4492]: I1126 07:04:09.338814 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/018929d0-9b48-4fda-8f21-1c4d4f6efc4c-ovsdbserver-nb\") pod \"dnsmasq-dns-75f567bcf9-n86dv\" (UID: \"018929d0-9b48-4fda-8f21-1c4d4f6efc4c\") " pod="openstack/dnsmasq-dns-75f567bcf9-n86dv" Nov 26 07:04:09 crc kubenswrapper[4492]: I1126 07:04:09.339151 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/018929d0-9b48-4fda-8f21-1c4d4f6efc4c-dns-svc\") pod \"dnsmasq-dns-75f567bcf9-n86dv\" (UID: \"018929d0-9b48-4fda-8f21-1c4d4f6efc4c\") " pod="openstack/dnsmasq-dns-75f567bcf9-n86dv" Nov 26 07:04:09 crc kubenswrapper[4492]: I1126 07:04:09.362902 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/memcached-0" podStartSLOduration=6.404805387 podStartE2EDuration="31.362886075s" podCreationTimestamp="2025-11-26 07:03:38 +0000 UTC" firstStartedPulling="2025-11-26 07:03:39.419749619 +0000 UTC m=+915.303637916" lastFinishedPulling="2025-11-26 07:04:04.377830306 +0000 UTC m=+940.261718604" observedRunningTime="2025-11-26 07:04:09.35869788 +0000 UTC m=+945.242586167" watchObservedRunningTime="2025-11-26 07:04:09.362886075 +0000 UTC m=+945.246774363" Nov 26 07:04:09 crc kubenswrapper[4492]: I1126 07:04:09.374397 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mtvnb\" (UniqueName: \"kubernetes.io/projected/018929d0-9b48-4fda-8f21-1c4d4f6efc4c-kube-api-access-mtvnb\") pod \"dnsmasq-dns-75f567bcf9-n86dv\" (UID: \"018929d0-9b48-4fda-8f21-1c4d4f6efc4c\") " pod="openstack/dnsmasq-dns-75f567bcf9-n86dv" Nov 26 07:04:09 crc kubenswrapper[4492]: I1126 07:04:09.408641 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-85744f897f-kbhm6"] Nov 26 07:04:09 crc kubenswrapper[4492]: I1126 07:04:09.433684 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-66f998579f-v5z4b"] Nov 26 07:04:09 crc kubenswrapper[4492]: I1126 07:04:09.434914 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-66f998579f-v5z4b" Nov 26 07:04:09 crc kubenswrapper[4492]: I1126 07:04:09.446839 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-sb" Nov 26 07:04:09 crc kubenswrapper[4492]: I1126 07:04:09.483842 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-66f998579f-v5z4b"] Nov 26 07:04:09 crc kubenswrapper[4492]: I1126 07:04:09.554148 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b05f3cd2-b60c-4dc3-9455-bf23335b678a-config\") pod \"dnsmasq-dns-66f998579f-v5z4b\" (UID: \"b05f3cd2-b60c-4dc3-9455-bf23335b678a\") " pod="openstack/dnsmasq-dns-66f998579f-v5z4b" Nov 26 07:04:09 crc kubenswrapper[4492]: I1126 07:04:09.554425 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-flxtb\" (UniqueName: \"kubernetes.io/projected/b05f3cd2-b60c-4dc3-9455-bf23335b678a-kube-api-access-flxtb\") pod \"dnsmasq-dns-66f998579f-v5z4b\" (UID: \"b05f3cd2-b60c-4dc3-9455-bf23335b678a\") " pod="openstack/dnsmasq-dns-66f998579f-v5z4b" Nov 26 07:04:09 crc kubenswrapper[4492]: I1126 07:04:09.554481 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b05f3cd2-b60c-4dc3-9455-bf23335b678a-ovsdbserver-nb\") pod \"dnsmasq-dns-66f998579f-v5z4b\" (UID: \"b05f3cd2-b60c-4dc3-9455-bf23335b678a\") " pod="openstack/dnsmasq-dns-66f998579f-v5z4b" Nov 26 07:04:09 crc kubenswrapper[4492]: I1126 07:04:09.554531 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b05f3cd2-b60c-4dc3-9455-bf23335b678a-dns-svc\") pod \"dnsmasq-dns-66f998579f-v5z4b\" (UID: \"b05f3cd2-b60c-4dc3-9455-bf23335b678a\") " pod="openstack/dnsmasq-dns-66f998579f-v5z4b" Nov 26 07:04:09 crc kubenswrapper[4492]: I1126 07:04:09.554557 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b05f3cd2-b60c-4dc3-9455-bf23335b678a-ovsdbserver-sb\") pod \"dnsmasq-dns-66f998579f-v5z4b\" (UID: \"b05f3cd2-b60c-4dc3-9455-bf23335b678a\") " pod="openstack/dnsmasq-dns-66f998579f-v5z4b" Nov 26 07:04:09 crc kubenswrapper[4492]: E1126 07:04:09.625819 4492 log.go:32] "CreateContainer in sandbox from runtime service failed" err=< Nov 26 07:04:09 crc kubenswrapper[4492]: rpc error: code = Unknown desc = container create failed: mount `/var/lib/kubelet/pods/ef4332f8-32f1-4ec9-a333-8b8b025151be/volume-subpaths/dns-svc/dnsmasq-dns/1` to `etc/dnsmasq.d/hosts/dns-svc`: No such file or directory Nov 26 07:04:09 crc kubenswrapper[4492]: > podSandboxID="636b64151c1488f3e0d68f0e1bc7657715425ed4f60ce2bfbfb92c7ba3ed2990" Nov 26 07:04:09 crc kubenswrapper[4492]: E1126 07:04:09.625965 4492 kuberuntime_manager.go:1274] "Unhandled Error" err=< Nov 26 07:04:09 crc kubenswrapper[4492]: container &Container{Name:dnsmasq-dns,Image:quay.rdoproject.org/podified-antelope-centos9/openstack-neutron-server:1f5c0439f2433cb462b222a5bb23e629,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv 
--log-queries],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nfdh5dfhb6h64h676hc4h78h97h669h54chfbh696hb5h54bh5d4h6bh64h644h677h584h5cbh698h9dh5bbh5f8h5b8hcdh644h5c7h694hbfh589q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-hdbqx,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:nil,TCPSocket:&TCPSocketAction{Port:{0 5353 },Host:,},GRPC:nil,},InitialDelaySeconds:3,TimeoutSeconds:5,PeriodSeconds:3,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:nil,TCPSocket:&TCPSocketAction{Port:{0 5353 },Host:,},GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-85744f897f-kbhm6_openstack(ef4332f8-32f1-4ec9-a333-8b8b025151be): CreateContainerError: container create failed: mount `/var/lib/kubelet/pods/ef4332f8-32f1-4ec9-a333-8b8b025151be/volume-subpaths/dns-svc/dnsmasq-dns/1` to `etc/dnsmasq.d/hosts/dns-svc`: No such file or directory Nov 26 07:04:09 crc kubenswrapper[4492]: > logger="UnhandledError" Nov 26 07:04:09 crc kubenswrapper[4492]: E1126 07:04:09.627412 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"dnsmasq-dns\" with CreateContainerError: \"container create failed: mount `/var/lib/kubelet/pods/ef4332f8-32f1-4ec9-a333-8b8b025151be/volume-subpaths/dns-svc/dnsmasq-dns/1` to `etc/dnsmasq.d/hosts/dns-svc`: No such file or directory\\n\"" pod="openstack/dnsmasq-dns-85744f897f-kbhm6" podUID="ef4332f8-32f1-4ec9-a333-8b8b025151be" Nov 26 07:04:09 crc kubenswrapper[4492]: I1126 07:04:09.655078 4492 util.go:30] "No sandbox for pod can be found. 
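The CreateContainerError above is CRI-O failing to bind-mount the per-container subPath source that the kubelet prepares under /var/lib/kubelet/pods/<podUID>/volume-subpaths/<volume>/<container>/<mountIndex>; if the backing configmap volume is torn down between retries (as happens to this dnsmasq pod), the prepared source can disappear and the mount fails with ENOENT. A diagnostic sketch (hypothetical helper, assuming that standard layout) that reconstructs and stats the exact path from the error:

package main

// subpathcheck.go: stat the kubelet's prepared subPath bind source for the
// failing mount above, i.e. the path CRI-O reported as missing.
import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	podUID := "ef4332f8-32f1-4ec9-a333-8b8b025151be" // from the failing pod above
	src := filepath.Join("/var/lib/kubelet/pods", podUID,
		"volume-subpaths", "dns-svc", "dnsmasq-dns", "1")
	if fi, err := os.Stat(src); err != nil {
		fmt.Println("subPath source missing:", err) // matches the log's "No such file or directory"
	} else {
		fmt.Println("subPath source present, mode", fi.Mode())
	}
}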
Nov 26 07:04:09 crc kubenswrapper[4492]: I1126 07:04:09.656999 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b05f3cd2-b60c-4dc3-9455-bf23335b678a-config\") pod \"dnsmasq-dns-66f998579f-v5z4b\" (UID: \"b05f3cd2-b60c-4dc3-9455-bf23335b678a\") " pod="openstack/dnsmasq-dns-66f998579f-v5z4b"
Nov 26 07:04:09 crc kubenswrapper[4492]: I1126 07:04:09.657030 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-flxtb\" (UniqueName: \"kubernetes.io/projected/b05f3cd2-b60c-4dc3-9455-bf23335b678a-kube-api-access-flxtb\") pod \"dnsmasq-dns-66f998579f-v5z4b\" (UID: \"b05f3cd2-b60c-4dc3-9455-bf23335b678a\") " pod="openstack/dnsmasq-dns-66f998579f-v5z4b"
Nov 26 07:04:09 crc kubenswrapper[4492]: I1126 07:04:09.657109 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b05f3cd2-b60c-4dc3-9455-bf23335b678a-ovsdbserver-nb\") pod \"dnsmasq-dns-66f998579f-v5z4b\" (UID: \"b05f3cd2-b60c-4dc3-9455-bf23335b678a\") " pod="openstack/dnsmasq-dns-66f998579f-v5z4b"
Nov 26 07:04:09 crc kubenswrapper[4492]: I1126 07:04:09.657213 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b05f3cd2-b60c-4dc3-9455-bf23335b678a-dns-svc\") pod \"dnsmasq-dns-66f998579f-v5z4b\" (UID: \"b05f3cd2-b60c-4dc3-9455-bf23335b678a\") " pod="openstack/dnsmasq-dns-66f998579f-v5z4b"
Nov 26 07:04:09 crc kubenswrapper[4492]: I1126 07:04:09.657245 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b05f3cd2-b60c-4dc3-9455-bf23335b678a-ovsdbserver-sb\") pod \"dnsmasq-dns-66f998579f-v5z4b\" (UID: \"b05f3cd2-b60c-4dc3-9455-bf23335b678a\") " pod="openstack/dnsmasq-dns-66f998579f-v5z4b"
Nov 26 07:04:09 crc kubenswrapper[4492]: I1126 07:04:09.658337 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b05f3cd2-b60c-4dc3-9455-bf23335b678a-config\") pod \"dnsmasq-dns-66f998579f-v5z4b\" (UID: \"b05f3cd2-b60c-4dc3-9455-bf23335b678a\") " pod="openstack/dnsmasq-dns-66f998579f-v5z4b"
Nov 26 07:04:09 crc kubenswrapper[4492]: I1126 07:04:09.658408 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b05f3cd2-b60c-4dc3-9455-bf23335b678a-ovsdbserver-sb\") pod \"dnsmasq-dns-66f998579f-v5z4b\" (UID: \"b05f3cd2-b60c-4dc3-9455-bf23335b678a\") " pod="openstack/dnsmasq-dns-66f998579f-v5z4b"
Nov 26 07:04:09 crc kubenswrapper[4492]: I1126 07:04:09.658680 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b05f3cd2-b60c-4dc3-9455-bf23335b678a-ovsdbserver-nb\") pod \"dnsmasq-dns-66f998579f-v5z4b\" (UID: \"b05f3cd2-b60c-4dc3-9455-bf23335b678a\") " pod="openstack/dnsmasq-dns-66f998579f-v5z4b"
Nov 26 07:04:09 crc kubenswrapper[4492]: I1126 07:04:09.658842 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b05f3cd2-b60c-4dc3-9455-bf23335b678a-dns-svc\") pod \"dnsmasq-dns-66f998579f-v5z4b\" (UID: \"b05f3cd2-b60c-4dc3-9455-bf23335b678a\") " pod="openstack/dnsmasq-dns-66f998579f-v5z4b"
Nov 26 07:04:09 crc kubenswrapper[4492]: I1126 07:04:09.709874 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-flxtb\" (UniqueName: \"kubernetes.io/projected/b05f3cd2-b60c-4dc3-9455-bf23335b678a-kube-api-access-flxtb\") pod \"dnsmasq-dns-66f998579f-v5z4b\" (UID: \"b05f3cd2-b60c-4dc3-9455-bf23335b678a\") " pod="openstack/dnsmasq-dns-66f998579f-v5z4b"
Nov 26 07:04:09 crc kubenswrapper[4492]: I1126 07:04:09.764222 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-66f998579f-v5z4b"
Nov 26 07:04:09 crc kubenswrapper[4492]: I1126 07:04:09.872768 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-2whmr"]
Nov 26 07:04:10 crc kubenswrapper[4492]: I1126 07:04:10.081745 4492 generic.go:334] "Generic (PLEG): container finished" podID="da5f9543-4f48-47b1-9164-d2e736a1bc6f" containerID="035303380df119f8448e03f483e9ccf7d1100300dd6ed9c86e63453df95493b4" exitCode=0
Nov 26 07:04:10 crc kubenswrapper[4492]: I1126 07:04:10.081947 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-576bz" event={"ID":"da5f9543-4f48-47b1-9164-d2e736a1bc6f","Type":"ContainerDied","Data":"035303380df119f8448e03f483e9ccf7d1100300dd6ed9c86e63453df95493b4"}
Nov 26 07:04:10 crc kubenswrapper[4492]: I1126 07:04:10.113031 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kh56n" event={"ID":"b9ccf1b8-a1eb-4501-b96c-929efdde7c47","Type":"ContainerStarted","Data":"e71ca96ed9f545971c429ea0c754d993eb4b8ae1c656206017aabb06f50144e2"}
Nov 26 07:04:10 crc kubenswrapper[4492]: I1126 07:04:10.163480 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2nmf5" event={"ID":"0fd9ab99-d364-4442-a203-ce8a7b838cf3","Type":"ContainerStarted","Data":"6dfe90b3854d34ce2f548bdd89ca965bb159362e7e4057baab45fd705cb8e3dd"}
Nov 26 07:04:10 crc kubenswrapper[4492]: I1126 07:04:10.175825 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-2whmr" event={"ID":"1b32cec9-eeea-4fcd-b20c-ad500f516fa6","Type":"ContainerStarted","Data":"c443b5f3893974a6e06eca8b49de61ebb6f04663e0ede9631de93981b267d272"}
Nov 26 07:04:10 crc kubenswrapper[4492]: I1126 07:04:10.184665 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-657d948df5-j4blw" podUID="e068f599-17d9-4fd1-8fca-d74938c89110" containerName="dnsmasq-dns" containerID="cri-o://fe50aad6586bd863bd10976427d326eb86afe11fa0f019af82348aa874d7fce7" gracePeriod=10
Nov 26 07:04:10 crc kubenswrapper[4492]: I1126 07:04:10.184739 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-657d948df5-j4blw" event={"ID":"e068f599-17d9-4fd1-8fca-d74938c89110","Type":"ContainerStarted","Data":"fe50aad6586bd863bd10976427d326eb86afe11fa0f019af82348aa874d7fce7"}
Nov 26 07:04:10 crc kubenswrapper[4492]: I1126 07:04:10.185200 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-657d948df5-j4blw"
Nov 26 07:04:10 crc kubenswrapper[4492]: I1126 07:04:10.235872 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-657d948df5-j4blw" podStartSLOduration=6.328398572 podStartE2EDuration="36.235855703s" podCreationTimestamp="2025-11-26 07:03:34 +0000 UTC" firstStartedPulling="2025-11-26 07:03:35.542338018 +0000 UTC m=+911.426226316" lastFinishedPulling="2025-11-26 07:04:05.449795149 +0000 UTC m=+941.333683447" observedRunningTime="2025-11-26 07:04:10.232875008 +0000 UTC m=+946.116763306" watchObservedRunningTime="2025-11-26 07:04:10.235855703 +0000 UTC m=+946.119744001"
Nov 26 07:04:10 crc kubenswrapper[4492]: I1126 07:04:10.420265 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-75f567bcf9-n86dv"]
Nov 26 07:04:10 crc kubenswrapper[4492]: I1126 07:04:10.718984 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-66f998579f-v5z4b"]
Nov 26 07:04:10 crc kubenswrapper[4492]: I1126 07:04:10.759808 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-85744f897f-kbhm6"
Nov 26 07:04:10 crc kubenswrapper[4492]: I1126 07:04:10.925215 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hdbqx\" (UniqueName: \"kubernetes.io/projected/ef4332f8-32f1-4ec9-a333-8b8b025151be-kube-api-access-hdbqx\") pod \"ef4332f8-32f1-4ec9-a333-8b8b025151be\" (UID: \"ef4332f8-32f1-4ec9-a333-8b8b025151be\") "
Nov 26 07:04:10 crc kubenswrapper[4492]: I1126 07:04:10.925430 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ef4332f8-32f1-4ec9-a333-8b8b025151be-config\") pod \"ef4332f8-32f1-4ec9-a333-8b8b025151be\" (UID: \"ef4332f8-32f1-4ec9-a333-8b8b025151be\") "
Nov 26 07:04:10 crc kubenswrapper[4492]: I1126 07:04:10.925461 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ef4332f8-32f1-4ec9-a333-8b8b025151be-dns-svc\") pod \"ef4332f8-32f1-4ec9-a333-8b8b025151be\" (UID: \"ef4332f8-32f1-4ec9-a333-8b8b025151be\") "
Nov 26 07:04:10 crc kubenswrapper[4492]: I1126 07:04:10.931708 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ef4332f8-32f1-4ec9-a333-8b8b025151be-kube-api-access-hdbqx" (OuterVolumeSpecName: "kube-api-access-hdbqx") pod "ef4332f8-32f1-4ec9-a333-8b8b025151be" (UID: "ef4332f8-32f1-4ec9-a333-8b8b025151be"). InnerVolumeSpecName "kube-api-access-hdbqx". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 07:04:10 crc kubenswrapper[4492]: I1126 07:04:10.985899 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ef4332f8-32f1-4ec9-a333-8b8b025151be-config" (OuterVolumeSpecName: "config") pod "ef4332f8-32f1-4ec9-a333-8b8b025151be" (UID: "ef4332f8-32f1-4ec9-a333-8b8b025151be"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 07:04:11 crc kubenswrapper[4492]: I1126 07:04:11.000131 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ef4332f8-32f1-4ec9-a333-8b8b025151be-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "ef4332f8-32f1-4ec9-a333-8b8b025151be" (UID: "ef4332f8-32f1-4ec9-a333-8b8b025151be"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 07:04:11 crc kubenswrapper[4492]: I1126 07:04:11.031436 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hdbqx\" (UniqueName: \"kubernetes.io/projected/ef4332f8-32f1-4ec9-a333-8b8b025151be-kube-api-access-hdbqx\") on node \"crc\" DevicePath \"\""
Nov 26 07:04:11 crc kubenswrapper[4492]: I1126 07:04:11.031488 4492 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ef4332f8-32f1-4ec9-a333-8b8b025151be-config\") on node \"crc\" DevicePath \"\""
Nov 26 07:04:11 crc kubenswrapper[4492]: I1126 07:04:11.031500 4492 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ef4332f8-32f1-4ec9-a333-8b8b025151be-dns-svc\") on node \"crc\" DevicePath \"\""
Nov 26 07:04:11 crc kubenswrapper[4492]: I1126 07:04:11.225828 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-75f567bcf9-n86dv" event={"ID":"018929d0-9b48-4fda-8f21-1c4d4f6efc4c","Type":"ContainerStarted","Data":"95ebff9cbbb40af453d29c4b668cab4c9a21eb68726d214d68f9feaa206a31fb"}
Nov 26 07:04:11 crc kubenswrapper[4492]: I1126 07:04:11.228111 4492 generic.go:334] "Generic (PLEG): container finished" podID="e068f599-17d9-4fd1-8fca-d74938c89110" containerID="fe50aad6586bd863bd10976427d326eb86afe11fa0f019af82348aa874d7fce7" exitCode=0
Nov 26 07:04:11 crc kubenswrapper[4492]: I1126 07:04:11.228164 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-657d948df5-j4blw" event={"ID":"e068f599-17d9-4fd1-8fca-d74938c89110","Type":"ContainerDied","Data":"fe50aad6586bd863bd10976427d326eb86afe11fa0f019af82348aa874d7fce7"}
Nov 26 07:04:11 crc kubenswrapper[4492]: I1126 07:04:11.229483 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-85744f897f-kbhm6" event={"ID":"ef4332f8-32f1-4ec9-a333-8b8b025151be","Type":"ContainerDied","Data":"636b64151c1488f3e0d68f0e1bc7657715425ed4f60ce2bfbfb92c7ba3ed2990"}
Nov 26 07:04:11 crc kubenswrapper[4492]: I1126 07:04:11.229519 4492 scope.go:117] "RemoveContainer" containerID="a496251bbb669b4b6dc85628df2d6cd35432e9f92ded0f1dc7e9b57eb9a0d280"
Nov 26 07:04:11 crc kubenswrapper[4492]: I1126 07:04:11.229648 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-85744f897f-kbhm6"
Nov 26 07:04:11 crc kubenswrapper[4492]: I1126 07:04:11.238830 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c4tj9" event={"ID":"a883fb65-c766-4a13-bac0-177d4ffe2de2","Type":"ContainerStarted","Data":"25a9281aadab501ac13bdd51fe0230fc2490922ad9c8717d3b353da9dc1992dd"}
Nov 26 07:04:11 crc kubenswrapper[4492]: I1126 07:04:11.241862 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-66f998579f-v5z4b" event={"ID":"b05f3cd2-b60c-4dc3-9455-bf23335b678a","Type":"ContainerStarted","Data":"bb471aa8e4c11fa404fdbe26c149a12053cb31c01f54866a20863a420714761e"}
Nov 26 07:04:11 crc kubenswrapper[4492]: I1126 07:04:11.290557 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-85744f897f-kbhm6"]
Nov 26 07:04:11 crc kubenswrapper[4492]: I1126 07:04:11.293735 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-85744f897f-kbhm6"]
Nov 26 07:04:12 crc kubenswrapper[4492]: I1126 07:04:12.255011 4492 generic.go:334] "Generic (PLEG): container finished" podID="b05f3cd2-b60c-4dc3-9455-bf23335b678a" containerID="54c7391888a2d826f38a2c3d2511eab9909b6c6eaf0432b604f32e3adcf0de5c" exitCode=0
Nov 26 07:04:12 crc kubenswrapper[4492]: I1126 07:04:12.255821 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-66f998579f-v5z4b" event={"ID":"b05f3cd2-b60c-4dc3-9455-bf23335b678a","Type":"ContainerDied","Data":"54c7391888a2d826f38a2c3d2511eab9909b6c6eaf0432b604f32e3adcf0de5c"}
Nov 26 07:04:12 crc kubenswrapper[4492]: I1126 07:04:12.259772 4492 generic.go:334] "Generic (PLEG): container finished" podID="018929d0-9b48-4fda-8f21-1c4d4f6efc4c" containerID="90cfa4141819d30f923aeb3d74ab72c8b62177f58e7fcd2372066ac5dee6a7d8" exitCode=0
Nov 26 07:04:12 crc kubenswrapper[4492]: I1126 07:04:12.259841 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-75f567bcf9-n86dv" event={"ID":"018929d0-9b48-4fda-8f21-1c4d4f6efc4c","Type":"ContainerDied","Data":"90cfa4141819d30f923aeb3d74ab72c8b62177f58e7fcd2372066ac5dee6a7d8"}
Nov 26 07:04:12 crc kubenswrapper[4492]: I1126 07:04:12.268704 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-576bz" event={"ID":"da5f9543-4f48-47b1-9164-d2e736a1bc6f","Type":"ContainerStarted","Data":"6dee9d093fe1e642db407e8cfbad1545cc4ba2155f3b295b7a8f55d6dc6ce9b6"}
Nov 26 07:04:12 crc kubenswrapper[4492]: I1126 07:04:12.291548 4492 generic.go:334] "Generic (PLEG): container finished" podID="b9ccf1b8-a1eb-4501-b96c-929efdde7c47" containerID="e71ca96ed9f545971c429ea0c754d993eb4b8ae1c656206017aabb06f50144e2" exitCode=0
Nov 26 07:04:12 crc kubenswrapper[4492]: I1126 07:04:12.291778 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kh56n" event={"ID":"b9ccf1b8-a1eb-4501-b96c-929efdde7c47","Type":"ContainerDied","Data":"e71ca96ed9f545971c429ea0c754d993eb4b8ae1c656206017aabb06f50144e2"}
Nov 26 07:04:12 crc kubenswrapper[4492]: I1126 07:04:12.326730 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-576bz" podStartSLOduration=25.229050333 podStartE2EDuration="29.326717073s" podCreationTimestamp="2025-11-26 07:03:43 +0000 UTC" firstStartedPulling="2025-11-26 07:04:07.898683314 +0000 UTC m=+943.782571612" lastFinishedPulling="2025-11-26 07:04:11.996350064 +0000 UTC m=+947.880238352" observedRunningTime="2025-11-26 07:04:12.322847216 +0000 UTC m=+948.206735514" watchObservedRunningTime="2025-11-26 07:04:12.326717073 +0000 UTC m=+948.210605361"
Nov 26 07:04:12 crc kubenswrapper[4492]: I1126 07:04:12.454728 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ef4332f8-32f1-4ec9-a333-8b8b025151be" path="/var/lib/kubelet/pods/ef4332f8-32f1-4ec9-a333-8b8b025151be/volumes"
Nov 26 07:04:13 crc kubenswrapper[4492]: I1126 07:04:13.303408 4492 generic.go:334] "Generic (PLEG): container finished" podID="a883fb65-c766-4a13-bac0-177d4ffe2de2" containerID="25a9281aadab501ac13bdd51fe0230fc2490922ad9c8717d3b353da9dc1992dd" exitCode=0
Nov 26 07:04:13 crc kubenswrapper[4492]: I1126 07:04:13.303458 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c4tj9" event={"ID":"a883fb65-c766-4a13-bac0-177d4ffe2de2","Type":"ContainerDied","Data":"25a9281aadab501ac13bdd51fe0230fc2490922ad9c8717d3b353da9dc1992dd"}
Nov 26 07:04:13 crc kubenswrapper[4492]: I1126 07:04:13.306594 4492 generic.go:334] "Generic (PLEG): container finished" podID="0fd9ab99-d364-4442-a203-ce8a7b838cf3" containerID="6dfe90b3854d34ce2f548bdd89ca965bb159362e7e4057baab45fd705cb8e3dd" exitCode=0
Nov 26 07:04:13 crc kubenswrapper[4492]: I1126 07:04:13.307338 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2nmf5" event={"ID":"0fd9ab99-d364-4442-a203-ce8a7b838cf3","Type":"ContainerDied","Data":"6dfe90b3854d34ce2f548bdd89ca965bb159362e7e4057baab45fd705cb8e3dd"}
Nov 26 07:04:13 crc kubenswrapper[4492]: I1126 07:04:13.657712 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-576bz"
Nov 26 07:04:13 crc kubenswrapper[4492]: I1126 07:04:13.657758 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-576bz"
Nov 26 07:04:13 crc kubenswrapper[4492]: I1126 07:04:13.694775 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-576bz"
Nov 26 07:04:13 crc kubenswrapper[4492]: I1126 07:04:13.955619 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/memcached-0"
Nov 26 07:04:15 crc kubenswrapper[4492]: I1126 07:04:15.330722 4492 generic.go:334] "Generic (PLEG): container finished" podID="2772f93f-41fd-4817-9c9f-3932e094b0ac" containerID="76f6721ef47097a99c3712bac699c0d079baa416a4d1de201bf205264cfbe608" exitCode=0
Nov 26 07:04:15 crc kubenswrapper[4492]: I1126 07:04:15.330803 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"2772f93f-41fd-4817-9c9f-3932e094b0ac","Type":"ContainerDied","Data":"76f6721ef47097a99c3712bac699c0d079baa416a4d1de201bf205264cfbe608"}
Nov 26 07:04:15 crc kubenswrapper[4492]: I1126 07:04:15.332426 4492 generic.go:334] "Generic (PLEG): container finished" podID="11fb7794-a2db-4320-8946-91b18bb44afa" containerID="5c2c77394f97727de82c7b33516c1fe0e747fe0783e4bdce1e1269298f902bdb" exitCode=0
Nov 26 07:04:15 crc kubenswrapper[4492]: I1126 07:04:15.332466 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"11fb7794-a2db-4320-8946-91b18bb44afa","Type":"ContainerDied","Data":"5c2c77394f97727de82c7b33516c1fe0e747fe0783e4bdce1e1269298f902bdb"}
Nov 26 07:04:15 crc kubenswrapper[4492]: I1126 07:04:15.878970 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-657d948df5-j4blw"
Need to start a new one" pod="openstack/dnsmasq-dns-657d948df5-j4blw" Nov 26 07:04:16 crc kubenswrapper[4492]: I1126 07:04:16.047975 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-44xsd\" (UniqueName: \"kubernetes.io/projected/e068f599-17d9-4fd1-8fca-d74938c89110-kube-api-access-44xsd\") pod \"e068f599-17d9-4fd1-8fca-d74938c89110\" (UID: \"e068f599-17d9-4fd1-8fca-d74938c89110\") " Nov 26 07:04:16 crc kubenswrapper[4492]: I1126 07:04:16.048099 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e068f599-17d9-4fd1-8fca-d74938c89110-dns-svc\") pod \"e068f599-17d9-4fd1-8fca-d74938c89110\" (UID: \"e068f599-17d9-4fd1-8fca-d74938c89110\") " Nov 26 07:04:16 crc kubenswrapper[4492]: I1126 07:04:16.048162 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e068f599-17d9-4fd1-8fca-d74938c89110-config\") pod \"e068f599-17d9-4fd1-8fca-d74938c89110\" (UID: \"e068f599-17d9-4fd1-8fca-d74938c89110\") " Nov 26 07:04:16 crc kubenswrapper[4492]: I1126 07:04:16.059337 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e068f599-17d9-4fd1-8fca-d74938c89110-kube-api-access-44xsd" (OuterVolumeSpecName: "kube-api-access-44xsd") pod "e068f599-17d9-4fd1-8fca-d74938c89110" (UID: "e068f599-17d9-4fd1-8fca-d74938c89110"). InnerVolumeSpecName "kube-api-access-44xsd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:04:16 crc kubenswrapper[4492]: I1126 07:04:16.096151 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e068f599-17d9-4fd1-8fca-d74938c89110-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "e068f599-17d9-4fd1-8fca-d74938c89110" (UID: "e068f599-17d9-4fd1-8fca-d74938c89110"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:04:16 crc kubenswrapper[4492]: I1126 07:04:16.103575 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e068f599-17d9-4fd1-8fca-d74938c89110-config" (OuterVolumeSpecName: "config") pod "e068f599-17d9-4fd1-8fca-d74938c89110" (UID: "e068f599-17d9-4fd1-8fca-d74938c89110"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:04:16 crc kubenswrapper[4492]: I1126 07:04:16.150255 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-44xsd\" (UniqueName: \"kubernetes.io/projected/e068f599-17d9-4fd1-8fca-d74938c89110-kube-api-access-44xsd\") on node \"crc\" DevicePath \"\"" Nov 26 07:04:16 crc kubenswrapper[4492]: I1126 07:04:16.150286 4492 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e068f599-17d9-4fd1-8fca-d74938c89110-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 07:04:16 crc kubenswrapper[4492]: I1126 07:04:16.150296 4492 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e068f599-17d9-4fd1-8fca-d74938c89110-config\") on node \"crc\" DevicePath \"\"" Nov 26 07:04:16 crc kubenswrapper[4492]: I1126 07:04:16.344242 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-657d948df5-j4blw" event={"ID":"e068f599-17d9-4fd1-8fca-d74938c89110","Type":"ContainerDied","Data":"9e0d611b70f013b36f31e50674522a705a0cacea7db9550447fb82c5b57142c6"} Nov 26 07:04:16 crc kubenswrapper[4492]: I1126 07:04:16.344306 4492 scope.go:117] "RemoveContainer" containerID="fe50aad6586bd863bd10976427d326eb86afe11fa0f019af82348aa874d7fce7" Nov 26 07:04:16 crc kubenswrapper[4492]: I1126 07:04:16.344260 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-657d948df5-j4blw" Nov 26 07:04:16 crc kubenswrapper[4492]: I1126 07:04:16.376544 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-657d948df5-j4blw"] Nov 26 07:04:16 crc kubenswrapper[4492]: I1126 07:04:16.382234 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-657d948df5-j4blw"] Nov 26 07:04:16 crc kubenswrapper[4492]: I1126 07:04:16.449840 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e068f599-17d9-4fd1-8fca-d74938c89110" path="/var/lib/kubelet/pods/e068f599-17d9-4fd1-8fca-d74938c89110/volumes" Nov 26 07:04:17 crc kubenswrapper[4492]: I1126 07:04:17.930387 4492 scope.go:117] "RemoveContainer" containerID="2881fdd9cc380266076fc339a53601bf207514a8b59c814a593a32c6b9f4b07e" Nov 26 07:04:18 crc kubenswrapper[4492]: I1126 07:04:18.391527 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"11fb7794-a2db-4320-8946-91b18bb44afa","Type":"ContainerStarted","Data":"8c9add5bd2e0d21f18688825eebfeab74d524bd09235528bc40cf4ab4294842a"} Nov 26 07:04:18 crc kubenswrapper[4492]: I1126 07:04:18.395682 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kh56n" event={"ID":"b9ccf1b8-a1eb-4501-b96c-929efdde7c47","Type":"ContainerStarted","Data":"c8fa541244d89f0dd6566869f9096b18ab4e232557c168d4547df0c00453667a"} Nov 26 07:04:18 crc kubenswrapper[4492]: I1126 07:04:18.435428 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-galera-0" podStartSLOduration=16.274398621 podStartE2EDuration="43.435403375s" podCreationTimestamp="2025-11-26 07:03:35 +0000 UTC" firstStartedPulling="2025-11-26 07:03:38.288941379 +0000 UTC m=+914.172829667" lastFinishedPulling="2025-11-26 07:04:05.449946124 +0000 UTC m=+941.333834421" observedRunningTime="2025-11-26 07:04:18.423360493 +0000 UTC m=+954.307248791" watchObservedRunningTime="2025-11-26 07:04:18.435403375 +0000 UTC m=+954.319291674" Nov 26 07:04:18 crc 
kubenswrapper[4492]: I1126 07:04:18.450317 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-kh56n" podStartSLOduration=3.322144243 podStartE2EDuration="12.450297439s" podCreationTimestamp="2025-11-26 07:04:06 +0000 UTC" firstStartedPulling="2025-11-26 07:04:08.912370536 +0000 UTC m=+944.796258834" lastFinishedPulling="2025-11-26 07:04:18.040523732 +0000 UTC m=+953.924412030" observedRunningTime="2025-11-26 07:04:18.437763663 +0000 UTC m=+954.321651961" watchObservedRunningTime="2025-11-26 07:04:18.450297439 +0000 UTC m=+954.334185737" Nov 26 07:04:19 crc kubenswrapper[4492]: I1126 07:04:19.414738 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c4tj9" event={"ID":"a883fb65-c766-4a13-bac0-177d4ffe2de2","Type":"ContainerStarted","Data":"667fe7f9fac92792f20759960bbdb77314e590e81b72b869b4c91e79661f1c69"} Nov 26 07:04:19 crc kubenswrapper[4492]: I1126 07:04:19.417329 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"40545f10-cf57-45a2-b500-33811a633cc8","Type":"ContainerStarted","Data":"76a7c0b5038f765e76d3aa6e8e983bc2164a5b2cf3b9ab228b2e5ea04c7eae96"} Nov 26 07:04:19 crc kubenswrapper[4492]: I1126 07:04:19.417364 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"40545f10-cf57-45a2-b500-33811a633cc8","Type":"ContainerStarted","Data":"e48427d7e32d197980766865adba84042c1d98d27e8a7854116ac0c0f30ac42f"} Nov 26 07:04:19 crc kubenswrapper[4492]: I1126 07:04:19.419149 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-2whmr" event={"ID":"1b32cec9-eeea-4fcd-b20c-ad500f516fa6","Type":"ContainerStarted","Data":"5accfd5c520d453fab79f17d61a4b5ed565248fa4e147e0603b6228983c69d08"} Nov 26 07:04:19 crc kubenswrapper[4492]: I1126 07:04:19.420520 4492 generic.go:334] "Generic (PLEG): container finished" podID="68e83779-7285-47ce-927b-e3f862af6367" containerID="fe67368fc1e5ba1cb5658efee7b39b7ce7cb7c98edf44a46b8c3ddb642a57bf4" exitCode=0 Nov 26 07:04:19 crc kubenswrapper[4492]: I1126 07:04:19.420559 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-hrhlg" event={"ID":"68e83779-7285-47ce-927b-e3f862af6367","Type":"ContainerDied","Data":"fe67368fc1e5ba1cb5658efee7b39b7ce7cb7c98edf44a46b8c3ddb642a57bf4"} Nov 26 07:04:19 crc kubenswrapper[4492]: I1126 07:04:19.422309 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-djlnq" event={"ID":"cd517ac8-3f42-4406-8bb2-dd7f1b87daf7","Type":"ContainerStarted","Data":"62e5b39a66cfb9dc12672187011d393e7dbbbf55fb1e47ef12288efbe9e03f7c"} Nov 26 07:04:19 crc kubenswrapper[4492]: I1126 07:04:19.422675 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-djlnq" Nov 26 07:04:19 crc kubenswrapper[4492]: I1126 07:04:19.431465 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"e87fc594-8585-4df8-97e6-2abda24b3fcc","Type":"ContainerStarted","Data":"a719458eb60bbdbcb025e3fb2492fbcf8afdf83a800d562913a7e2a641448a2a"} Nov 26 07:04:19 crc kubenswrapper[4492]: I1126 07:04:19.431505 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"e87fc594-8585-4df8-97e6-2abda24b3fcc","Type":"ContainerStarted","Data":"7bbbc57134c69671f31a5cc06287ab23461672dd8ce803c0bac92a891b008378"} Nov 26 07:04:19 crc kubenswrapper[4492]: I1126 
07:04:19.433052 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2nmf5" event={"ID":"0fd9ab99-d364-4442-a203-ce8a7b838cf3","Type":"ContainerStarted","Data":"2ad589ae4993de38d6b48392104ac2e0cafe6527ca5f9030066e0b3d3d49e4a9"} Nov 26 07:04:19 crc kubenswrapper[4492]: I1126 07:04:19.434915 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-66f998579f-v5z4b" event={"ID":"b05f3cd2-b60c-4dc3-9455-bf23335b678a","Type":"ContainerStarted","Data":"d3c188c850d87c7c792ed41ff0caf5ac9c8200733029aedb42a8ae459b0d664d"} Nov 26 07:04:19 crc kubenswrapper[4492]: I1126 07:04:19.435038 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-66f998579f-v5z4b" Nov 26 07:04:19 crc kubenswrapper[4492]: I1126 07:04:19.436895 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"2772f93f-41fd-4817-9c9f-3932e094b0ac","Type":"ContainerStarted","Data":"03101e890deb7136db0e3c63972d14577e2738339a9ac321f3adff0ab21f972d"} Nov 26 07:04:19 crc kubenswrapper[4492]: I1126 07:04:19.441085 4492 patch_prober.go:28] interesting pod/machine-config-daemon-6blv7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 07:04:19 crc kubenswrapper[4492]: I1126 07:04:19.441166 4492 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 07:04:19 crc kubenswrapper[4492]: I1126 07:04:19.441230 4492 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" Nov 26 07:04:19 crc kubenswrapper[4492]: I1126 07:04:19.441720 4492 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"a50e8acb1a9896b6c0b164453458208e77d6a13aa21290189661d9ca53c2668b"} pod="openshift-machine-config-operator/machine-config-daemon-6blv7" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 07:04:19 crc kubenswrapper[4492]: I1126 07:04:19.441783 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" containerID="cri-o://a50e8acb1a9896b6c0b164453458208e77d6a13aa21290189661d9ca53c2668b" gracePeriod=600 Nov 26 07:04:19 crc kubenswrapper[4492]: I1126 07:04:19.442688 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-75f567bcf9-n86dv" event={"ID":"018929d0-9b48-4fda-8f21-1c4d4f6efc4c","Type":"ContainerStarted","Data":"27be265b4f984a85691852ae9107da8bba23556f734cc283361008fb64d5654c"} Nov 26 07:04:19 crc kubenswrapper[4492]: I1126 07:04:19.442824 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-75f567bcf9-n86dv" Nov 26 07:04:19 crc kubenswrapper[4492]: I1126 07:04:19.454611 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-c4tj9" 
podStartSLOduration=24.472431087 podStartE2EDuration="33.454600327s" podCreationTimestamp="2025-11-26 07:03:46 +0000 UTC" firstStartedPulling="2025-11-26 07:04:09.059305099 +0000 UTC m=+944.943193397" lastFinishedPulling="2025-11-26 07:04:18.041474339 +0000 UTC m=+953.925362637" observedRunningTime="2025-11-26 07:04:19.45299051 +0000 UTC m=+955.336878808" watchObservedRunningTime="2025-11-26 07:04:19.454600327 +0000 UTC m=+955.338488625" Nov 26 07:04:19 crc kubenswrapper[4492]: I1126 07:04:19.501790 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-0" podStartSLOduration=19.647370246 podStartE2EDuration="33.501779883s" podCreationTimestamp="2025-11-26 07:03:46 +0000 UTC" firstStartedPulling="2025-11-26 07:04:04.099861675 +0000 UTC m=+939.983749973" lastFinishedPulling="2025-11-26 07:04:17.954271322 +0000 UTC m=+953.838159610" observedRunningTime="2025-11-26 07:04:19.487882012 +0000 UTC m=+955.371770310" watchObservedRunningTime="2025-11-26 07:04:19.501779883 +0000 UTC m=+955.385668181" Nov 26 07:04:19 crc kubenswrapper[4492]: I1126 07:04:19.536788 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-75f567bcf9-n86dv" podStartSLOduration=11.536777003 podStartE2EDuration="11.536777003s" podCreationTimestamp="2025-11-26 07:04:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:04:19.535611402 +0000 UTC m=+955.419499700" watchObservedRunningTime="2025-11-26 07:04:19.536777003 +0000 UTC m=+955.420665302" Nov 26 07:04:19 crc kubenswrapper[4492]: I1126 07:04:19.590127 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-2nmf5" podStartSLOduration=7.478095085 podStartE2EDuration="16.590110169s" podCreationTimestamp="2025-11-26 07:04:03 +0000 UTC" firstStartedPulling="2025-11-26 07:04:08.920708201 +0000 UTC m=+944.804596500" lastFinishedPulling="2025-11-26 07:04:18.032723286 +0000 UTC m=+953.916611584" observedRunningTime="2025-11-26 07:04:19.589346513 +0000 UTC m=+955.473234821" watchObservedRunningTime="2025-11-26 07:04:19.590110169 +0000 UTC m=+955.473998467" Nov 26 07:04:19 crc kubenswrapper[4492]: I1126 07:04:19.629518 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-metrics-2whmr" podStartSLOduration=3.5084613669999998 podStartE2EDuration="11.629506461s" podCreationTimestamp="2025-11-26 07:04:08 +0000 UTC" firstStartedPulling="2025-11-26 07:04:09.922412153 +0000 UTC m=+945.806300451" lastFinishedPulling="2025-11-26 07:04:18.043457247 +0000 UTC m=+953.927345545" observedRunningTime="2025-11-26 07:04:19.626659438 +0000 UTC m=+955.510547736" watchObservedRunningTime="2025-11-26 07:04:19.629506461 +0000 UTC m=+955.513394759" Nov 26 07:04:19 crc kubenswrapper[4492]: I1126 07:04:19.659243 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-66f998579f-v5z4b" podStartSLOduration=10.659232692 podStartE2EDuration="10.659232692s" podCreationTimestamp="2025-11-26 07:04:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:04:19.656770021 +0000 UTC m=+955.540658319" watchObservedRunningTime="2025-11-26 07:04:19.659232692 +0000 UTC m=+955.543120980" Nov 26 07:04:19 crc kubenswrapper[4492]: I1126 07:04:19.718942 4492 kubelet.go:2542] "SyncLoop 
(probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-0" Nov 26 07:04:19 crc kubenswrapper[4492]: I1126 07:04:19.727325 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-0" podStartSLOduration=24.228769377 podStartE2EDuration="36.727311282s" podCreationTimestamp="2025-11-26 07:03:43 +0000 UTC" firstStartedPulling="2025-11-26 07:04:05.453453639 +0000 UTC m=+941.337341936" lastFinishedPulling="2025-11-26 07:04:17.951995543 +0000 UTC m=+953.835883841" observedRunningTime="2025-11-26 07:04:19.715575647 +0000 UTC m=+955.599463945" watchObservedRunningTime="2025-11-26 07:04:19.727311282 +0000 UTC m=+955.611199580" Nov 26 07:04:19 crc kubenswrapper[4492]: I1126 07:04:19.771945 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-cell1-galera-0" podStartSLOduration=-9223371994.082846 podStartE2EDuration="42.771929953s" podCreationTimestamp="2025-11-26 07:03:37 +0000 UTC" firstStartedPulling="2025-11-26 07:03:39.613347006 +0000 UTC m=+915.497235305" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:04:19.7663227 +0000 UTC m=+955.650210998" watchObservedRunningTime="2025-11-26 07:04:19.771929953 +0000 UTC m=+955.655818251" Nov 26 07:04:19 crc kubenswrapper[4492]: I1126 07:04:19.805285 4492 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-657d948df5-j4blw" podUID="e068f599-17d9-4fd1-8fca-d74938c89110" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.94:5353: i/o timeout" Nov 26 07:04:19 crc kubenswrapper[4492]: I1126 07:04:19.837608 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-djlnq" podStartSLOduration=22.339340522 podStartE2EDuration="34.837593422s" podCreationTimestamp="2025-11-26 07:03:45 +0000 UTC" firstStartedPulling="2025-11-26 07:04:05.453780543 +0000 UTC m=+941.337668842" lastFinishedPulling="2025-11-26 07:04:17.952033444 +0000 UTC m=+953.835921742" observedRunningTime="2025-11-26 07:04:19.831219888 +0000 UTC m=+955.715108186" watchObservedRunningTime="2025-11-26 07:04:19.837593422 +0000 UTC m=+955.721481710" Nov 26 07:04:20 crc kubenswrapper[4492]: I1126 07:04:20.458137 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-hrhlg" event={"ID":"68e83779-7285-47ce-927b-e3f862af6367","Type":"ContainerStarted","Data":"4c5aad6c41d69c8ddddfb583135fadffdf641390ae2f8724dd4379af4be78719"} Nov 26 07:04:20 crc kubenswrapper[4492]: I1126 07:04:20.459498 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-hrhlg" event={"ID":"68e83779-7285-47ce-927b-e3f862af6367","Type":"ContainerStarted","Data":"94d1562852dd151f7a17226590633e1f1a8d6aeaa9095ee17961b162e4dd2e33"} Nov 26 07:04:20 crc kubenswrapper[4492]: I1126 07:04:20.459573 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-hrhlg" Nov 26 07:04:20 crc kubenswrapper[4492]: I1126 07:04:20.459637 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-hrhlg" Nov 26 07:04:20 crc kubenswrapper[4492]: I1126 07:04:20.463650 4492 generic.go:334] "Generic (PLEG): container finished" podID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerID="a50e8acb1a9896b6c0b164453458208e77d6a13aa21290189661d9ca53c2668b" exitCode=0 Nov 26 07:04:20 crc kubenswrapper[4492]: I1126 07:04:20.463741 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-6blv7" event={"ID":"04bf18ad-d2a1-4b30-a3fa-2b6247363c82","Type":"ContainerDied","Data":"a50e8acb1a9896b6c0b164453458208e77d6a13aa21290189661d9ca53c2668b"} Nov 26 07:04:20 crc kubenswrapper[4492]: I1126 07:04:20.463833 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" event={"ID":"04bf18ad-d2a1-4b30-a3fa-2b6247363c82","Type":"ContainerStarted","Data":"087b801b537b43d8dae36da1027953befbb4ce83f773382d5e7a1b8510080157"} Nov 26 07:04:20 crc kubenswrapper[4492]: I1126 07:04:20.463903 4492 scope.go:117] "RemoveContainer" containerID="3a3d7f8ad7361b1a0985dafaf6a7904b1bcd29d5ae978e67890841e77797b9ac" Nov 26 07:04:20 crc kubenswrapper[4492]: I1126 07:04:20.505441 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-ovs-hrhlg" podStartSLOduration=23.030273557 podStartE2EDuration="35.505421977s" podCreationTimestamp="2025-11-26 07:03:45 +0000 UTC" firstStartedPulling="2025-11-26 07:04:05.465856818 +0000 UTC m=+941.349745117" lastFinishedPulling="2025-11-26 07:04:17.941005249 +0000 UTC m=+953.824893537" observedRunningTime="2025-11-26 07:04:20.49337142 +0000 UTC m=+956.377259708" watchObservedRunningTime="2025-11-26 07:04:20.505421977 +0000 UTC m=+956.389310276" Nov 26 07:04:20 crc kubenswrapper[4492]: I1126 07:04:20.719766 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-0" Nov 26 07:04:20 crc kubenswrapper[4492]: I1126 07:04:20.903940 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Nov 26 07:04:20 crc kubenswrapper[4492]: I1126 07:04:20.940751 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-75f567bcf9-n86dv"] Nov 26 07:04:20 crc kubenswrapper[4492]: I1126 07:04:20.991623 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6449747765-nngzp"] Nov 26 07:04:20 crc kubenswrapper[4492]: E1126 07:04:20.992098 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e068f599-17d9-4fd1-8fca-d74938c89110" containerName="dnsmasq-dns" Nov 26 07:04:20 crc kubenswrapper[4492]: I1126 07:04:20.992190 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="e068f599-17d9-4fd1-8fca-d74938c89110" containerName="dnsmasq-dns" Nov 26 07:04:20 crc kubenswrapper[4492]: E1126 07:04:20.992273 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e068f599-17d9-4fd1-8fca-d74938c89110" containerName="init" Nov 26 07:04:20 crc kubenswrapper[4492]: I1126 07:04:20.992331 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="e068f599-17d9-4fd1-8fca-d74938c89110" containerName="init" Nov 26 07:04:20 crc kubenswrapper[4492]: E1126 07:04:20.992388 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ef4332f8-32f1-4ec9-a333-8b8b025151be" containerName="init" Nov 26 07:04:20 crc kubenswrapper[4492]: I1126 07:04:20.992441 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="ef4332f8-32f1-4ec9-a333-8b8b025151be" containerName="init" Nov 26 07:04:20 crc kubenswrapper[4492]: I1126 07:04:20.992623 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="e068f599-17d9-4fd1-8fca-d74938c89110" containerName="dnsmasq-dns" Nov 26 07:04:20 crc kubenswrapper[4492]: I1126 07:04:20.992690 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="ef4332f8-32f1-4ec9-a333-8b8b025151be" containerName="init" 
Nov 26 07:04:20 crc kubenswrapper[4492]: I1126 07:04:20.993482 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6449747765-nngzp" Nov 26 07:04:21 crc kubenswrapper[4492]: I1126 07:04:21.007019 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6449747765-nngzp"] Nov 26 07:04:21 crc kubenswrapper[4492]: I1126 07:04:21.065869 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5bc62996-17f2-4415-a9d5-3219cfb079f9-ovsdbserver-nb\") pod \"dnsmasq-dns-6449747765-nngzp\" (UID: \"5bc62996-17f2-4415-a9d5-3219cfb079f9\") " pod="openstack/dnsmasq-dns-6449747765-nngzp" Nov 26 07:04:21 crc kubenswrapper[4492]: I1126 07:04:21.066390 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5bc62996-17f2-4415-a9d5-3219cfb079f9-ovsdbserver-sb\") pod \"dnsmasq-dns-6449747765-nngzp\" (UID: \"5bc62996-17f2-4415-a9d5-3219cfb079f9\") " pod="openstack/dnsmasq-dns-6449747765-nngzp" Nov 26 07:04:21 crc kubenswrapper[4492]: I1126 07:04:21.066479 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5bc62996-17f2-4415-a9d5-3219cfb079f9-dns-svc\") pod \"dnsmasq-dns-6449747765-nngzp\" (UID: \"5bc62996-17f2-4415-a9d5-3219cfb079f9\") " pod="openstack/dnsmasq-dns-6449747765-nngzp" Nov 26 07:04:21 crc kubenswrapper[4492]: I1126 07:04:21.066683 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bsg78\" (UniqueName: \"kubernetes.io/projected/5bc62996-17f2-4415-a9d5-3219cfb079f9-kube-api-access-bsg78\") pod \"dnsmasq-dns-6449747765-nngzp\" (UID: \"5bc62996-17f2-4415-a9d5-3219cfb079f9\") " pod="openstack/dnsmasq-dns-6449747765-nngzp" Nov 26 07:04:21 crc kubenswrapper[4492]: I1126 07:04:21.066754 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5bc62996-17f2-4415-a9d5-3219cfb079f9-config\") pod \"dnsmasq-dns-6449747765-nngzp\" (UID: \"5bc62996-17f2-4415-a9d5-3219cfb079f9\") " pod="openstack/dnsmasq-dns-6449747765-nngzp" Nov 26 07:04:21 crc kubenswrapper[4492]: I1126 07:04:21.168485 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5bc62996-17f2-4415-a9d5-3219cfb079f9-config\") pod \"dnsmasq-dns-6449747765-nngzp\" (UID: \"5bc62996-17f2-4415-a9d5-3219cfb079f9\") " pod="openstack/dnsmasq-dns-6449747765-nngzp" Nov 26 07:04:21 crc kubenswrapper[4492]: I1126 07:04:21.168602 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5bc62996-17f2-4415-a9d5-3219cfb079f9-ovsdbserver-nb\") pod \"dnsmasq-dns-6449747765-nngzp\" (UID: \"5bc62996-17f2-4415-a9d5-3219cfb079f9\") " pod="openstack/dnsmasq-dns-6449747765-nngzp" Nov 26 07:04:21 crc kubenswrapper[4492]: I1126 07:04:21.168632 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5bc62996-17f2-4415-a9d5-3219cfb079f9-ovsdbserver-sb\") pod \"dnsmasq-dns-6449747765-nngzp\" (UID: \"5bc62996-17f2-4415-a9d5-3219cfb079f9\") " pod="openstack/dnsmasq-dns-6449747765-nngzp" Nov 26 07:04:21 crc 
kubenswrapper[4492]: I1126 07:04:21.168648 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5bc62996-17f2-4415-a9d5-3219cfb079f9-dns-svc\") pod \"dnsmasq-dns-6449747765-nngzp\" (UID: \"5bc62996-17f2-4415-a9d5-3219cfb079f9\") " pod="openstack/dnsmasq-dns-6449747765-nngzp" Nov 26 07:04:21 crc kubenswrapper[4492]: I1126 07:04:21.168694 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bsg78\" (UniqueName: \"kubernetes.io/projected/5bc62996-17f2-4415-a9d5-3219cfb079f9-kube-api-access-bsg78\") pod \"dnsmasq-dns-6449747765-nngzp\" (UID: \"5bc62996-17f2-4415-a9d5-3219cfb079f9\") " pod="openstack/dnsmasq-dns-6449747765-nngzp" Nov 26 07:04:21 crc kubenswrapper[4492]: I1126 07:04:21.169773 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5bc62996-17f2-4415-a9d5-3219cfb079f9-dns-svc\") pod \"dnsmasq-dns-6449747765-nngzp\" (UID: \"5bc62996-17f2-4415-a9d5-3219cfb079f9\") " pod="openstack/dnsmasq-dns-6449747765-nngzp" Nov 26 07:04:21 crc kubenswrapper[4492]: I1126 07:04:21.169844 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5bc62996-17f2-4415-a9d5-3219cfb079f9-config\") pod \"dnsmasq-dns-6449747765-nngzp\" (UID: \"5bc62996-17f2-4415-a9d5-3219cfb079f9\") " pod="openstack/dnsmasq-dns-6449747765-nngzp" Nov 26 07:04:21 crc kubenswrapper[4492]: I1126 07:04:21.169957 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5bc62996-17f2-4415-a9d5-3219cfb079f9-ovsdbserver-sb\") pod \"dnsmasq-dns-6449747765-nngzp\" (UID: \"5bc62996-17f2-4415-a9d5-3219cfb079f9\") " pod="openstack/dnsmasq-dns-6449747765-nngzp" Nov 26 07:04:21 crc kubenswrapper[4492]: I1126 07:04:21.170532 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5bc62996-17f2-4415-a9d5-3219cfb079f9-ovsdbserver-nb\") pod \"dnsmasq-dns-6449747765-nngzp\" (UID: \"5bc62996-17f2-4415-a9d5-3219cfb079f9\") " pod="openstack/dnsmasq-dns-6449747765-nngzp" Nov 26 07:04:21 crc kubenswrapper[4492]: I1126 07:04:21.188466 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bsg78\" (UniqueName: \"kubernetes.io/projected/5bc62996-17f2-4415-a9d5-3219cfb079f9-kube-api-access-bsg78\") pod \"dnsmasq-dns-6449747765-nngzp\" (UID: \"5bc62996-17f2-4415-a9d5-3219cfb079f9\") " pod="openstack/dnsmasq-dns-6449747765-nngzp" Nov 26 07:04:21 crc kubenswrapper[4492]: I1126 07:04:21.314193 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-0" Nov 26 07:04:21 crc kubenswrapper[4492]: I1126 07:04:21.318352 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6449747765-nngzp" Nov 26 07:04:21 crc kubenswrapper[4492]: I1126 07:04:21.359785 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-0" Nov 26 07:04:21 crc kubenswrapper[4492]: I1126 07:04:21.501357 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-0" Nov 26 07:04:21 crc kubenswrapper[4492]: I1126 07:04:21.502483 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-75f567bcf9-n86dv" podUID="018929d0-9b48-4fda-8f21-1c4d4f6efc4c" containerName="dnsmasq-dns" containerID="cri-o://27be265b4f984a85691852ae9107da8bba23556f734cc283361008fb64d5654c" gracePeriod=10 Nov 26 07:04:21 crc kubenswrapper[4492]: I1126 07:04:21.809162 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6449747765-nngzp"] Nov 26 07:04:21 crc kubenswrapper[4492]: I1126 07:04:21.894119 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-75f567bcf9-n86dv" Nov 26 07:04:21 crc kubenswrapper[4492]: I1126 07:04:21.982108 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/018929d0-9b48-4fda-8f21-1c4d4f6efc4c-ovsdbserver-nb\") pod \"018929d0-9b48-4fda-8f21-1c4d4f6efc4c\" (UID: \"018929d0-9b48-4fda-8f21-1c4d4f6efc4c\") " Nov 26 07:04:21 crc kubenswrapper[4492]: I1126 07:04:21.982203 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/018929d0-9b48-4fda-8f21-1c4d4f6efc4c-dns-svc\") pod \"018929d0-9b48-4fda-8f21-1c4d4f6efc4c\" (UID: \"018929d0-9b48-4fda-8f21-1c4d4f6efc4c\") " Nov 26 07:04:21 crc kubenswrapper[4492]: I1126 07:04:21.982316 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/018929d0-9b48-4fda-8f21-1c4d4f6efc4c-config\") pod \"018929d0-9b48-4fda-8f21-1c4d4f6efc4c\" (UID: \"018929d0-9b48-4fda-8f21-1c4d4f6efc4c\") " Nov 26 07:04:21 crc kubenswrapper[4492]: I1126 07:04:21.982494 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mtvnb\" (UniqueName: \"kubernetes.io/projected/018929d0-9b48-4fda-8f21-1c4d4f6efc4c-kube-api-access-mtvnb\") pod \"018929d0-9b48-4fda-8f21-1c4d4f6efc4c\" (UID: \"018929d0-9b48-4fda-8f21-1c4d4f6efc4c\") " Nov 26 07:04:22 crc kubenswrapper[4492]: I1126 07:04:22.004196 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/018929d0-9b48-4fda-8f21-1c4d4f6efc4c-kube-api-access-mtvnb" (OuterVolumeSpecName: "kube-api-access-mtvnb") pod "018929d0-9b48-4fda-8f21-1c4d4f6efc4c" (UID: "018929d0-9b48-4fda-8f21-1c4d4f6efc4c"). InnerVolumeSpecName "kube-api-access-mtvnb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:04:22 crc kubenswrapper[4492]: I1126 07:04:22.037761 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/018929d0-9b48-4fda-8f21-1c4d4f6efc4c-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "018929d0-9b48-4fda-8f21-1c4d4f6efc4c" (UID: "018929d0-9b48-4fda-8f21-1c4d4f6efc4c"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:04:22 crc kubenswrapper[4492]: I1126 07:04:22.046607 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/018929d0-9b48-4fda-8f21-1c4d4f6efc4c-config" (OuterVolumeSpecName: "config") pod "018929d0-9b48-4fda-8f21-1c4d4f6efc4c" (UID: "018929d0-9b48-4fda-8f21-1c4d4f6efc4c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:04:22 crc kubenswrapper[4492]: I1126 07:04:22.049705 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/018929d0-9b48-4fda-8f21-1c4d4f6efc4c-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "018929d0-9b48-4fda-8f21-1c4d4f6efc4c" (UID: "018929d0-9b48-4fda-8f21-1c4d4f6efc4c"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:04:22 crc kubenswrapper[4492]: I1126 07:04:22.085097 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mtvnb\" (UniqueName: \"kubernetes.io/projected/018929d0-9b48-4fda-8f21-1c4d4f6efc4c-kube-api-access-mtvnb\") on node \"crc\" DevicePath \"\"" Nov 26 07:04:22 crc kubenswrapper[4492]: I1126 07:04:22.085124 4492 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/018929d0-9b48-4fda-8f21-1c4d4f6efc4c-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 26 07:04:22 crc kubenswrapper[4492]: I1126 07:04:22.085134 4492 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/018929d0-9b48-4fda-8f21-1c4d4f6efc4c-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 07:04:22 crc kubenswrapper[4492]: I1126 07:04:22.085143 4492 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/018929d0-9b48-4fda-8f21-1c4d4f6efc4c-config\") on node \"crc\" DevicePath \"\"" Nov 26 07:04:22 crc kubenswrapper[4492]: I1126 07:04:22.133801 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-storage-0"] Nov 26 07:04:22 crc kubenswrapper[4492]: E1126 07:04:22.134101 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="018929d0-9b48-4fda-8f21-1c4d4f6efc4c" containerName="init" Nov 26 07:04:22 crc kubenswrapper[4492]: I1126 07:04:22.134117 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="018929d0-9b48-4fda-8f21-1c4d4f6efc4c" containerName="init" Nov 26 07:04:22 crc kubenswrapper[4492]: E1126 07:04:22.134128 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="018929d0-9b48-4fda-8f21-1c4d4f6efc4c" containerName="dnsmasq-dns" Nov 26 07:04:22 crc kubenswrapper[4492]: I1126 07:04:22.134135 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="018929d0-9b48-4fda-8f21-1c4d4f6efc4c" containerName="dnsmasq-dns" Nov 26 07:04:22 crc kubenswrapper[4492]: I1126 07:04:22.134305 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="018929d0-9b48-4fda-8f21-1c4d4f6efc4c" containerName="dnsmasq-dns" Nov 26 07:04:22 crc kubenswrapper[4492]: I1126 07:04:22.138116 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-storage-0" Nov 26 07:04:22 crc kubenswrapper[4492]: I1126 07:04:22.140242 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-storage-config-data" Nov 26 07:04:22 crc kubenswrapper[4492]: I1126 07:04:22.140269 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-swift-dockercfg-vcqx9" Nov 26 07:04:22 crc kubenswrapper[4492]: I1126 07:04:22.140983 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-conf" Nov 26 07:04:22 crc kubenswrapper[4492]: I1126 07:04:22.141945 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-files" Nov 26 07:04:22 crc kubenswrapper[4492]: I1126 07:04:22.161590 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Nov 26 07:04:22 crc kubenswrapper[4492]: I1126 07:04:22.188342 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"swift-storage-0\" (UID: \"b96d68d3-ed92-40be-bfed-6143b3cdca02\") " pod="openstack/swift-storage-0" Nov 26 07:04:22 crc kubenswrapper[4492]: I1126 07:04:22.188412 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/b96d68d3-ed92-40be-bfed-6143b3cdca02-lock\") pod \"swift-storage-0\" (UID: \"b96d68d3-ed92-40be-bfed-6143b3cdca02\") " pod="openstack/swift-storage-0" Nov 26 07:04:22 crc kubenswrapper[4492]: I1126 07:04:22.188452 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/b96d68d3-ed92-40be-bfed-6143b3cdca02-etc-swift\") pod \"swift-storage-0\" (UID: \"b96d68d3-ed92-40be-bfed-6143b3cdca02\") " pod="openstack/swift-storage-0" Nov 26 07:04:22 crc kubenswrapper[4492]: I1126 07:04:22.188533 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/b96d68d3-ed92-40be-bfed-6143b3cdca02-cache\") pod \"swift-storage-0\" (UID: \"b96d68d3-ed92-40be-bfed-6143b3cdca02\") " pod="openstack/swift-storage-0" Nov 26 07:04:22 crc kubenswrapper[4492]: I1126 07:04:22.188576 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v25vf\" (UniqueName: \"kubernetes.io/projected/b96d68d3-ed92-40be-bfed-6143b3cdca02-kube-api-access-v25vf\") pod \"swift-storage-0\" (UID: \"b96d68d3-ed92-40be-bfed-6143b3cdca02\") " pod="openstack/swift-storage-0" Nov 26 07:04:22 crc kubenswrapper[4492]: I1126 07:04:22.291414 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/b96d68d3-ed92-40be-bfed-6143b3cdca02-cache\") pod \"swift-storage-0\" (UID: \"b96d68d3-ed92-40be-bfed-6143b3cdca02\") " pod="openstack/swift-storage-0" Nov 26 07:04:22 crc kubenswrapper[4492]: I1126 07:04:22.291481 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v25vf\" (UniqueName: \"kubernetes.io/projected/b96d68d3-ed92-40be-bfed-6143b3cdca02-kube-api-access-v25vf\") pod \"swift-storage-0\" (UID: \"b96d68d3-ed92-40be-bfed-6143b3cdca02\") " pod="openstack/swift-storage-0" Nov 26 07:04:22 crc kubenswrapper[4492]: I1126 07:04:22.291539 4492 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"swift-storage-0\" (UID: \"b96d68d3-ed92-40be-bfed-6143b3cdca02\") " pod="openstack/swift-storage-0" Nov 26 07:04:22 crc kubenswrapper[4492]: I1126 07:04:22.291561 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/b96d68d3-ed92-40be-bfed-6143b3cdca02-lock\") pod \"swift-storage-0\" (UID: \"b96d68d3-ed92-40be-bfed-6143b3cdca02\") " pod="openstack/swift-storage-0" Nov 26 07:04:22 crc kubenswrapper[4492]: I1126 07:04:22.291591 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/b96d68d3-ed92-40be-bfed-6143b3cdca02-etc-swift\") pod \"swift-storage-0\" (UID: \"b96d68d3-ed92-40be-bfed-6143b3cdca02\") " pod="openstack/swift-storage-0" Nov 26 07:04:22 crc kubenswrapper[4492]: E1126 07:04:22.291755 4492 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 26 07:04:22 crc kubenswrapper[4492]: E1126 07:04:22.291769 4492 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 26 07:04:22 crc kubenswrapper[4492]: E1126 07:04:22.291815 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/b96d68d3-ed92-40be-bfed-6143b3cdca02-etc-swift podName:b96d68d3-ed92-40be-bfed-6143b3cdca02 nodeName:}" failed. No retries permitted until 2025-11-26 07:04:22.791798181 +0000 UTC m=+958.675686479 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/b96d68d3-ed92-40be-bfed-6143b3cdca02-etc-swift") pod "swift-storage-0" (UID: "b96d68d3-ed92-40be-bfed-6143b3cdca02") : configmap "swift-ring-files" not found Nov 26 07:04:22 crc kubenswrapper[4492]: I1126 07:04:22.292402 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/b96d68d3-ed92-40be-bfed-6143b3cdca02-cache\") pod \"swift-storage-0\" (UID: \"b96d68d3-ed92-40be-bfed-6143b3cdca02\") " pod="openstack/swift-storage-0" Nov 26 07:04:22 crc kubenswrapper[4492]: I1126 07:04:22.292771 4492 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"swift-storage-0\" (UID: \"b96d68d3-ed92-40be-bfed-6143b3cdca02\") device mount path \"/mnt/openstack/pv11\"" pod="openstack/swift-storage-0" Nov 26 07:04:22 crc kubenswrapper[4492]: I1126 07:04:22.295229 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/b96d68d3-ed92-40be-bfed-6143b3cdca02-lock\") pod \"swift-storage-0\" (UID: \"b96d68d3-ed92-40be-bfed-6143b3cdca02\") " pod="openstack/swift-storage-0" Nov 26 07:04:22 crc kubenswrapper[4492]: I1126 07:04:22.319206 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v25vf\" (UniqueName: \"kubernetes.io/projected/b96d68d3-ed92-40be-bfed-6143b3cdca02-kube-api-access-v25vf\") pod \"swift-storage-0\" (UID: \"b96d68d3-ed92-40be-bfed-6143b3cdca02\") " pod="openstack/swift-storage-0" Nov 26 07:04:22 crc kubenswrapper[4492]: I1126 07:04:22.319694 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage11-crc\" (UniqueName: 
\"kubernetes.io/local-volume/local-storage11-crc\") pod \"swift-storage-0\" (UID: \"b96d68d3-ed92-40be-bfed-6143b3cdca02\") " pod="openstack/swift-storage-0" Nov 26 07:04:22 crc kubenswrapper[4492]: I1126 07:04:22.510400 4492 generic.go:334] "Generic (PLEG): container finished" podID="018929d0-9b48-4fda-8f21-1c4d4f6efc4c" containerID="27be265b4f984a85691852ae9107da8bba23556f734cc283361008fb64d5654c" exitCode=0 Nov 26 07:04:22 crc kubenswrapper[4492]: I1126 07:04:22.510682 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-75f567bcf9-n86dv" event={"ID":"018929d0-9b48-4fda-8f21-1c4d4f6efc4c","Type":"ContainerDied","Data":"27be265b4f984a85691852ae9107da8bba23556f734cc283361008fb64d5654c"} Nov 26 07:04:22 crc kubenswrapper[4492]: I1126 07:04:22.510714 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-75f567bcf9-n86dv" event={"ID":"018929d0-9b48-4fda-8f21-1c4d4f6efc4c","Type":"ContainerDied","Data":"95ebff9cbbb40af453d29c4b668cab4c9a21eb68726d214d68f9feaa206a31fb"} Nov 26 07:04:22 crc kubenswrapper[4492]: I1126 07:04:22.510730 4492 scope.go:117] "RemoveContainer" containerID="27be265b4f984a85691852ae9107da8bba23556f734cc283361008fb64d5654c" Nov 26 07:04:22 crc kubenswrapper[4492]: I1126 07:04:22.510878 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-75f567bcf9-n86dv" Nov 26 07:04:22 crc kubenswrapper[4492]: I1126 07:04:22.516265 4492 generic.go:334] "Generic (PLEG): container finished" podID="5bc62996-17f2-4415-a9d5-3219cfb079f9" containerID="8a9f806c64ddc0fedd6019b388d9f6ed08ae7de9c66465ceee2c48f93f85d875" exitCode=0 Nov 26 07:04:22 crc kubenswrapper[4492]: I1126 07:04:22.516303 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6449747765-nngzp" event={"ID":"5bc62996-17f2-4415-a9d5-3219cfb079f9","Type":"ContainerDied","Data":"8a9f806c64ddc0fedd6019b388d9f6ed08ae7de9c66465ceee2c48f93f85d875"} Nov 26 07:04:22 crc kubenswrapper[4492]: I1126 07:04:22.516325 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6449747765-nngzp" event={"ID":"5bc62996-17f2-4415-a9d5-3219cfb079f9","Type":"ContainerStarted","Data":"b613ab9e36662d97f786b17a9ea57584a570c5bf49a3db05bcd2ad894a05acd0"} Nov 26 07:04:22 crc kubenswrapper[4492]: I1126 07:04:22.615982 4492 scope.go:117] "RemoveContainer" containerID="90cfa4141819d30f923aeb3d74ab72c8b62177f58e7fcd2372066ac5dee6a7d8" Nov 26 07:04:22 crc kubenswrapper[4492]: I1126 07:04:22.628221 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-75f567bcf9-n86dv"] Nov 26 07:04:22 crc kubenswrapper[4492]: I1126 07:04:22.631270 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-75f567bcf9-n86dv"] Nov 26 07:04:22 crc kubenswrapper[4492]: I1126 07:04:22.666843 4492 scope.go:117] "RemoveContainer" containerID="27be265b4f984a85691852ae9107da8bba23556f734cc283361008fb64d5654c" Nov 26 07:04:22 crc kubenswrapper[4492]: E1126 07:04:22.670139 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"27be265b4f984a85691852ae9107da8bba23556f734cc283361008fb64d5654c\": container with ID starting with 27be265b4f984a85691852ae9107da8bba23556f734cc283361008fb64d5654c not found: ID does not exist" containerID="27be265b4f984a85691852ae9107da8bba23556f734cc283361008fb64d5654c" Nov 26 07:04:22 crc kubenswrapper[4492]: I1126 07:04:22.670193 4492 pod_container_deletor.go:53] "DeleteContainer 
returned error" containerID={"Type":"cri-o","ID":"27be265b4f984a85691852ae9107da8bba23556f734cc283361008fb64d5654c"} err="failed to get container status \"27be265b4f984a85691852ae9107da8bba23556f734cc283361008fb64d5654c\": rpc error: code = NotFound desc = could not find container \"27be265b4f984a85691852ae9107da8bba23556f734cc283361008fb64d5654c\": container with ID starting with 27be265b4f984a85691852ae9107da8bba23556f734cc283361008fb64d5654c not found: ID does not exist" Nov 26 07:04:22 crc kubenswrapper[4492]: I1126 07:04:22.670218 4492 scope.go:117] "RemoveContainer" containerID="90cfa4141819d30f923aeb3d74ab72c8b62177f58e7fcd2372066ac5dee6a7d8" Nov 26 07:04:22 crc kubenswrapper[4492]: E1126 07:04:22.670479 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"90cfa4141819d30f923aeb3d74ab72c8b62177f58e7fcd2372066ac5dee6a7d8\": container with ID starting with 90cfa4141819d30f923aeb3d74ab72c8b62177f58e7fcd2372066ac5dee6a7d8 not found: ID does not exist" containerID="90cfa4141819d30f923aeb3d74ab72c8b62177f58e7fcd2372066ac5dee6a7d8" Nov 26 07:04:22 crc kubenswrapper[4492]: I1126 07:04:22.670501 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"90cfa4141819d30f923aeb3d74ab72c8b62177f58e7fcd2372066ac5dee6a7d8"} err="failed to get container status \"90cfa4141819d30f923aeb3d74ab72c8b62177f58e7fcd2372066ac5dee6a7d8\": rpc error: code = NotFound desc = could not find container \"90cfa4141819d30f923aeb3d74ab72c8b62177f58e7fcd2372066ac5dee6a7d8\": container with ID starting with 90cfa4141819d30f923aeb3d74ab72c8b62177f58e7fcd2372066ac5dee6a7d8 not found: ID does not exist" Nov 26 07:04:22 crc kubenswrapper[4492]: I1126 07:04:22.801209 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/b96d68d3-ed92-40be-bfed-6143b3cdca02-etc-swift\") pod \"swift-storage-0\" (UID: \"b96d68d3-ed92-40be-bfed-6143b3cdca02\") " pod="openstack/swift-storage-0" Nov 26 07:04:22 crc kubenswrapper[4492]: E1126 07:04:22.801487 4492 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 26 07:04:22 crc kubenswrapper[4492]: E1126 07:04:22.801527 4492 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 26 07:04:22 crc kubenswrapper[4492]: E1126 07:04:22.801575 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/b96d68d3-ed92-40be-bfed-6143b3cdca02-etc-swift podName:b96d68d3-ed92-40be-bfed-6143b3cdca02 nodeName:}" failed. No retries permitted until 2025-11-26 07:04:23.801558802 +0000 UTC m=+959.685447100 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/b96d68d3-ed92-40be-bfed-6143b3cdca02-etc-swift") pod "swift-storage-0" (UID: "b96d68d3-ed92-40be-bfed-6143b3cdca02") : configmap "swift-ring-files" not found Nov 26 07:04:23 crc kubenswrapper[4492]: I1126 07:04:23.343592 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-0" Nov 26 07:04:23 crc kubenswrapper[4492]: I1126 07:04:23.524668 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6449747765-nngzp" event={"ID":"5bc62996-17f2-4415-a9d5-3219cfb079f9","Type":"ContainerStarted","Data":"60384fa21f2d35fc5f7de26f3616131eeca11ebd6345a223a104f8bd041f95a3"} Nov 26 07:04:23 crc kubenswrapper[4492]: I1126 07:04:23.524745 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6449747765-nngzp" Nov 26 07:04:23 crc kubenswrapper[4492]: I1126 07:04:23.550724 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6449747765-nngzp" podStartSLOduration=3.550708176 podStartE2EDuration="3.550708176s" podCreationTimestamp="2025-11-26 07:04:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:04:23.545883775 +0000 UTC m=+959.429772072" watchObservedRunningTime="2025-11-26 07:04:23.550708176 +0000 UTC m=+959.434596474" Nov 26 07:04:23 crc kubenswrapper[4492]: I1126 07:04:23.698083 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-576bz" Nov 26 07:04:23 crc kubenswrapper[4492]: I1126 07:04:23.747146 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-0" Nov 26 07:04:23 crc kubenswrapper[4492]: I1126 07:04:23.806206 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-0" Nov 26 07:04:23 crc kubenswrapper[4492]: I1126 07:04:23.821728 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/b96d68d3-ed92-40be-bfed-6143b3cdca02-etc-swift\") pod \"swift-storage-0\" (UID: \"b96d68d3-ed92-40be-bfed-6143b3cdca02\") " pod="openstack/swift-storage-0" Nov 26 07:04:23 crc kubenswrapper[4492]: E1126 07:04:23.822327 4492 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 26 07:04:23 crc kubenswrapper[4492]: E1126 07:04:23.822352 4492 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 26 07:04:23 crc kubenswrapper[4492]: E1126 07:04:23.822399 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/b96d68d3-ed92-40be-bfed-6143b3cdca02-etc-swift podName:b96d68d3-ed92-40be-bfed-6143b3cdca02 nodeName:}" failed. No retries permitted until 2025-11-26 07:04:25.822382902 +0000 UTC m=+961.706271200 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/b96d68d3-ed92-40be-bfed-6143b3cdca02-etc-swift") pod "swift-storage-0" (UID: "b96d68d3-ed92-40be-bfed-6143b3cdca02") : configmap "swift-ring-files" not found Nov 26 07:04:23 crc kubenswrapper[4492]: I1126 07:04:23.938688 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-northd-0"] Nov 26 07:04:23 crc kubenswrapper[4492]: I1126 07:04:23.939995 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0" Nov 26 07:04:23 crc kubenswrapper[4492]: W1126 07:04:23.941698 4492 reflector.go:561] object-"openstack"/"ovnnorthd-config": failed to list *v1.ConfigMap: configmaps "ovnnorthd-config" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openstack": no relationship found between node 'crc' and this object Nov 26 07:04:23 crc kubenswrapper[4492]: E1126 07:04:23.941737 4492 reflector.go:158] "Unhandled Error" err="object-\"openstack\"/\"ovnnorthd-config\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"ovnnorthd-config\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openstack\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 26 07:04:23 crc kubenswrapper[4492]: W1126 07:04:23.941859 4492 reflector.go:561] object-"openstack"/"ovnnorthd-scripts": failed to list *v1.ConfigMap: configmaps "ovnnorthd-scripts" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openstack": no relationship found between node 'crc' and this object Nov 26 07:04:23 crc kubenswrapper[4492]: E1126 07:04:23.941871 4492 reflector.go:158] "Unhandled Error" err="object-\"openstack\"/\"ovnnorthd-scripts\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"ovnnorthd-scripts\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openstack\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 26 07:04:23 crc kubenswrapper[4492]: W1126 07:04:23.941902 4492 reflector.go:561] object-"openstack"/"ovnnorthd-ovnnorthd-dockercfg-lm2pz": failed to list *v1.Secret: secrets "ovnnorthd-ovnnorthd-dockercfg-lm2pz" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openstack": no relationship found between node 'crc' and this object Nov 26 07:04:23 crc kubenswrapper[4492]: E1126 07:04:23.941947 4492 reflector.go:158] "Unhandled Error" err="object-\"openstack\"/\"ovnnorthd-ovnnorthd-dockercfg-lm2pz\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"ovnnorthd-ovnnorthd-dockercfg-lm2pz\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openstack\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 26 07:04:23 crc kubenswrapper[4492]: W1126 07:04:23.941919 4492 reflector.go:561] object-"openstack"/"cert-ovnnorthd-ovndbs": failed to list *v1.Secret: secrets "cert-ovnnorthd-ovndbs" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openstack": no relationship found between node 'crc' and this object Nov 26 07:04:23 crc kubenswrapper[4492]: E1126 07:04:23.941972 4492 reflector.go:158] "Unhandled Error" 
err="object-\"openstack\"/\"cert-ovnnorthd-ovndbs\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"cert-ovnnorthd-ovndbs\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openstack\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 26 07:04:23 crc kubenswrapper[4492]: I1126 07:04:23.948610 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Nov 26 07:04:23 crc kubenswrapper[4492]: I1126 07:04:23.974360 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-2nmf5" Nov 26 07:04:23 crc kubenswrapper[4492]: I1126 07:04:23.974405 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-2nmf5" Nov 26 07:04:24 crc kubenswrapper[4492]: I1126 07:04:24.026611 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8efa287b-6b96-4a2b-8ce0-4a086b2821ce-scripts\") pod \"ovn-northd-0\" (UID: \"8efa287b-6b96-4a2b-8ce0-4a086b2821ce\") " pod="openstack/ovn-northd-0" Nov 26 07:04:24 crc kubenswrapper[4492]: I1126 07:04:24.026812 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/8efa287b-6b96-4a2b-8ce0-4a086b2821ce-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"8efa287b-6b96-4a2b-8ce0-4a086b2821ce\") " pod="openstack/ovn-northd-0" Nov 26 07:04:24 crc kubenswrapper[4492]: I1126 07:04:24.026934 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/8efa287b-6b96-4a2b-8ce0-4a086b2821ce-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"8efa287b-6b96-4a2b-8ce0-4a086b2821ce\") " pod="openstack/ovn-northd-0" Nov 26 07:04:24 crc kubenswrapper[4492]: I1126 07:04:24.027015 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8efa287b-6b96-4a2b-8ce0-4a086b2821ce-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"8efa287b-6b96-4a2b-8ce0-4a086b2821ce\") " pod="openstack/ovn-northd-0" Nov 26 07:04:24 crc kubenswrapper[4492]: I1126 07:04:24.027221 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/8efa287b-6b96-4a2b-8ce0-4a086b2821ce-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"8efa287b-6b96-4a2b-8ce0-4a086b2821ce\") " pod="openstack/ovn-northd-0" Nov 26 07:04:24 crc kubenswrapper[4492]: I1126 07:04:24.027319 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zv5vz\" (UniqueName: \"kubernetes.io/projected/8efa287b-6b96-4a2b-8ce0-4a086b2821ce-kube-api-access-zv5vz\") pod \"ovn-northd-0\" (UID: \"8efa287b-6b96-4a2b-8ce0-4a086b2821ce\") " pod="openstack/ovn-northd-0" Nov 26 07:04:24 crc kubenswrapper[4492]: I1126 07:04:24.027530 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8efa287b-6b96-4a2b-8ce0-4a086b2821ce-config\") pod \"ovn-northd-0\" (UID: \"8efa287b-6b96-4a2b-8ce0-4a086b2821ce\") " pod="openstack/ovn-northd-0" Nov 26 07:04:24 crc kubenswrapper[4492]: I1126 
07:04:24.128787 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zv5vz\" (UniqueName: \"kubernetes.io/projected/8efa287b-6b96-4a2b-8ce0-4a086b2821ce-kube-api-access-zv5vz\") pod \"ovn-northd-0\" (UID: \"8efa287b-6b96-4a2b-8ce0-4a086b2821ce\") " pod="openstack/ovn-northd-0" Nov 26 07:04:24 crc kubenswrapper[4492]: I1126 07:04:24.128862 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8efa287b-6b96-4a2b-8ce0-4a086b2821ce-config\") pod \"ovn-northd-0\" (UID: \"8efa287b-6b96-4a2b-8ce0-4a086b2821ce\") " pod="openstack/ovn-northd-0" Nov 26 07:04:24 crc kubenswrapper[4492]: I1126 07:04:24.128974 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8efa287b-6b96-4a2b-8ce0-4a086b2821ce-scripts\") pod \"ovn-northd-0\" (UID: \"8efa287b-6b96-4a2b-8ce0-4a086b2821ce\") " pod="openstack/ovn-northd-0" Nov 26 07:04:24 crc kubenswrapper[4492]: I1126 07:04:24.128996 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/8efa287b-6b96-4a2b-8ce0-4a086b2821ce-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"8efa287b-6b96-4a2b-8ce0-4a086b2821ce\") " pod="openstack/ovn-northd-0" Nov 26 07:04:24 crc kubenswrapper[4492]: I1126 07:04:24.129016 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/8efa287b-6b96-4a2b-8ce0-4a086b2821ce-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"8efa287b-6b96-4a2b-8ce0-4a086b2821ce\") " pod="openstack/ovn-northd-0" Nov 26 07:04:24 crc kubenswrapper[4492]: I1126 07:04:24.129037 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8efa287b-6b96-4a2b-8ce0-4a086b2821ce-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"8efa287b-6b96-4a2b-8ce0-4a086b2821ce\") " pod="openstack/ovn-northd-0" Nov 26 07:04:24 crc kubenswrapper[4492]: I1126 07:04:24.129089 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/8efa287b-6b96-4a2b-8ce0-4a086b2821ce-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"8efa287b-6b96-4a2b-8ce0-4a086b2821ce\") " pod="openstack/ovn-northd-0" Nov 26 07:04:24 crc kubenswrapper[4492]: I1126 07:04:24.129566 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/8efa287b-6b96-4a2b-8ce0-4a086b2821ce-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"8efa287b-6b96-4a2b-8ce0-4a086b2821ce\") " pod="openstack/ovn-northd-0" Nov 26 07:04:24 crc kubenswrapper[4492]: I1126 07:04:24.134506 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8efa287b-6b96-4a2b-8ce0-4a086b2821ce-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"8efa287b-6b96-4a2b-8ce0-4a086b2821ce\") " pod="openstack/ovn-northd-0" Nov 26 07:04:24 crc kubenswrapper[4492]: I1126 07:04:24.149371 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/8efa287b-6b96-4a2b-8ce0-4a086b2821ce-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"8efa287b-6b96-4a2b-8ce0-4a086b2821ce\") " pod="openstack/ovn-northd-0" Nov 26 07:04:24 crc 
kubenswrapper[4492]: I1126 07:04:24.155607 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zv5vz\" (UniqueName: \"kubernetes.io/projected/8efa287b-6b96-4a2b-8ce0-4a086b2821ce-kube-api-access-zv5vz\") pod \"ovn-northd-0\" (UID: \"8efa287b-6b96-4a2b-8ce0-4a086b2821ce\") " pod="openstack/ovn-northd-0" Nov 26 07:04:24 crc kubenswrapper[4492]: I1126 07:04:24.224040 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-576bz"] Nov 26 07:04:24 crc kubenswrapper[4492]: I1126 07:04:24.453619 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="018929d0-9b48-4fda-8f21-1c4d4f6efc4c" path="/var/lib/kubelet/pods/018929d0-9b48-4fda-8f21-1c4d4f6efc4c/volumes" Nov 26 07:04:24 crc kubenswrapper[4492]: I1126 07:04:24.534942 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-576bz" podUID="da5f9543-4f48-47b1-9164-d2e736a1bc6f" containerName="registry-server" containerID="cri-o://6dee9d093fe1e642db407e8cfbad1545cc4ba2155f3b295b7a8f55d6dc6ce9b6" gracePeriod=2 Nov 26 07:04:24 crc kubenswrapper[4492]: I1126 07:04:24.767079 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-66f998579f-v5z4b" Nov 26 07:04:24 crc kubenswrapper[4492]: I1126 07:04:24.981412 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovnnorthd-ovndbs" Nov 26 07:04:25 crc kubenswrapper[4492]: I1126 07:04:25.016897 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/8efa287b-6b96-4a2b-8ce0-4a086b2821ce-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"8efa287b-6b96-4a2b-8ce0-4a086b2821ce\") " pod="openstack/ovn-northd-0" Nov 26 07:04:25 crc kubenswrapper[4492]: I1126 07:04:25.029091 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovnnorthd-ovnnorthd-dockercfg-lm2pz" Nov 26 07:04:25 crc kubenswrapper[4492]: I1126 07:04:25.031834 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-576bz" Nov 26 07:04:25 crc kubenswrapper[4492]: I1126 07:04:25.037591 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-scripts" Nov 26 07:04:25 crc kubenswrapper[4492]: I1126 07:04:25.042296 4492 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-2nmf5" podUID="0fd9ab99-d364-4442-a203-ce8a7b838cf3" containerName="registry-server" probeResult="failure" output=< Nov 26 07:04:25 crc kubenswrapper[4492]: timeout: failed to connect service ":50051" within 1s Nov 26 07:04:25 crc kubenswrapper[4492]: > Nov 26 07:04:25 crc kubenswrapper[4492]: I1126 07:04:25.043063 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8efa287b-6b96-4a2b-8ce0-4a086b2821ce-scripts\") pod \"ovn-northd-0\" (UID: \"8efa287b-6b96-4a2b-8ce0-4a086b2821ce\") " pod="openstack/ovn-northd-0" Nov 26 07:04:25 crc kubenswrapper[4492]: I1126 07:04:25.107895 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-config" Nov 26 07:04:25 crc kubenswrapper[4492]: I1126 07:04:25.110929 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8efa287b-6b96-4a2b-8ce0-4a086b2821ce-config\") pod \"ovn-northd-0\" (UID: \"8efa287b-6b96-4a2b-8ce0-4a086b2821ce\") " pod="openstack/ovn-northd-0" Nov 26 07:04:25 crc kubenswrapper[4492]: I1126 07:04:25.157896 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/da5f9543-4f48-47b1-9164-d2e736a1bc6f-utilities\") pod \"da5f9543-4f48-47b1-9164-d2e736a1bc6f\" (UID: \"da5f9543-4f48-47b1-9164-d2e736a1bc6f\") " Nov 26 07:04:25 crc kubenswrapper[4492]: I1126 07:04:25.158069 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/da5f9543-4f48-47b1-9164-d2e736a1bc6f-catalog-content\") pod \"da5f9543-4f48-47b1-9164-d2e736a1bc6f\" (UID: \"da5f9543-4f48-47b1-9164-d2e736a1bc6f\") " Nov 26 07:04:25 crc kubenswrapper[4492]: I1126 07:04:25.158102 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2bs59\" (UniqueName: \"kubernetes.io/projected/da5f9543-4f48-47b1-9164-d2e736a1bc6f-kube-api-access-2bs59\") pod \"da5f9543-4f48-47b1-9164-d2e736a1bc6f\" (UID: \"da5f9543-4f48-47b1-9164-d2e736a1bc6f\") " Nov 26 07:04:25 crc kubenswrapper[4492]: I1126 07:04:25.161302 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0" Nov 26 07:04:25 crc kubenswrapper[4492]: I1126 07:04:25.161332 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/da5f9543-4f48-47b1-9164-d2e736a1bc6f-utilities" (OuterVolumeSpecName: "utilities") pod "da5f9543-4f48-47b1-9164-d2e736a1bc6f" (UID: "da5f9543-4f48-47b1-9164-d2e736a1bc6f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:04:25 crc kubenswrapper[4492]: I1126 07:04:25.168709 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/da5f9543-4f48-47b1-9164-d2e736a1bc6f-kube-api-access-2bs59" (OuterVolumeSpecName: "kube-api-access-2bs59") pod "da5f9543-4f48-47b1-9164-d2e736a1bc6f" (UID: "da5f9543-4f48-47b1-9164-d2e736a1bc6f"). 
InnerVolumeSpecName "kube-api-access-2bs59". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:04:25 crc kubenswrapper[4492]: I1126 07:04:25.184972 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/da5f9543-4f48-47b1-9164-d2e736a1bc6f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "da5f9543-4f48-47b1-9164-d2e736a1bc6f" (UID: "da5f9543-4f48-47b1-9164-d2e736a1bc6f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:04:25 crc kubenswrapper[4492]: I1126 07:04:25.260733 4492 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/da5f9543-4f48-47b1-9164-d2e736a1bc6f-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 07:04:25 crc kubenswrapper[4492]: I1126 07:04:25.260998 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2bs59\" (UniqueName: \"kubernetes.io/projected/da5f9543-4f48-47b1-9164-d2e736a1bc6f-kube-api-access-2bs59\") on node \"crc\" DevicePath \"\"" Nov 26 07:04:25 crc kubenswrapper[4492]: I1126 07:04:25.261009 4492 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/da5f9543-4f48-47b1-9164-d2e736a1bc6f-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 07:04:25 crc kubenswrapper[4492]: I1126 07:04:25.552502 4492 generic.go:334] "Generic (PLEG): container finished" podID="da5f9543-4f48-47b1-9164-d2e736a1bc6f" containerID="6dee9d093fe1e642db407e8cfbad1545cc4ba2155f3b295b7a8f55d6dc6ce9b6" exitCode=0 Nov 26 07:04:25 crc kubenswrapper[4492]: I1126 07:04:25.552569 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-576bz" event={"ID":"da5f9543-4f48-47b1-9164-d2e736a1bc6f","Type":"ContainerDied","Data":"6dee9d093fe1e642db407e8cfbad1545cc4ba2155f3b295b7a8f55d6dc6ce9b6"} Nov 26 07:04:25 crc kubenswrapper[4492]: I1126 07:04:25.552609 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-576bz" event={"ID":"da5f9543-4f48-47b1-9164-d2e736a1bc6f","Type":"ContainerDied","Data":"10dcfe2a33f1dc3cd68ac32475c8d85a6097c0fa9ec309ed29e456af66e1a8fa"} Nov 26 07:04:25 crc kubenswrapper[4492]: I1126 07:04:25.552610 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-576bz" Nov 26 07:04:25 crc kubenswrapper[4492]: I1126 07:04:25.552631 4492 scope.go:117] "RemoveContainer" containerID="6dee9d093fe1e642db407e8cfbad1545cc4ba2155f3b295b7a8f55d6dc6ce9b6" Nov 26 07:04:25 crc kubenswrapper[4492]: I1126 07:04:25.594568 4492 scope.go:117] "RemoveContainer" containerID="035303380df119f8448e03f483e9ccf7d1100300dd6ed9c86e63453df95493b4" Nov 26 07:04:25 crc kubenswrapper[4492]: I1126 07:04:25.598337 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-576bz"] Nov 26 07:04:25 crc kubenswrapper[4492]: I1126 07:04:25.605289 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-576bz"] Nov 26 07:04:25 crc kubenswrapper[4492]: I1126 07:04:25.619743 4492 scope.go:117] "RemoveContainer" containerID="4e8c7288cb2ecc7ccb4d4059592ac82ac794d13616d767cc6279f4ee211621f0" Nov 26 07:04:25 crc kubenswrapper[4492]: I1126 07:04:25.643026 4492 scope.go:117] "RemoveContainer" containerID="6dee9d093fe1e642db407e8cfbad1545cc4ba2155f3b295b7a8f55d6dc6ce9b6" Nov 26 07:04:25 crc kubenswrapper[4492]: E1126 07:04:25.643380 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6dee9d093fe1e642db407e8cfbad1545cc4ba2155f3b295b7a8f55d6dc6ce9b6\": container with ID starting with 6dee9d093fe1e642db407e8cfbad1545cc4ba2155f3b295b7a8f55d6dc6ce9b6 not found: ID does not exist" containerID="6dee9d093fe1e642db407e8cfbad1545cc4ba2155f3b295b7a8f55d6dc6ce9b6" Nov 26 07:04:25 crc kubenswrapper[4492]: I1126 07:04:25.643478 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6dee9d093fe1e642db407e8cfbad1545cc4ba2155f3b295b7a8f55d6dc6ce9b6"} err="failed to get container status \"6dee9d093fe1e642db407e8cfbad1545cc4ba2155f3b295b7a8f55d6dc6ce9b6\": rpc error: code = NotFound desc = could not find container \"6dee9d093fe1e642db407e8cfbad1545cc4ba2155f3b295b7a8f55d6dc6ce9b6\": container with ID starting with 6dee9d093fe1e642db407e8cfbad1545cc4ba2155f3b295b7a8f55d6dc6ce9b6 not found: ID does not exist" Nov 26 07:04:25 crc kubenswrapper[4492]: I1126 07:04:25.643563 4492 scope.go:117] "RemoveContainer" containerID="035303380df119f8448e03f483e9ccf7d1100300dd6ed9c86e63453df95493b4" Nov 26 07:04:25 crc kubenswrapper[4492]: E1126 07:04:25.643848 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"035303380df119f8448e03f483e9ccf7d1100300dd6ed9c86e63453df95493b4\": container with ID starting with 035303380df119f8448e03f483e9ccf7d1100300dd6ed9c86e63453df95493b4 not found: ID does not exist" containerID="035303380df119f8448e03f483e9ccf7d1100300dd6ed9c86e63453df95493b4" Nov 26 07:04:25 crc kubenswrapper[4492]: I1126 07:04:25.643934 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"035303380df119f8448e03f483e9ccf7d1100300dd6ed9c86e63453df95493b4"} err="failed to get container status \"035303380df119f8448e03f483e9ccf7d1100300dd6ed9c86e63453df95493b4\": rpc error: code = NotFound desc = could not find container \"035303380df119f8448e03f483e9ccf7d1100300dd6ed9c86e63453df95493b4\": container with ID starting with 035303380df119f8448e03f483e9ccf7d1100300dd6ed9c86e63453df95493b4 not found: ID does not exist" Nov 26 07:04:25 crc kubenswrapper[4492]: I1126 07:04:25.644015 4492 scope.go:117] "RemoveContainer" 
containerID="4e8c7288cb2ecc7ccb4d4059592ac82ac794d13616d767cc6279f4ee211621f0" Nov 26 07:04:25 crc kubenswrapper[4492]: E1126 07:04:25.644316 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4e8c7288cb2ecc7ccb4d4059592ac82ac794d13616d767cc6279f4ee211621f0\": container with ID starting with 4e8c7288cb2ecc7ccb4d4059592ac82ac794d13616d767cc6279f4ee211621f0 not found: ID does not exist" containerID="4e8c7288cb2ecc7ccb4d4059592ac82ac794d13616d767cc6279f4ee211621f0" Nov 26 07:04:25 crc kubenswrapper[4492]: I1126 07:04:25.644398 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4e8c7288cb2ecc7ccb4d4059592ac82ac794d13616d767cc6279f4ee211621f0"} err="failed to get container status \"4e8c7288cb2ecc7ccb4d4059592ac82ac794d13616d767cc6279f4ee211621f0\": rpc error: code = NotFound desc = could not find container \"4e8c7288cb2ecc7ccb4d4059592ac82ac794d13616d767cc6279f4ee211621f0\": container with ID starting with 4e8c7288cb2ecc7ccb4d4059592ac82ac794d13616d767cc6279f4ee211621f0 not found: ID does not exist" Nov 26 07:04:25 crc kubenswrapper[4492]: I1126 07:04:25.678933 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Nov 26 07:04:25 crc kubenswrapper[4492]: I1126 07:04:25.873206 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/b96d68d3-ed92-40be-bfed-6143b3cdca02-etc-swift\") pod \"swift-storage-0\" (UID: \"b96d68d3-ed92-40be-bfed-6143b3cdca02\") " pod="openstack/swift-storage-0" Nov 26 07:04:25 crc kubenswrapper[4492]: E1126 07:04:25.873419 4492 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 26 07:04:25 crc kubenswrapper[4492]: E1126 07:04:25.873447 4492 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 26 07:04:25 crc kubenswrapper[4492]: E1126 07:04:25.873507 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/b96d68d3-ed92-40be-bfed-6143b3cdca02-etc-swift podName:b96d68d3-ed92-40be-bfed-6143b3cdca02 nodeName:}" failed. No retries permitted until 2025-11-26 07:04:29.873490388 +0000 UTC m=+965.757378676 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/b96d68d3-ed92-40be-bfed-6143b3cdca02-etc-swift") pod "swift-storage-0" (UID: "b96d68d3-ed92-40be-bfed-6143b3cdca02") : configmap "swift-ring-files" not found Nov 26 07:04:26 crc kubenswrapper[4492]: I1126 07:04:26.069699 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-ring-rebalance-q589k"] Nov 26 07:04:26 crc kubenswrapper[4492]: E1126 07:04:26.088651 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="da5f9543-4f48-47b1-9164-d2e736a1bc6f" containerName="registry-server" Nov 26 07:04:26 crc kubenswrapper[4492]: I1126 07:04:26.088788 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="da5f9543-4f48-47b1-9164-d2e736a1bc6f" containerName="registry-server" Nov 26 07:04:26 crc kubenswrapper[4492]: E1126 07:04:26.088850 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="da5f9543-4f48-47b1-9164-d2e736a1bc6f" containerName="extract-content" Nov 26 07:04:26 crc kubenswrapper[4492]: I1126 07:04:26.088896 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="da5f9543-4f48-47b1-9164-d2e736a1bc6f" containerName="extract-content" Nov 26 07:04:26 crc kubenswrapper[4492]: E1126 07:04:26.088965 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="da5f9543-4f48-47b1-9164-d2e736a1bc6f" containerName="extract-utilities" Nov 26 07:04:26 crc kubenswrapper[4492]: I1126 07:04:26.089035 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="da5f9543-4f48-47b1-9164-d2e736a1bc6f" containerName="extract-utilities" Nov 26 07:04:26 crc kubenswrapper[4492]: I1126 07:04:26.089280 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="da5f9543-4f48-47b1-9164-d2e736a1bc6f" containerName="registry-server" Nov 26 07:04:26 crc kubenswrapper[4492]: I1126 07:04:26.089796 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-q589k"] Nov 26 07:04:26 crc kubenswrapper[4492]: I1126 07:04:26.089971 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-ring-rebalance-q589k"] Nov 26 07:04:26 crc kubenswrapper[4492]: I1126 07:04:26.089950 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-q589k" Nov 26 07:04:26 crc kubenswrapper[4492]: E1126 07:04:26.091281 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[combined-ca-bundle dispersionconf etc-swift kube-api-access-cxtxn ring-data-devices scripts swiftconf], unattached volumes=[], failed to process volumes=[combined-ca-bundle dispersionconf etc-swift kube-api-access-cxtxn ring-data-devices scripts swiftconf]: context canceled" pod="openstack/swift-ring-rebalance-q589k" podUID="9d5ae5ba-ece2-4159-8f1b-fea0bcde9db3" Nov 26 07:04:26 crc kubenswrapper[4492]: I1126 07:04:26.125236 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-ring-rebalance-kcsnv"] Nov 26 07:04:26 crc kubenswrapper[4492]: I1126 07:04:26.126403 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-kcsnv" Nov 26 07:04:26 crc kubenswrapper[4492]: I1126 07:04:26.128798 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Nov 26 07:04:26 crc kubenswrapper[4492]: I1126 07:04:26.129503 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-scripts" Nov 26 07:04:26 crc kubenswrapper[4492]: I1126 07:04:26.131423 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-config-data" Nov 26 07:04:26 crc kubenswrapper[4492]: I1126 07:04:26.141631 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-kcsnv"] Nov 26 07:04:26 crc kubenswrapper[4492]: I1126 07:04:26.185990 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/9d5ae5ba-ece2-4159-8f1b-fea0bcde9db3-ring-data-devices\") pod \"swift-ring-rebalance-q589k\" (UID: \"9d5ae5ba-ece2-4159-8f1b-fea0bcde9db3\") " pod="openstack/swift-ring-rebalance-q589k" Nov 26 07:04:26 crc kubenswrapper[4492]: I1126 07:04:26.186041 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cxtxn\" (UniqueName: \"kubernetes.io/projected/9d5ae5ba-ece2-4159-8f1b-fea0bcde9db3-kube-api-access-cxtxn\") pod \"swift-ring-rebalance-q589k\" (UID: \"9d5ae5ba-ece2-4159-8f1b-fea0bcde9db3\") " pod="openstack/swift-ring-rebalance-q589k" Nov 26 07:04:26 crc kubenswrapper[4492]: I1126 07:04:26.186091 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d5ae5ba-ece2-4159-8f1b-fea0bcde9db3-combined-ca-bundle\") pod \"swift-ring-rebalance-q589k\" (UID: \"9d5ae5ba-ece2-4159-8f1b-fea0bcde9db3\") " pod="openstack/swift-ring-rebalance-q589k" Nov 26 07:04:26 crc kubenswrapper[4492]: I1126 07:04:26.186182 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9d5ae5ba-ece2-4159-8f1b-fea0bcde9db3-scripts\") pod \"swift-ring-rebalance-q589k\" (UID: \"9d5ae5ba-ece2-4159-8f1b-fea0bcde9db3\") " pod="openstack/swift-ring-rebalance-q589k" Nov 26 07:04:26 crc kubenswrapper[4492]: I1126 07:04:26.186205 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/9d5ae5ba-ece2-4159-8f1b-fea0bcde9db3-swiftconf\") pod \"swift-ring-rebalance-q589k\" (UID: \"9d5ae5ba-ece2-4159-8f1b-fea0bcde9db3\") " pod="openstack/swift-ring-rebalance-q589k" Nov 26 07:04:26 crc kubenswrapper[4492]: I1126 07:04:26.186323 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/9d5ae5ba-ece2-4159-8f1b-fea0bcde9db3-dispersionconf\") pod \"swift-ring-rebalance-q589k\" (UID: \"9d5ae5ba-ece2-4159-8f1b-fea0bcde9db3\") " pod="openstack/swift-ring-rebalance-q589k" Nov 26 07:04:26 crc kubenswrapper[4492]: I1126 07:04:26.186381 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/9d5ae5ba-ece2-4159-8f1b-fea0bcde9db3-etc-swift\") pod \"swift-ring-rebalance-q589k\" (UID: \"9d5ae5ba-ece2-4159-8f1b-fea0bcde9db3\") " pod="openstack/swift-ring-rebalance-q589k" Nov 26 
07:04:26 crc kubenswrapper[4492]: I1126 07:04:26.288497 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d5ae5ba-ece2-4159-8f1b-fea0bcde9db3-combined-ca-bundle\") pod \"swift-ring-rebalance-q589k\" (UID: \"9d5ae5ba-ece2-4159-8f1b-fea0bcde9db3\") " pod="openstack/swift-ring-rebalance-q589k" Nov 26 07:04:26 crc kubenswrapper[4492]: I1126 07:04:26.288548 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/a4ff9a37-0e51-4210-8b06-9c10d0c1c3ba-dispersionconf\") pod \"swift-ring-rebalance-kcsnv\" (UID: \"a4ff9a37-0e51-4210-8b06-9c10d0c1c3ba\") " pod="openstack/swift-ring-rebalance-kcsnv" Nov 26 07:04:26 crc kubenswrapper[4492]: I1126 07:04:26.288602 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/a4ff9a37-0e51-4210-8b06-9c10d0c1c3ba-ring-data-devices\") pod \"swift-ring-rebalance-kcsnv\" (UID: \"a4ff9a37-0e51-4210-8b06-9c10d0c1c3ba\") " pod="openstack/swift-ring-rebalance-kcsnv" Nov 26 07:04:26 crc kubenswrapper[4492]: I1126 07:04:26.288633 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/a4ff9a37-0e51-4210-8b06-9c10d0c1c3ba-etc-swift\") pod \"swift-ring-rebalance-kcsnv\" (UID: \"a4ff9a37-0e51-4210-8b06-9c10d0c1c3ba\") " pod="openstack/swift-ring-rebalance-kcsnv" Nov 26 07:04:26 crc kubenswrapper[4492]: I1126 07:04:26.288726 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a4ff9a37-0e51-4210-8b06-9c10d0c1c3ba-combined-ca-bundle\") pod \"swift-ring-rebalance-kcsnv\" (UID: \"a4ff9a37-0e51-4210-8b06-9c10d0c1c3ba\") " pod="openstack/swift-ring-rebalance-kcsnv" Nov 26 07:04:26 crc kubenswrapper[4492]: I1126 07:04:26.288786 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a4ff9a37-0e51-4210-8b06-9c10d0c1c3ba-scripts\") pod \"swift-ring-rebalance-kcsnv\" (UID: \"a4ff9a37-0e51-4210-8b06-9c10d0c1c3ba\") " pod="openstack/swift-ring-rebalance-kcsnv" Nov 26 07:04:26 crc kubenswrapper[4492]: I1126 07:04:26.288836 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9d5ae5ba-ece2-4159-8f1b-fea0bcde9db3-scripts\") pod \"swift-ring-rebalance-q589k\" (UID: \"9d5ae5ba-ece2-4159-8f1b-fea0bcde9db3\") " pod="openstack/swift-ring-rebalance-q589k" Nov 26 07:04:26 crc kubenswrapper[4492]: I1126 07:04:26.288900 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/9d5ae5ba-ece2-4159-8f1b-fea0bcde9db3-swiftconf\") pod \"swift-ring-rebalance-q589k\" (UID: \"9d5ae5ba-ece2-4159-8f1b-fea0bcde9db3\") " pod="openstack/swift-ring-rebalance-q589k" Nov 26 07:04:26 crc kubenswrapper[4492]: I1126 07:04:26.288983 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/9d5ae5ba-ece2-4159-8f1b-fea0bcde9db3-dispersionconf\") pod \"swift-ring-rebalance-q589k\" (UID: \"9d5ae5ba-ece2-4159-8f1b-fea0bcde9db3\") " pod="openstack/swift-ring-rebalance-q589k" Nov 26 07:04:26 crc kubenswrapper[4492]: 
I1126 07:04:26.289017 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/9d5ae5ba-ece2-4159-8f1b-fea0bcde9db3-etc-swift\") pod \"swift-ring-rebalance-q589k\" (UID: \"9d5ae5ba-ece2-4159-8f1b-fea0bcde9db3\") " pod="openstack/swift-ring-rebalance-q589k" Nov 26 07:04:26 crc kubenswrapper[4492]: I1126 07:04:26.289147 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/9d5ae5ba-ece2-4159-8f1b-fea0bcde9db3-ring-data-devices\") pod \"swift-ring-rebalance-q589k\" (UID: \"9d5ae5ba-ece2-4159-8f1b-fea0bcde9db3\") " pod="openstack/swift-ring-rebalance-q589k" Nov 26 07:04:26 crc kubenswrapper[4492]: I1126 07:04:26.289210 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cxtxn\" (UniqueName: \"kubernetes.io/projected/9d5ae5ba-ece2-4159-8f1b-fea0bcde9db3-kube-api-access-cxtxn\") pod \"swift-ring-rebalance-q589k\" (UID: \"9d5ae5ba-ece2-4159-8f1b-fea0bcde9db3\") " pod="openstack/swift-ring-rebalance-q589k" Nov 26 07:04:26 crc kubenswrapper[4492]: I1126 07:04:26.289289 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wsg26\" (UniqueName: \"kubernetes.io/projected/a4ff9a37-0e51-4210-8b06-9c10d0c1c3ba-kube-api-access-wsg26\") pod \"swift-ring-rebalance-kcsnv\" (UID: \"a4ff9a37-0e51-4210-8b06-9c10d0c1c3ba\") " pod="openstack/swift-ring-rebalance-kcsnv" Nov 26 07:04:26 crc kubenswrapper[4492]: I1126 07:04:26.289350 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/a4ff9a37-0e51-4210-8b06-9c10d0c1c3ba-swiftconf\") pod \"swift-ring-rebalance-kcsnv\" (UID: \"a4ff9a37-0e51-4210-8b06-9c10d0c1c3ba\") " pod="openstack/swift-ring-rebalance-kcsnv" Nov 26 07:04:26 crc kubenswrapper[4492]: I1126 07:04:26.289644 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9d5ae5ba-ece2-4159-8f1b-fea0bcde9db3-scripts\") pod \"swift-ring-rebalance-q589k\" (UID: \"9d5ae5ba-ece2-4159-8f1b-fea0bcde9db3\") " pod="openstack/swift-ring-rebalance-q589k" Nov 26 07:04:26 crc kubenswrapper[4492]: I1126 07:04:26.289742 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/9d5ae5ba-ece2-4159-8f1b-fea0bcde9db3-etc-swift\") pod \"swift-ring-rebalance-q589k\" (UID: \"9d5ae5ba-ece2-4159-8f1b-fea0bcde9db3\") " pod="openstack/swift-ring-rebalance-q589k" Nov 26 07:04:26 crc kubenswrapper[4492]: I1126 07:04:26.290316 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/9d5ae5ba-ece2-4159-8f1b-fea0bcde9db3-ring-data-devices\") pod \"swift-ring-rebalance-q589k\" (UID: \"9d5ae5ba-ece2-4159-8f1b-fea0bcde9db3\") " pod="openstack/swift-ring-rebalance-q589k" Nov 26 07:04:26 crc kubenswrapper[4492]: I1126 07:04:26.310364 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/9d5ae5ba-ece2-4159-8f1b-fea0bcde9db3-dispersionconf\") pod \"swift-ring-rebalance-q589k\" (UID: \"9d5ae5ba-ece2-4159-8f1b-fea0bcde9db3\") " pod="openstack/swift-ring-rebalance-q589k" Nov 26 07:04:26 crc kubenswrapper[4492]: I1126 07:04:26.313897 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d5ae5ba-ece2-4159-8f1b-fea0bcde9db3-combined-ca-bundle\") pod \"swift-ring-rebalance-q589k\" (UID: \"9d5ae5ba-ece2-4159-8f1b-fea0bcde9db3\") " pod="openstack/swift-ring-rebalance-q589k" Nov 26 07:04:26 crc kubenswrapper[4492]: I1126 07:04:26.313937 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cxtxn\" (UniqueName: \"kubernetes.io/projected/9d5ae5ba-ece2-4159-8f1b-fea0bcde9db3-kube-api-access-cxtxn\") pod \"swift-ring-rebalance-q589k\" (UID: \"9d5ae5ba-ece2-4159-8f1b-fea0bcde9db3\") " pod="openstack/swift-ring-rebalance-q589k" Nov 26 07:04:26 crc kubenswrapper[4492]: I1126 07:04:26.317196 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/9d5ae5ba-ece2-4159-8f1b-fea0bcde9db3-swiftconf\") pod \"swift-ring-rebalance-q589k\" (UID: \"9d5ae5ba-ece2-4159-8f1b-fea0bcde9db3\") " pod="openstack/swift-ring-rebalance-q589k" Nov 26 07:04:26 crc kubenswrapper[4492]: I1126 07:04:26.391383 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wsg26\" (UniqueName: \"kubernetes.io/projected/a4ff9a37-0e51-4210-8b06-9c10d0c1c3ba-kube-api-access-wsg26\") pod \"swift-ring-rebalance-kcsnv\" (UID: \"a4ff9a37-0e51-4210-8b06-9c10d0c1c3ba\") " pod="openstack/swift-ring-rebalance-kcsnv" Nov 26 07:04:26 crc kubenswrapper[4492]: I1126 07:04:26.391433 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/a4ff9a37-0e51-4210-8b06-9c10d0c1c3ba-swiftconf\") pod \"swift-ring-rebalance-kcsnv\" (UID: \"a4ff9a37-0e51-4210-8b06-9c10d0c1c3ba\") " pod="openstack/swift-ring-rebalance-kcsnv" Nov 26 07:04:26 crc kubenswrapper[4492]: I1126 07:04:26.391469 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/a4ff9a37-0e51-4210-8b06-9c10d0c1c3ba-dispersionconf\") pod \"swift-ring-rebalance-kcsnv\" (UID: \"a4ff9a37-0e51-4210-8b06-9c10d0c1c3ba\") " pod="openstack/swift-ring-rebalance-kcsnv" Nov 26 07:04:26 crc kubenswrapper[4492]: I1126 07:04:26.391510 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/a4ff9a37-0e51-4210-8b06-9c10d0c1c3ba-ring-data-devices\") pod \"swift-ring-rebalance-kcsnv\" (UID: \"a4ff9a37-0e51-4210-8b06-9c10d0c1c3ba\") " pod="openstack/swift-ring-rebalance-kcsnv" Nov 26 07:04:26 crc kubenswrapper[4492]: I1126 07:04:26.391528 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/a4ff9a37-0e51-4210-8b06-9c10d0c1c3ba-etc-swift\") pod \"swift-ring-rebalance-kcsnv\" (UID: \"a4ff9a37-0e51-4210-8b06-9c10d0c1c3ba\") " pod="openstack/swift-ring-rebalance-kcsnv" Nov 26 07:04:26 crc kubenswrapper[4492]: I1126 07:04:26.391546 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a4ff9a37-0e51-4210-8b06-9c10d0c1c3ba-combined-ca-bundle\") pod \"swift-ring-rebalance-kcsnv\" (UID: \"a4ff9a37-0e51-4210-8b06-9c10d0c1c3ba\") " pod="openstack/swift-ring-rebalance-kcsnv" Nov 26 07:04:26 crc kubenswrapper[4492]: I1126 07:04:26.391565 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/configmap/a4ff9a37-0e51-4210-8b06-9c10d0c1c3ba-scripts\") pod \"swift-ring-rebalance-kcsnv\" (UID: \"a4ff9a37-0e51-4210-8b06-9c10d0c1c3ba\") " pod="openstack/swift-ring-rebalance-kcsnv" Nov 26 07:04:26 crc kubenswrapper[4492]: I1126 07:04:26.392700 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a4ff9a37-0e51-4210-8b06-9c10d0c1c3ba-scripts\") pod \"swift-ring-rebalance-kcsnv\" (UID: \"a4ff9a37-0e51-4210-8b06-9c10d0c1c3ba\") " pod="openstack/swift-ring-rebalance-kcsnv" Nov 26 07:04:26 crc kubenswrapper[4492]: I1126 07:04:26.392867 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/a4ff9a37-0e51-4210-8b06-9c10d0c1c3ba-ring-data-devices\") pod \"swift-ring-rebalance-kcsnv\" (UID: \"a4ff9a37-0e51-4210-8b06-9c10d0c1c3ba\") " pod="openstack/swift-ring-rebalance-kcsnv" Nov 26 07:04:26 crc kubenswrapper[4492]: I1126 07:04:26.393654 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/a4ff9a37-0e51-4210-8b06-9c10d0c1c3ba-etc-swift\") pod \"swift-ring-rebalance-kcsnv\" (UID: \"a4ff9a37-0e51-4210-8b06-9c10d0c1c3ba\") " pod="openstack/swift-ring-rebalance-kcsnv" Nov 26 07:04:26 crc kubenswrapper[4492]: I1126 07:04:26.396438 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/a4ff9a37-0e51-4210-8b06-9c10d0c1c3ba-swiftconf\") pod \"swift-ring-rebalance-kcsnv\" (UID: \"a4ff9a37-0e51-4210-8b06-9c10d0c1c3ba\") " pod="openstack/swift-ring-rebalance-kcsnv" Nov 26 07:04:26 crc kubenswrapper[4492]: I1126 07:04:26.396897 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/a4ff9a37-0e51-4210-8b06-9c10d0c1c3ba-dispersionconf\") pod \"swift-ring-rebalance-kcsnv\" (UID: \"a4ff9a37-0e51-4210-8b06-9c10d0c1c3ba\") " pod="openstack/swift-ring-rebalance-kcsnv" Nov 26 07:04:26 crc kubenswrapper[4492]: I1126 07:04:26.397727 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a4ff9a37-0e51-4210-8b06-9c10d0c1c3ba-combined-ca-bundle\") pod \"swift-ring-rebalance-kcsnv\" (UID: \"a4ff9a37-0e51-4210-8b06-9c10d0c1c3ba\") " pod="openstack/swift-ring-rebalance-kcsnv" Nov 26 07:04:26 crc kubenswrapper[4492]: I1126 07:04:26.406164 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wsg26\" (UniqueName: \"kubernetes.io/projected/a4ff9a37-0e51-4210-8b06-9c10d0c1c3ba-kube-api-access-wsg26\") pod \"swift-ring-rebalance-kcsnv\" (UID: \"a4ff9a37-0e51-4210-8b06-9c10d0c1c3ba\") " pod="openstack/swift-ring-rebalance-kcsnv" Nov 26 07:04:26 crc kubenswrapper[4492]: I1126 07:04:26.449388 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="da5f9543-4f48-47b1-9164-d2e736a1bc6f" path="/var/lib/kubelet/pods/da5f9543-4f48-47b1-9164-d2e736a1bc6f/volumes" Nov 26 07:04:26 crc kubenswrapper[4492]: I1126 07:04:26.453119 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-swift-dockercfg-vcqx9" Nov 26 07:04:26 crc kubenswrapper[4492]: I1126 07:04:26.462187 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-kcsnv" Nov 26 07:04:26 crc kubenswrapper[4492]: I1126 07:04:26.561930 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"8efa287b-6b96-4a2b-8ce0-4a086b2821ce","Type":"ContainerStarted","Data":"b1908ac35a3c39ef240b9a6241cc1e02a40f24fc110de3cd886e658be8b3b94d"} Nov 26 07:04:26 crc kubenswrapper[4492]: I1126 07:04:26.565800 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-q589k" Nov 26 07:04:26 crc kubenswrapper[4492]: I1126 07:04:26.568983 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-kh56n" Nov 26 07:04:26 crc kubenswrapper[4492]: I1126 07:04:26.570234 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-kh56n" Nov 26 07:04:26 crc kubenswrapper[4492]: I1126 07:04:26.583404 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-q589k" Nov 26 07:04:26 crc kubenswrapper[4492]: I1126 07:04:26.630924 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-kh56n" Nov 26 07:04:26 crc kubenswrapper[4492]: I1126 07:04:26.701925 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/9d5ae5ba-ece2-4159-8f1b-fea0bcde9db3-swiftconf\") pod \"9d5ae5ba-ece2-4159-8f1b-fea0bcde9db3\" (UID: \"9d5ae5ba-ece2-4159-8f1b-fea0bcde9db3\") " Nov 26 07:04:26 crc kubenswrapper[4492]: I1126 07:04:26.701997 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9d5ae5ba-ece2-4159-8f1b-fea0bcde9db3-scripts\") pod \"9d5ae5ba-ece2-4159-8f1b-fea0bcde9db3\" (UID: \"9d5ae5ba-ece2-4159-8f1b-fea0bcde9db3\") " Nov 26 07:04:26 crc kubenswrapper[4492]: I1126 07:04:26.702071 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/9d5ae5ba-ece2-4159-8f1b-fea0bcde9db3-etc-swift\") pod \"9d5ae5ba-ece2-4159-8f1b-fea0bcde9db3\" (UID: \"9d5ae5ba-ece2-4159-8f1b-fea0bcde9db3\") " Nov 26 07:04:26 crc kubenswrapper[4492]: I1126 07:04:26.702198 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d5ae5ba-ece2-4159-8f1b-fea0bcde9db3-combined-ca-bundle\") pod \"9d5ae5ba-ece2-4159-8f1b-fea0bcde9db3\" (UID: \"9d5ae5ba-ece2-4159-8f1b-fea0bcde9db3\") " Nov 26 07:04:26 crc kubenswrapper[4492]: I1126 07:04:26.702352 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/9d5ae5ba-ece2-4159-8f1b-fea0bcde9db3-dispersionconf\") pod \"9d5ae5ba-ece2-4159-8f1b-fea0bcde9db3\" (UID: \"9d5ae5ba-ece2-4159-8f1b-fea0bcde9db3\") " Nov 26 07:04:26 crc kubenswrapper[4492]: I1126 07:04:26.702430 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cxtxn\" (UniqueName: \"kubernetes.io/projected/9d5ae5ba-ece2-4159-8f1b-fea0bcde9db3-kube-api-access-cxtxn\") pod \"9d5ae5ba-ece2-4159-8f1b-fea0bcde9db3\" (UID: \"9d5ae5ba-ece2-4159-8f1b-fea0bcde9db3\") " Nov 26 07:04:26 crc kubenswrapper[4492]: I1126 07:04:26.702477 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/9d5ae5ba-ece2-4159-8f1b-fea0bcde9db3-ring-data-devices\") pod \"9d5ae5ba-ece2-4159-8f1b-fea0bcde9db3\" (UID: \"9d5ae5ba-ece2-4159-8f1b-fea0bcde9db3\") " Nov 26 07:04:26 crc kubenswrapper[4492]: I1126 07:04:26.702709 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d5ae5ba-ece2-4159-8f1b-fea0bcde9db3-scripts" (OuterVolumeSpecName: "scripts") pod "9d5ae5ba-ece2-4159-8f1b-fea0bcde9db3" (UID: "9d5ae5ba-ece2-4159-8f1b-fea0bcde9db3"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:04:26 crc kubenswrapper[4492]: I1126 07:04:26.702914 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9d5ae5ba-ece2-4159-8f1b-fea0bcde9db3-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "9d5ae5ba-ece2-4159-8f1b-fea0bcde9db3" (UID: "9d5ae5ba-ece2-4159-8f1b-fea0bcde9db3"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:04:26 crc kubenswrapper[4492]: I1126 07:04:26.703452 4492 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/9d5ae5ba-ece2-4159-8f1b-fea0bcde9db3-etc-swift\") on node \"crc\" DevicePath \"\"" Nov 26 07:04:26 crc kubenswrapper[4492]: I1126 07:04:26.703471 4492 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9d5ae5ba-ece2-4159-8f1b-fea0bcde9db3-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 07:04:26 crc kubenswrapper[4492]: I1126 07:04:26.704801 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d5ae5ba-ece2-4159-8f1b-fea0bcde9db3-ring-data-devices" (OuterVolumeSpecName: "ring-data-devices") pod "9d5ae5ba-ece2-4159-8f1b-fea0bcde9db3" (UID: "9d5ae5ba-ece2-4159-8f1b-fea0bcde9db3"). InnerVolumeSpecName "ring-data-devices". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:04:26 crc kubenswrapper[4492]: I1126 07:04:26.723010 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d5ae5ba-ece2-4159-8f1b-fea0bcde9db3-swiftconf" (OuterVolumeSpecName: "swiftconf") pod "9d5ae5ba-ece2-4159-8f1b-fea0bcde9db3" (UID: "9d5ae5ba-ece2-4159-8f1b-fea0bcde9db3"). InnerVolumeSpecName "swiftconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:04:26 crc kubenswrapper[4492]: I1126 07:04:26.723046 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d5ae5ba-ece2-4159-8f1b-fea0bcde9db3-dispersionconf" (OuterVolumeSpecName: "dispersionconf") pod "9d5ae5ba-ece2-4159-8f1b-fea0bcde9db3" (UID: "9d5ae5ba-ece2-4159-8f1b-fea0bcde9db3"). InnerVolumeSpecName "dispersionconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:04:26 crc kubenswrapper[4492]: I1126 07:04:26.723279 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d5ae5ba-ece2-4159-8f1b-fea0bcde9db3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9d5ae5ba-ece2-4159-8f1b-fea0bcde9db3" (UID: "9d5ae5ba-ece2-4159-8f1b-fea0bcde9db3"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:04:26 crc kubenswrapper[4492]: I1126 07:04:26.728991 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d5ae5ba-ece2-4159-8f1b-fea0bcde9db3-kube-api-access-cxtxn" (OuterVolumeSpecName: "kube-api-access-cxtxn") pod "9d5ae5ba-ece2-4159-8f1b-fea0bcde9db3" (UID: "9d5ae5ba-ece2-4159-8f1b-fea0bcde9db3"). InnerVolumeSpecName "kube-api-access-cxtxn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:04:26 crc kubenswrapper[4492]: I1126 07:04:26.806680 4492 reconciler_common.go:293] "Volume detached for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/9d5ae5ba-ece2-4159-8f1b-fea0bcde9db3-swiftconf\") on node \"crc\" DevicePath \"\"" Nov 26 07:04:26 crc kubenswrapper[4492]: I1126 07:04:26.806719 4492 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d5ae5ba-ece2-4159-8f1b-fea0bcde9db3-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:04:26 crc kubenswrapper[4492]: I1126 07:04:26.806733 4492 reconciler_common.go:293] "Volume detached for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/9d5ae5ba-ece2-4159-8f1b-fea0bcde9db3-dispersionconf\") on node \"crc\" DevicePath \"\"" Nov 26 07:04:26 crc kubenswrapper[4492]: I1126 07:04:26.806749 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cxtxn\" (UniqueName: \"kubernetes.io/projected/9d5ae5ba-ece2-4159-8f1b-fea0bcde9db3-kube-api-access-cxtxn\") on node \"crc\" DevicePath \"\"" Nov 26 07:04:26 crc kubenswrapper[4492]: I1126 07:04:26.806759 4492 reconciler_common.go:293] "Volume detached for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/9d5ae5ba-ece2-4159-8f1b-fea0bcde9db3-ring-data-devices\") on node \"crc\" DevicePath \"\"" Nov 26 07:04:26 crc kubenswrapper[4492]: I1126 07:04:26.925830 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-kcsnv"] Nov 26 07:04:27 crc kubenswrapper[4492]: I1126 07:04:27.008427 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-c4tj9" Nov 26 07:04:27 crc kubenswrapper[4492]: I1126 07:04:27.008475 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-c4tj9" Nov 26 07:04:27 crc kubenswrapper[4492]: I1126 07:04:27.049798 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-c4tj9" Nov 26 07:04:27 crc kubenswrapper[4492]: W1126 07:04:27.161974 4492 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda4ff9a37_0e51_4210_8b06_9c10d0c1c3ba.slice/crio-8e9900afd35ad8a927239812ed9cb5de7cfcf70769647ac2b664166d74c08a14 WatchSource:0}: Error finding container 8e9900afd35ad8a927239812ed9cb5de7cfcf70769647ac2b664166d74c08a14: Status 404 returned error can't find the container with id 8e9900afd35ad8a927239812ed9cb5de7cfcf70769647ac2b664166d74c08a14 Nov 26 07:04:27 crc kubenswrapper[4492]: I1126 07:04:27.313451 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-galera-0" Nov 26 07:04:27 crc kubenswrapper[4492]: I1126 07:04:27.314752 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-galera-0" Nov 26 07:04:27 crc kubenswrapper[4492]: I1126 07:04:27.396427 4492 
kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-galera-0"
Nov 26 07:04:27 crc kubenswrapper[4492]: I1126 07:04:27.590595 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-kcsnv" event={"ID":"a4ff9a37-0e51-4210-8b06-9c10d0c1c3ba","Type":"ContainerStarted","Data":"8e9900afd35ad8a927239812ed9cb5de7cfcf70769647ac2b664166d74c08a14"}
Nov 26 07:04:27 crc kubenswrapper[4492]: I1126 07:04:27.593863 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-q589k"
Nov 26 07:04:27 crc kubenswrapper[4492]: I1126 07:04:27.594550 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"8efa287b-6b96-4a2b-8ce0-4a086b2821ce","Type":"ContainerStarted","Data":"bdc47de3ced9d224d4fef0a67daf970c19587fc4330b0a8970a6b167634fad02"}
Nov 26 07:04:27 crc kubenswrapper[4492]: I1126 07:04:27.595808 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-northd-0"
Nov 26 07:04:27 crc kubenswrapper[4492]: I1126 07:04:27.595854 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"8efa287b-6b96-4a2b-8ce0-4a086b2821ce","Type":"ContainerStarted","Data":"b9b569e0465ce0b1cd07146f8b97cbbf29d84c4a6b72416dd257af157d1e070f"}
Nov 26 07:04:27 crc kubenswrapper[4492]: I1126 07:04:27.621793 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-northd-0" podStartSLOduration=3.122619994 podStartE2EDuration="4.621770544s" podCreationTimestamp="2025-11-26 07:04:23 +0000 UTC" firstStartedPulling="2025-11-26 07:04:25.686700339 +0000 UTC m=+961.570588627" lastFinishedPulling="2025-11-26 07:04:27.185850879 +0000 UTC m=+963.069739177" observedRunningTime="2025-11-26 07:04:27.619021686 +0000 UTC m=+963.502909985" watchObservedRunningTime="2025-11-26 07:04:27.621770544 +0000 UTC m=+963.505658843"
Nov 26 07:04:27 crc kubenswrapper[4492]: I1126 07:04:27.650389 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-kh56n"
Nov 26 07:04:27 crc kubenswrapper[4492]: I1126 07:04:27.655837 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-c4tj9"
Nov 26 07:04:27 crc kubenswrapper[4492]: I1126 07:04:27.671852 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-ring-rebalance-q589k"]
Nov 26 07:04:27 crc kubenswrapper[4492]: I1126 07:04:27.677747 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/swift-ring-rebalance-q589k"]
Nov 26 07:04:27 crc kubenswrapper[4492]: I1126 07:04:27.708739 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-galera-0"
Nov 26 07:04:28 crc kubenswrapper[4492]: I1126 07:04:28.448158 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d5ae5ba-ece2-4159-8f1b-fea0bcde9db3" path="/var/lib/kubelet/pods/9d5ae5ba-ece2-4159-8f1b-fea0bcde9db3/volumes"
Nov 26 07:04:28 crc kubenswrapper[4492]: I1126 07:04:28.574417 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-19de-account-create-update-n48h2"]
Nov 26 07:04:28 crc kubenswrapper[4492]: I1126 07:04:28.575790 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-19de-account-create-update-n48h2"
Nov 26 07:04:28 crc kubenswrapper[4492]: I1126 07:04:28.578052 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-db-secret"
Nov 26 07:04:28 crc kubenswrapper[4492]: I1126 07:04:28.604947 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-19de-account-create-update-n48h2"]
Nov 26 07:04:28 crc kubenswrapper[4492]: I1126 07:04:28.622304 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-create-m9nq2"]
Nov 26 07:04:28 crc kubenswrapper[4492]: I1126 07:04:28.623632 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-m9nq2"
Nov 26 07:04:28 crc kubenswrapper[4492]: I1126 07:04:28.643003 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-m9nq2"]
Nov 26 07:04:28 crc kubenswrapper[4492]: I1126 07:04:28.652973 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-52n8j\" (UniqueName: \"kubernetes.io/projected/38aafecf-4ce9-460d-8a3d-aa42b566ab81-kube-api-access-52n8j\") pod \"keystone-19de-account-create-update-n48h2\" (UID: \"38aafecf-4ce9-460d-8a3d-aa42b566ab81\") " pod="openstack/keystone-19de-account-create-update-n48h2"
Nov 26 07:04:28 crc kubenswrapper[4492]: I1126 07:04:28.653228 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/38aafecf-4ce9-460d-8a3d-aa42b566ab81-operator-scripts\") pod \"keystone-19de-account-create-update-n48h2\" (UID: \"38aafecf-4ce9-460d-8a3d-aa42b566ab81\") " pod="openstack/keystone-19de-account-create-update-n48h2"
Nov 26 07:04:28 crc kubenswrapper[4492]: I1126 07:04:28.754857 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-52n8j\" (UniqueName: \"kubernetes.io/projected/38aafecf-4ce9-460d-8a3d-aa42b566ab81-kube-api-access-52n8j\") pod \"keystone-19de-account-create-update-n48h2\" (UID: \"38aafecf-4ce9-460d-8a3d-aa42b566ab81\") " pod="openstack/keystone-19de-account-create-update-n48h2"
Nov 26 07:04:28 crc kubenswrapper[4492]: I1126 07:04:28.754933 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2rzzc\" (UniqueName: \"kubernetes.io/projected/7788ab37-0666-48ca-aff9-b145fc248e4c-kube-api-access-2rzzc\") pod \"keystone-db-create-m9nq2\" (UID: \"7788ab37-0666-48ca-aff9-b145fc248e4c\") " pod="openstack/keystone-db-create-m9nq2"
Nov 26 07:04:28 crc kubenswrapper[4492]: I1126 07:04:28.755024 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7788ab37-0666-48ca-aff9-b145fc248e4c-operator-scripts\") pod \"keystone-db-create-m9nq2\" (UID: \"7788ab37-0666-48ca-aff9-b145fc248e4c\") " pod="openstack/keystone-db-create-m9nq2"
Nov 26 07:04:28 crc kubenswrapper[4492]: I1126 07:04:28.755053 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/38aafecf-4ce9-460d-8a3d-aa42b566ab81-operator-scripts\") pod \"keystone-19de-account-create-update-n48h2\" (UID: \"38aafecf-4ce9-460d-8a3d-aa42b566ab81\") " pod="openstack/keystone-19de-account-create-update-n48h2"
Nov 26 07:04:28 crc kubenswrapper[4492]: I1126 07:04:28.757553 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/38aafecf-4ce9-460d-8a3d-aa42b566ab81-operator-scripts\") pod \"keystone-19de-account-create-update-n48h2\" (UID: \"38aafecf-4ce9-460d-8a3d-aa42b566ab81\") " pod="openstack/keystone-19de-account-create-update-n48h2"
Nov 26 07:04:28 crc kubenswrapper[4492]: I1126 07:04:28.772160 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-52n8j\" (UniqueName: \"kubernetes.io/projected/38aafecf-4ce9-460d-8a3d-aa42b566ab81-kube-api-access-52n8j\") pod \"keystone-19de-account-create-update-n48h2\" (UID: \"38aafecf-4ce9-460d-8a3d-aa42b566ab81\") " pod="openstack/keystone-19de-account-create-update-n48h2"
Nov 26 07:04:28 crc kubenswrapper[4492]: I1126 07:04:28.846692 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-create-mwl7j"]
Nov 26 07:04:28 crc kubenswrapper[4492]: I1126 07:04:28.847894 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-mwl7j"
Nov 26 07:04:28 crc kubenswrapper[4492]: I1126 07:04:28.854564 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-cell1-galera-0"
Nov 26 07:04:28 crc kubenswrapper[4492]: I1126 07:04:28.854595 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-cell1-galera-0"
Nov 26 07:04:28 crc kubenswrapper[4492]: I1126 07:04:28.857369 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7788ab37-0666-48ca-aff9-b145fc248e4c-operator-scripts\") pod \"keystone-db-create-m9nq2\" (UID: \"7788ab37-0666-48ca-aff9-b145fc248e4c\") " pod="openstack/keystone-db-create-m9nq2"
Nov 26 07:04:28 crc kubenswrapper[4492]: I1126 07:04:28.857545 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2rzzc\" (UniqueName: \"kubernetes.io/projected/7788ab37-0666-48ca-aff9-b145fc248e4c-kube-api-access-2rzzc\") pod \"keystone-db-create-m9nq2\" (UID: \"7788ab37-0666-48ca-aff9-b145fc248e4c\") " pod="openstack/keystone-db-create-m9nq2"
Nov 26 07:04:28 crc kubenswrapper[4492]: I1126 07:04:28.858842 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7788ab37-0666-48ca-aff9-b145fc248e4c-operator-scripts\") pod \"keystone-db-create-m9nq2\" (UID: \"7788ab37-0666-48ca-aff9-b145fc248e4c\") " pod="openstack/keystone-db-create-m9nq2"
Nov 26 07:04:28 crc kubenswrapper[4492]: I1126 07:04:28.886922 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2rzzc\" (UniqueName: \"kubernetes.io/projected/7788ab37-0666-48ca-aff9-b145fc248e4c-kube-api-access-2rzzc\") pod \"keystone-db-create-m9nq2\" (UID: \"7788ab37-0666-48ca-aff9-b145fc248e4c\") " pod="openstack/keystone-db-create-m9nq2"
Nov 26 07:04:28 crc kubenswrapper[4492]: I1126 07:04:28.887006 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-mwl7j"]
Nov 26 07:04:28 crc kubenswrapper[4492]: I1126 07:04:28.901311 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-19de-account-create-update-n48h2"
Nov 26 07:04:28 crc kubenswrapper[4492]: I1126 07:04:28.911473 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-b651-account-create-update-w2d6t"]
Nov 26 07:04:28 crc kubenswrapper[4492]: I1126 07:04:28.921612 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-b651-account-create-update-w2d6t"
Nov 26 07:04:28 crc kubenswrapper[4492]: I1126 07:04:28.931843 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-db-secret"
Nov 26 07:04:28 crc kubenswrapper[4492]: I1126 07:04:28.944167 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-m9nq2"
Nov 26 07:04:28 crc kubenswrapper[4492]: I1126 07:04:28.960692 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gt4th\" (UniqueName: \"kubernetes.io/projected/0ff43c91-26eb-40da-a2cc-9d07c1e3be1c-kube-api-access-gt4th\") pod \"placement-db-create-mwl7j\" (UID: \"0ff43c91-26eb-40da-a2cc-9d07c1e3be1c\") " pod="openstack/placement-db-create-mwl7j"
Nov 26 07:04:28 crc kubenswrapper[4492]: I1126 07:04:28.960918 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0ff43c91-26eb-40da-a2cc-9d07c1e3be1c-operator-scripts\") pod \"placement-db-create-mwl7j\" (UID: \"0ff43c91-26eb-40da-a2cc-9d07c1e3be1c\") " pod="openstack/placement-db-create-mwl7j"
Nov 26 07:04:28 crc kubenswrapper[4492]: I1126 07:04:28.991080 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-b651-account-create-update-w2d6t"]
Nov 26 07:04:29 crc kubenswrapper[4492]: I1126 07:04:29.023791 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-cell1-galera-0"
Nov 26 07:04:29 crc kubenswrapper[4492]: I1126 07:04:29.034784 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-kh56n"]
Nov 26 07:04:29 crc kubenswrapper[4492]: I1126 07:04:29.064494 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0ff43c91-26eb-40da-a2cc-9d07c1e3be1c-operator-scripts\") pod \"placement-db-create-mwl7j\" (UID: \"0ff43c91-26eb-40da-a2cc-9d07c1e3be1c\") " pod="openstack/placement-db-create-mwl7j"
Nov 26 07:04:29 crc kubenswrapper[4492]: I1126 07:04:29.064668 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cnzv8\" (UniqueName: \"kubernetes.io/projected/af5046a4-bfe1-474f-8954-ada04116efa6-kube-api-access-cnzv8\") pod \"placement-b651-account-create-update-w2d6t\" (UID: \"af5046a4-bfe1-474f-8954-ada04116efa6\") " pod="openstack/placement-b651-account-create-update-w2d6t"
Nov 26 07:04:29 crc kubenswrapper[4492]: I1126 07:04:29.064760 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gt4th\" (UniqueName: \"kubernetes.io/projected/0ff43c91-26eb-40da-a2cc-9d07c1e3be1c-kube-api-access-gt4th\") pod \"placement-db-create-mwl7j\" (UID: \"0ff43c91-26eb-40da-a2cc-9d07c1e3be1c\") " pod="openstack/placement-db-create-mwl7j"
Nov 26 07:04:29 crc kubenswrapper[4492]: I1126 07:04:29.064837 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/af5046a4-bfe1-474f-8954-ada04116efa6-operator-scripts\") pod \"placement-b651-account-create-update-w2d6t\" (UID: \"af5046a4-bfe1-474f-8954-ada04116efa6\") " pod="openstack/placement-b651-account-create-update-w2d6t"
Nov 26 07:04:29 crc kubenswrapper[4492]: I1126 07:04:29.065892 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0ff43c91-26eb-40da-a2cc-9d07c1e3be1c-operator-scripts\") pod \"placement-db-create-mwl7j\" (UID: \"0ff43c91-26eb-40da-a2cc-9d07c1e3be1c\") " pod="openstack/placement-db-create-mwl7j"
Nov 26 07:04:29 crc kubenswrapper[4492]: I1126 07:04:29.099810 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gt4th\" (UniqueName: \"kubernetes.io/projected/0ff43c91-26eb-40da-a2cc-9d07c1e3be1c-kube-api-access-gt4th\") pod \"placement-db-create-mwl7j\" (UID: \"0ff43c91-26eb-40da-a2cc-9d07c1e3be1c\") " pod="openstack/placement-db-create-mwl7j"
Nov 26 07:04:29 crc kubenswrapper[4492]: I1126 07:04:29.166757 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/af5046a4-bfe1-474f-8954-ada04116efa6-operator-scripts\") pod \"placement-b651-account-create-update-w2d6t\" (UID: \"af5046a4-bfe1-474f-8954-ada04116efa6\") " pod="openstack/placement-b651-account-create-update-w2d6t"
Nov 26 07:04:29 crc kubenswrapper[4492]: I1126 07:04:29.167247 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cnzv8\" (UniqueName: \"kubernetes.io/projected/af5046a4-bfe1-474f-8954-ada04116efa6-kube-api-access-cnzv8\") pod \"placement-b651-account-create-update-w2d6t\" (UID: \"af5046a4-bfe1-474f-8954-ada04116efa6\") " pod="openstack/placement-b651-account-create-update-w2d6t"
Nov 26 07:04:29 crc kubenswrapper[4492]: I1126 07:04:29.167474 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/af5046a4-bfe1-474f-8954-ada04116efa6-operator-scripts\") pod \"placement-b651-account-create-update-w2d6t\" (UID: \"af5046a4-bfe1-474f-8954-ada04116efa6\") " pod="openstack/placement-b651-account-create-update-w2d6t"
Nov 26 07:04:29 crc kubenswrapper[4492]: I1126 07:04:29.173458 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-mwl7j"
Nov 26 07:04:29 crc kubenswrapper[4492]: I1126 07:04:29.182256 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cnzv8\" (UniqueName: \"kubernetes.io/projected/af5046a4-bfe1-474f-8954-ada04116efa6-kube-api-access-cnzv8\") pod \"placement-b651-account-create-update-w2d6t\" (UID: \"af5046a4-bfe1-474f-8954-ada04116efa6\") " pod="openstack/placement-b651-account-create-update-w2d6t"
Nov 26 07:04:29 crc kubenswrapper[4492]: I1126 07:04:29.256667 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-b651-account-create-update-w2d6t"
Nov 26 07:04:29 crc kubenswrapper[4492]: I1126 07:04:29.472489 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-19de-account-create-update-n48h2"]
Nov 26 07:04:29 crc kubenswrapper[4492]: W1126 07:04:29.489994 4492 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod38aafecf_4ce9_460d_8a3d_aa42b566ab81.slice/crio-cd1fbba9f6bdda84abbf6aa4ac70e11aa29310f0b5aa98a0544bae887cb3e455 WatchSource:0}: Error finding container cd1fbba9f6bdda84abbf6aa4ac70e11aa29310f0b5aa98a0544bae887cb3e455: Status 404 returned error can't find the container with id cd1fbba9f6bdda84abbf6aa4ac70e11aa29310f0b5aa98a0544bae887cb3e455
Nov 26 07:04:29 crc kubenswrapper[4492]: I1126 07:04:29.565268 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-m9nq2"]
Nov 26 07:04:29 crc kubenswrapper[4492]: I1126 07:04:29.633541 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-19de-account-create-update-n48h2" event={"ID":"38aafecf-4ce9-460d-8a3d-aa42b566ab81","Type":"ContainerStarted","Data":"cd1fbba9f6bdda84abbf6aa4ac70e11aa29310f0b5aa98a0544bae887cb3e455"}
Nov 26 07:04:29 crc kubenswrapper[4492]: I1126 07:04:29.639793 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-m9nq2" event={"ID":"7788ab37-0666-48ca-aff9-b145fc248e4c","Type":"ContainerStarted","Data":"a7bdd5bb881fd447a00d29cd137f3ec738e684aa98994e54bbf570e765462f79"}
Nov 26 07:04:29 crc kubenswrapper[4492]: I1126 07:04:29.675709 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-mwl7j"]
Nov 26 07:04:29 crc kubenswrapper[4492]: W1126 07:04:29.712013 4492 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0ff43c91_26eb_40da_a2cc_9d07c1e3be1c.slice/crio-06c8c31247fee6df23bb8c3a5844d686452fff45f2622326f946515dc17f61be WatchSource:0}: Error finding container 06c8c31247fee6df23bb8c3a5844d686452fff45f2622326f946515dc17f61be: Status 404 returned error can't find the container with id 06c8c31247fee6df23bb8c3a5844d686452fff45f2622326f946515dc17f61be
Nov 26 07:04:29 crc kubenswrapper[4492]: I1126 07:04:29.813903 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-b651-account-create-update-w2d6t"]
Nov 26 07:04:29 crc kubenswrapper[4492]: I1126 07:04:29.837605 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-cell1-galera-0"
Nov 26 07:04:29 crc kubenswrapper[4492]: W1126 07:04:29.866297 4492 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podaf5046a4_bfe1_474f_8954_ada04116efa6.slice/crio-fe9aeb17be1184a1ff39f2dd2d25cb001ffb1f357cdce910ea7bc9469692e23f WatchSource:0}: Error finding container fe9aeb17be1184a1ff39f2dd2d25cb001ffb1f357cdce910ea7bc9469692e23f: Status 404 returned error can't find the container with id fe9aeb17be1184a1ff39f2dd2d25cb001ffb1f357cdce910ea7bc9469692e23f
Nov 26 07:04:29 crc kubenswrapper[4492]: I1126 07:04:29.886836 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/b96d68d3-ed92-40be-bfed-6143b3cdca02-etc-swift\") pod \"swift-storage-0\" (UID: \"b96d68d3-ed92-40be-bfed-6143b3cdca02\") " pod="openstack/swift-storage-0"
Nov 26 07:04:29 crc kubenswrapper[4492]: E1126 07:04:29.887015 4492 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found
Nov 26 07:04:29 crc kubenswrapper[4492]: E1126 07:04:29.887035 4492 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found
Nov 26 07:04:29 crc kubenswrapper[4492]: E1126 07:04:29.887074 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/b96d68d3-ed92-40be-bfed-6143b3cdca02-etc-swift podName:b96d68d3-ed92-40be-bfed-6143b3cdca02 nodeName:}" failed. No retries permitted until 2025-11-26 07:04:37.887061964 +0000 UTC m=+973.770950262 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/b96d68d3-ed92-40be-bfed-6143b3cdca02-etc-swift") pod "swift-storage-0" (UID: "b96d68d3-ed92-40be-bfed-6143b3cdca02") : configmap "swift-ring-files" not found
Nov 26 07:04:30 crc kubenswrapper[4492]: I1126 07:04:30.020896 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-c4tj9"]
Nov 26 07:04:30 crc kubenswrapper[4492]: I1126 07:04:30.021301 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-c4tj9" podUID="a883fb65-c766-4a13-bac0-177d4ffe2de2" containerName="registry-server" containerID="cri-o://667fe7f9fac92792f20759960bbdb77314e590e81b72b869b4c91e79661f1c69" gracePeriod=2
Nov 26 07:04:30 crc kubenswrapper[4492]: I1126 07:04:30.648926 4492 generic.go:334] "Generic (PLEG): container finished" podID="38aafecf-4ce9-460d-8a3d-aa42b566ab81" containerID="50a19b459fca099d754e857e8a42c1a6a26f7f26b34b2fbc6fc3a6f17dc0355d" exitCode=0
Nov 26 07:04:30 crc kubenswrapper[4492]: I1126 07:04:30.649348 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-19de-account-create-update-n48h2" event={"ID":"38aafecf-4ce9-460d-8a3d-aa42b566ab81","Type":"ContainerDied","Data":"50a19b459fca099d754e857e8a42c1a6a26f7f26b34b2fbc6fc3a6f17dc0355d"}
Nov 26 07:04:30 crc kubenswrapper[4492]: I1126 07:04:30.651282 4492 generic.go:334] "Generic (PLEG): container finished" podID="af5046a4-bfe1-474f-8954-ada04116efa6" containerID="df30eded3806eebf464d5499f35ee4fcef7419a8cc33fabd25ef30117d8e85ea" exitCode=0
Nov 26 07:04:30 crc kubenswrapper[4492]: I1126 07:04:30.651325 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-b651-account-create-update-w2d6t" event={"ID":"af5046a4-bfe1-474f-8954-ada04116efa6","Type":"ContainerDied","Data":"df30eded3806eebf464d5499f35ee4fcef7419a8cc33fabd25ef30117d8e85ea"}
Nov 26 07:04:30 crc kubenswrapper[4492]: I1126 07:04:30.651341 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-b651-account-create-update-w2d6t" event={"ID":"af5046a4-bfe1-474f-8954-ada04116efa6","Type":"ContainerStarted","Data":"fe9aeb17be1184a1ff39f2dd2d25cb001ffb1f357cdce910ea7bc9469692e23f"}
Nov 26 07:04:30 crc kubenswrapper[4492]: I1126 07:04:30.655291 4492 generic.go:334] "Generic (PLEG): container finished" podID="7788ab37-0666-48ca-aff9-b145fc248e4c" containerID="a29190ed28c314ec902e58e562106baebba6067c4ffa1da33fcdcb25bf62f805" exitCode=0
Nov 26 07:04:30 crc kubenswrapper[4492]: I1126 07:04:30.655338 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-m9nq2" event={"ID":"7788ab37-0666-48ca-aff9-b145fc248e4c","Type":"ContainerDied","Data":"a29190ed28c314ec902e58e562106baebba6067c4ffa1da33fcdcb25bf62f805"}
Nov 26 07:04:30 crc kubenswrapper[4492]: I1126 07:04:30.658584 4492 generic.go:334] "Generic (PLEG): container finished" podID="0ff43c91-26eb-40da-a2cc-9d07c1e3be1c" containerID="52fff9fdbf3891e54d5df5dedee4db6dd8022e4e02bdb8cbd05bb01b22b1e39f" exitCode=0
Nov 26 07:04:30 crc kubenswrapper[4492]: I1126 07:04:30.658701 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-mwl7j" event={"ID":"0ff43c91-26eb-40da-a2cc-9d07c1e3be1c","Type":"ContainerDied","Data":"52fff9fdbf3891e54d5df5dedee4db6dd8022e4e02bdb8cbd05bb01b22b1e39f"}
Nov 26 07:04:30 crc kubenswrapper[4492]: I1126 07:04:30.658740 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-mwl7j" event={"ID":"0ff43c91-26eb-40da-a2cc-9d07c1e3be1c","Type":"ContainerStarted","Data":"06c8c31247fee6df23bb8c3a5844d686452fff45f2622326f946515dc17f61be"}
Nov 26 07:04:30 crc kubenswrapper[4492]: I1126 07:04:30.668508 4492 generic.go:334] "Generic (PLEG): container finished" podID="a883fb65-c766-4a13-bac0-177d4ffe2de2" containerID="667fe7f9fac92792f20759960bbdb77314e590e81b72b869b4c91e79661f1c69" exitCode=0
Nov 26 07:04:30 crc kubenswrapper[4492]: I1126 07:04:30.669430 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c4tj9" event={"ID":"a883fb65-c766-4a13-bac0-177d4ffe2de2","Type":"ContainerDied","Data":"667fe7f9fac92792f20759960bbdb77314e590e81b72b869b4c91e79661f1c69"}
Nov 26 07:04:30 crc kubenswrapper[4492]: I1126 07:04:30.669592 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-kh56n" podUID="b9ccf1b8-a1eb-4501-b96c-929efdde7c47" containerName="registry-server" containerID="cri-o://c8fa541244d89f0dd6566869f9096b18ab4e232557c168d4547df0c00453667a" gracePeriod=2
Nov 26 07:04:31 crc kubenswrapper[4492]: I1126 07:04:31.320022 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6449747765-nngzp"
Nov 26 07:04:31 crc kubenswrapper[4492]: I1126 07:04:31.365796 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-66f998579f-v5z4b"]
Nov 26 07:04:31 crc kubenswrapper[4492]: I1126 07:04:31.366746 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-66f998579f-v5z4b" podUID="b05f3cd2-b60c-4dc3-9455-bf23335b678a" containerName="dnsmasq-dns" containerID="cri-o://d3c188c850d87c7c792ed41ff0caf5ac9c8200733029aedb42a8ae459b0d664d" gracePeriod=10
Nov 26 07:04:31 crc kubenswrapper[4492]: I1126 07:04:31.587025 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-c4tj9"
Nov 26 07:04:31 crc kubenswrapper[4492]: I1126 07:04:31.678073 4492 generic.go:334] "Generic (PLEG): container finished" podID="b05f3cd2-b60c-4dc3-9455-bf23335b678a" containerID="d3c188c850d87c7c792ed41ff0caf5ac9c8200733029aedb42a8ae459b0d664d" exitCode=0
Nov 26 07:04:31 crc kubenswrapper[4492]: I1126 07:04:31.678134 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-66f998579f-v5z4b" event={"ID":"b05f3cd2-b60c-4dc3-9455-bf23335b678a","Type":"ContainerDied","Data":"d3c188c850d87c7c792ed41ff0caf5ac9c8200733029aedb42a8ae459b0d664d"}
Nov 26 07:04:31 crc kubenswrapper[4492]: I1126 07:04:31.679665 4492 generic.go:334] "Generic (PLEG): container finished" podID="b9ccf1b8-a1eb-4501-b96c-929efdde7c47" containerID="c8fa541244d89f0dd6566869f9096b18ab4e232557c168d4547df0c00453667a" exitCode=0
Nov 26 07:04:31 crc kubenswrapper[4492]: I1126 07:04:31.679709 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kh56n" event={"ID":"b9ccf1b8-a1eb-4501-b96c-929efdde7c47","Type":"ContainerDied","Data":"c8fa541244d89f0dd6566869f9096b18ab4e232557c168d4547df0c00453667a"}
Nov 26 07:04:31 crc kubenswrapper[4492]: I1126 07:04:31.681360 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-c4tj9"
Nov 26 07:04:31 crc kubenswrapper[4492]: I1126 07:04:31.681794 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c4tj9" event={"ID":"a883fb65-c766-4a13-bac0-177d4ffe2de2","Type":"ContainerDied","Data":"e722f401c80fe89a3a44571ede359e47abfaf53b8d94a436e350dbfe4afa20cb"}
Nov 26 07:04:31 crc kubenswrapper[4492]: I1126 07:04:31.681821 4492 scope.go:117] "RemoveContainer" containerID="667fe7f9fac92792f20759960bbdb77314e590e81b72b869b4c91e79661f1c69"
Nov 26 07:04:31 crc kubenswrapper[4492]: I1126 07:04:31.753127 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a883fb65-c766-4a13-bac0-177d4ffe2de2-utilities\") pod \"a883fb65-c766-4a13-bac0-177d4ffe2de2\" (UID: \"a883fb65-c766-4a13-bac0-177d4ffe2de2\") "
Nov 26 07:04:31 crc kubenswrapper[4492]: I1126 07:04:31.753311 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4wfjp\" (UniqueName: \"kubernetes.io/projected/a883fb65-c766-4a13-bac0-177d4ffe2de2-kube-api-access-4wfjp\") pod \"a883fb65-c766-4a13-bac0-177d4ffe2de2\" (UID: \"a883fb65-c766-4a13-bac0-177d4ffe2de2\") "
Nov 26 07:04:31 crc kubenswrapper[4492]: I1126 07:04:31.753367 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a883fb65-c766-4a13-bac0-177d4ffe2de2-catalog-content\") pod \"a883fb65-c766-4a13-bac0-177d4ffe2de2\" (UID: \"a883fb65-c766-4a13-bac0-177d4ffe2de2\") "
Nov 26 07:04:31 crc kubenswrapper[4492]: I1126 07:04:31.756890 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a883fb65-c766-4a13-bac0-177d4ffe2de2-utilities" (OuterVolumeSpecName: "utilities") pod "a883fb65-c766-4a13-bac0-177d4ffe2de2" (UID: "a883fb65-c766-4a13-bac0-177d4ffe2de2"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 07:04:31 crc kubenswrapper[4492]: I1126 07:04:31.783438 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a883fb65-c766-4a13-bac0-177d4ffe2de2-kube-api-access-4wfjp" (OuterVolumeSpecName: "kube-api-access-4wfjp") pod "a883fb65-c766-4a13-bac0-177d4ffe2de2" (UID: "a883fb65-c766-4a13-bac0-177d4ffe2de2"). InnerVolumeSpecName "kube-api-access-4wfjp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 07:04:31 crc kubenswrapper[4492]: I1126 07:04:31.811066 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a883fb65-c766-4a13-bac0-177d4ffe2de2-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a883fb65-c766-4a13-bac0-177d4ffe2de2" (UID: "a883fb65-c766-4a13-bac0-177d4ffe2de2"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 07:04:31 crc kubenswrapper[4492]: I1126 07:04:31.855655 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4wfjp\" (UniqueName: \"kubernetes.io/projected/a883fb65-c766-4a13-bac0-177d4ffe2de2-kube-api-access-4wfjp\") on node \"crc\" DevicePath \"\""
Nov 26 07:04:31 crc kubenswrapper[4492]: I1126 07:04:31.855680 4492 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a883fb65-c766-4a13-bac0-177d4ffe2de2-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 26 07:04:31 crc kubenswrapper[4492]: I1126 07:04:31.855689 4492 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a883fb65-c766-4a13-bac0-177d4ffe2de2-utilities\") on node \"crc\" DevicePath \"\""
Nov 26 07:04:32 crc kubenswrapper[4492]: I1126 07:04:32.023656 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-c4tj9"]
Nov 26 07:04:32 crc kubenswrapper[4492]: I1126 07:04:32.028850 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-c4tj9"]
Nov 26 07:04:32 crc kubenswrapper[4492]: I1126 07:04:32.458433 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a883fb65-c766-4a13-bac0-177d4ffe2de2" path="/var/lib/kubelet/pods/a883fb65-c766-4a13-bac0-177d4ffe2de2/volumes"
Nov 26 07:04:33 crc kubenswrapper[4492]: I1126 07:04:33.103255 4492 scope.go:117] "RemoveContainer" containerID="25a9281aadab501ac13bdd51fe0230fc2490922ad9c8717d3b353da9dc1992dd"
Nov 26 07:04:33 crc kubenswrapper[4492]: I1126 07:04:33.213578 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-b651-account-create-update-w2d6t"
Nov 26 07:04:33 crc kubenswrapper[4492]: I1126 07:04:33.220025 4492 scope.go:117] "RemoveContainer" containerID="8f2597a58fb32da3dfeb32a3d8dcc27d91be2a0ffb55f795e752a457797b34ee"
Nov 26 07:04:33 crc kubenswrapper[4492]: I1126 07:04:33.246360 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-66f998579f-v5z4b"
Nov 26 07:04:33 crc kubenswrapper[4492]: I1126 07:04:33.250366 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-m9nq2"
Nov 26 07:04:33 crc kubenswrapper[4492]: I1126 07:04:33.257111 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-19de-account-create-update-n48h2"
Nov 26 07:04:33 crc kubenswrapper[4492]: I1126 07:04:33.275431 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-mwl7j"
Nov 26 07:04:33 crc kubenswrapper[4492]: I1126 07:04:33.298743 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-kh56n"
Nov 26 07:04:33 crc kubenswrapper[4492]: I1126 07:04:33.389465 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xrf9x\" (UniqueName: \"kubernetes.io/projected/b9ccf1b8-a1eb-4501-b96c-929efdde7c47-kube-api-access-xrf9x\") pod \"b9ccf1b8-a1eb-4501-b96c-929efdde7c47\" (UID: \"b9ccf1b8-a1eb-4501-b96c-929efdde7c47\") "
Nov 26 07:04:33 crc kubenswrapper[4492]: I1126 07:04:33.389607 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/af5046a4-bfe1-474f-8954-ada04116efa6-operator-scripts\") pod \"af5046a4-bfe1-474f-8954-ada04116efa6\" (UID: \"af5046a4-bfe1-474f-8954-ada04116efa6\") "
Nov 26 07:04:33 crc kubenswrapper[4492]: I1126 07:04:33.389641 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7788ab37-0666-48ca-aff9-b145fc248e4c-operator-scripts\") pod \"7788ab37-0666-48ca-aff9-b145fc248e4c\" (UID: \"7788ab37-0666-48ca-aff9-b145fc248e4c\") "
Nov 26 07:04:33 crc kubenswrapper[4492]: I1126 07:04:33.389701 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b9ccf1b8-a1eb-4501-b96c-929efdde7c47-utilities\") pod \"b9ccf1b8-a1eb-4501-b96c-929efdde7c47\" (UID: \"b9ccf1b8-a1eb-4501-b96c-929efdde7c47\") "
Nov 26 07:04:33 crc kubenswrapper[4492]: I1126 07:04:33.389786 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b9ccf1b8-a1eb-4501-b96c-929efdde7c47-catalog-content\") pod \"b9ccf1b8-a1eb-4501-b96c-929efdde7c47\" (UID: \"b9ccf1b8-a1eb-4501-b96c-929efdde7c47\") "
Nov 26 07:04:33 crc kubenswrapper[4492]: I1126 07:04:33.390483 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b9ccf1b8-a1eb-4501-b96c-929efdde7c47-utilities" (OuterVolumeSpecName: "utilities") pod "b9ccf1b8-a1eb-4501-b96c-929efdde7c47" (UID: "b9ccf1b8-a1eb-4501-b96c-929efdde7c47"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 07:04:33 crc kubenswrapper[4492]: I1126 07:04:33.390542 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-flxtb\" (UniqueName: \"kubernetes.io/projected/b05f3cd2-b60c-4dc3-9455-bf23335b678a-kube-api-access-flxtb\") pod \"b05f3cd2-b60c-4dc3-9455-bf23335b678a\" (UID: \"b05f3cd2-b60c-4dc3-9455-bf23335b678a\") "
Nov 26 07:04:33 crc kubenswrapper[4492]: I1126 07:04:33.390899 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7788ab37-0666-48ca-aff9-b145fc248e4c-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "7788ab37-0666-48ca-aff9-b145fc248e4c" (UID: "7788ab37-0666-48ca-aff9-b145fc248e4c"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 07:04:33 crc kubenswrapper[4492]: I1126 07:04:33.390952 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b05f3cd2-b60c-4dc3-9455-bf23335b678a-config\") pod \"b05f3cd2-b60c-4dc3-9455-bf23335b678a\" (UID: \"b05f3cd2-b60c-4dc3-9455-bf23335b678a\") "
Nov 26 07:04:33 crc kubenswrapper[4492]: I1126 07:04:33.391098 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0ff43c91-26eb-40da-a2cc-9d07c1e3be1c-operator-scripts\") pod \"0ff43c91-26eb-40da-a2cc-9d07c1e3be1c\" (UID: \"0ff43c91-26eb-40da-a2cc-9d07c1e3be1c\") "
Nov 26 07:04:33 crc kubenswrapper[4492]: I1126 07:04:33.391160 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/38aafecf-4ce9-460d-8a3d-aa42b566ab81-operator-scripts\") pod \"38aafecf-4ce9-460d-8a3d-aa42b566ab81\" (UID: \"38aafecf-4ce9-460d-8a3d-aa42b566ab81\") "
Nov 26 07:04:33 crc kubenswrapper[4492]: I1126 07:04:33.391286 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2rzzc\" (UniqueName: \"kubernetes.io/projected/7788ab37-0666-48ca-aff9-b145fc248e4c-kube-api-access-2rzzc\") pod \"7788ab37-0666-48ca-aff9-b145fc248e4c\" (UID: \"7788ab37-0666-48ca-aff9-b145fc248e4c\") "
Nov 26 07:04:33 crc kubenswrapper[4492]: I1126 07:04:33.391335 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-52n8j\" (UniqueName: \"kubernetes.io/projected/38aafecf-4ce9-460d-8a3d-aa42b566ab81-kube-api-access-52n8j\") pod \"38aafecf-4ce9-460d-8a3d-aa42b566ab81\" (UID: \"38aafecf-4ce9-460d-8a3d-aa42b566ab81\") "
Nov 26 07:04:33 crc kubenswrapper[4492]: I1126 07:04:33.391397 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gt4th\" (UniqueName: \"kubernetes.io/projected/0ff43c91-26eb-40da-a2cc-9d07c1e3be1c-kube-api-access-gt4th\") pod \"0ff43c91-26eb-40da-a2cc-9d07c1e3be1c\" (UID: \"0ff43c91-26eb-40da-a2cc-9d07c1e3be1c\") "
Nov 26 07:04:33 crc kubenswrapper[4492]: I1126 07:04:33.391426 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cnzv8\" (UniqueName: \"kubernetes.io/projected/af5046a4-bfe1-474f-8954-ada04116efa6-kube-api-access-cnzv8\") pod \"af5046a4-bfe1-474f-8954-ada04116efa6\" (UID: \"af5046a4-bfe1-474f-8954-ada04116efa6\") "
Nov 26 07:04:33 crc kubenswrapper[4492]: I1126 07:04:33.391475 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b05f3cd2-b60c-4dc3-9455-bf23335b678a-ovsdbserver-nb\") pod \"b05f3cd2-b60c-4dc3-9455-bf23335b678a\" (UID: \"b05f3cd2-b60c-4dc3-9455-bf23335b678a\") "
Nov 26 07:04:33 crc kubenswrapper[4492]: I1126 07:04:33.391527 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b05f3cd2-b60c-4dc3-9455-bf23335b678a-ovsdbserver-sb\") pod \"b05f3cd2-b60c-4dc3-9455-bf23335b678a\" (UID: \"b05f3cd2-b60c-4dc3-9455-bf23335b678a\") "
Nov 26 07:04:33 crc kubenswrapper[4492]: I1126 07:04:33.391591 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b05f3cd2-b60c-4dc3-9455-bf23335b678a-dns-svc\") pod \"b05f3cd2-b60c-4dc3-9455-bf23335b678a\" (UID: \"b05f3cd2-b60c-4dc3-9455-bf23335b678a\") "
Nov 26 07:04:33 crc kubenswrapper[4492]: I1126 07:04:33.391698 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/af5046a4-bfe1-474f-8954-ada04116efa6-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "af5046a4-bfe1-474f-8954-ada04116efa6" (UID: "af5046a4-bfe1-474f-8954-ada04116efa6"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 07:04:33 crc kubenswrapper[4492]: I1126 07:04:33.391879 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0ff43c91-26eb-40da-a2cc-9d07c1e3be1c-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "0ff43c91-26eb-40da-a2cc-9d07c1e3be1c" (UID: "0ff43c91-26eb-40da-a2cc-9d07c1e3be1c"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 07:04:33 crc kubenswrapper[4492]: I1126 07:04:33.393102 4492 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0ff43c91-26eb-40da-a2cc-9d07c1e3be1c-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 26 07:04:33 crc kubenswrapper[4492]: I1126 07:04:33.393129 4492 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/af5046a4-bfe1-474f-8954-ada04116efa6-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 26 07:04:33 crc kubenswrapper[4492]: I1126 07:04:33.393143 4492 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7788ab37-0666-48ca-aff9-b145fc248e4c-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 26 07:04:33 crc kubenswrapper[4492]: I1126 07:04:33.393153 4492 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b9ccf1b8-a1eb-4501-b96c-929efdde7c47-utilities\") on node \"crc\" DevicePath \"\""
Nov 26 07:04:33 crc kubenswrapper[4492]: I1126 07:04:33.392166 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/38aafecf-4ce9-460d-8a3d-aa42b566ab81-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "38aafecf-4ce9-460d-8a3d-aa42b566ab81" (UID: "38aafecf-4ce9-460d-8a3d-aa42b566ab81"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 07:04:33 crc kubenswrapper[4492]: I1126 07:04:33.395153 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b9ccf1b8-a1eb-4501-b96c-929efdde7c47-kube-api-access-xrf9x" (OuterVolumeSpecName: "kube-api-access-xrf9x") pod "b9ccf1b8-a1eb-4501-b96c-929efdde7c47" (UID: "b9ccf1b8-a1eb-4501-b96c-929efdde7c47"). InnerVolumeSpecName "kube-api-access-xrf9x". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 07:04:33 crc kubenswrapper[4492]: I1126 07:04:33.395857 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b05f3cd2-b60c-4dc3-9455-bf23335b678a-kube-api-access-flxtb" (OuterVolumeSpecName: "kube-api-access-flxtb") pod "b05f3cd2-b60c-4dc3-9455-bf23335b678a" (UID: "b05f3cd2-b60c-4dc3-9455-bf23335b678a"). InnerVolumeSpecName "kube-api-access-flxtb". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 07:04:33 crc kubenswrapper[4492]: I1126 07:04:33.396095 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/38aafecf-4ce9-460d-8a3d-aa42b566ab81-kube-api-access-52n8j" (OuterVolumeSpecName: "kube-api-access-52n8j") pod "38aafecf-4ce9-460d-8a3d-aa42b566ab81" (UID: "38aafecf-4ce9-460d-8a3d-aa42b566ab81"). InnerVolumeSpecName "kube-api-access-52n8j". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 07:04:33 crc kubenswrapper[4492]: I1126 07:04:33.396420 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7788ab37-0666-48ca-aff9-b145fc248e4c-kube-api-access-2rzzc" (OuterVolumeSpecName: "kube-api-access-2rzzc") pod "7788ab37-0666-48ca-aff9-b145fc248e4c" (UID: "7788ab37-0666-48ca-aff9-b145fc248e4c"). InnerVolumeSpecName "kube-api-access-2rzzc". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 07:04:33 crc kubenswrapper[4492]: I1126 07:04:33.400537 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0ff43c91-26eb-40da-a2cc-9d07c1e3be1c-kube-api-access-gt4th" (OuterVolumeSpecName: "kube-api-access-gt4th") pod "0ff43c91-26eb-40da-a2cc-9d07c1e3be1c" (UID: "0ff43c91-26eb-40da-a2cc-9d07c1e3be1c"). InnerVolumeSpecName "kube-api-access-gt4th". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 07:04:33 crc kubenswrapper[4492]: I1126 07:04:33.400937 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/af5046a4-bfe1-474f-8954-ada04116efa6-kube-api-access-cnzv8" (OuterVolumeSpecName: "kube-api-access-cnzv8") pod "af5046a4-bfe1-474f-8954-ada04116efa6" (UID: "af5046a4-bfe1-474f-8954-ada04116efa6"). InnerVolumeSpecName "kube-api-access-cnzv8". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 07:04:33 crc kubenswrapper[4492]: I1126 07:04:33.424648 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b05f3cd2-b60c-4dc3-9455-bf23335b678a-config" (OuterVolumeSpecName: "config") pod "b05f3cd2-b60c-4dc3-9455-bf23335b678a" (UID: "b05f3cd2-b60c-4dc3-9455-bf23335b678a"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 07:04:33 crc kubenswrapper[4492]: I1126 07:04:33.431724 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b05f3cd2-b60c-4dc3-9455-bf23335b678a-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "b05f3cd2-b60c-4dc3-9455-bf23335b678a" (UID: "b05f3cd2-b60c-4dc3-9455-bf23335b678a"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 07:04:33 crc kubenswrapper[4492]: I1126 07:04:33.436344 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b9ccf1b8-a1eb-4501-b96c-929efdde7c47-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b9ccf1b8-a1eb-4501-b96c-929efdde7c47" (UID: "b9ccf1b8-a1eb-4501-b96c-929efdde7c47"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 07:04:33 crc kubenswrapper[4492]: I1126 07:04:33.436719 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b05f3cd2-b60c-4dc3-9455-bf23335b678a-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "b05f3cd2-b60c-4dc3-9455-bf23335b678a" (UID: "b05f3cd2-b60c-4dc3-9455-bf23335b678a"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 07:04:33 crc kubenswrapper[4492]: I1126 07:04:33.437347 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b05f3cd2-b60c-4dc3-9455-bf23335b678a-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "b05f3cd2-b60c-4dc3-9455-bf23335b678a" (UID: "b05f3cd2-b60c-4dc3-9455-bf23335b678a"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 07:04:33 crc kubenswrapper[4492]: I1126 07:04:33.495778 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xrf9x\" (UniqueName: \"kubernetes.io/projected/b9ccf1b8-a1eb-4501-b96c-929efdde7c47-kube-api-access-xrf9x\") on node \"crc\" DevicePath \"\""
Nov 26 07:04:33 crc kubenswrapper[4492]: I1126 07:04:33.495819 4492 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b9ccf1b8-a1eb-4501-b96c-929efdde7c47-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 26 07:04:33 crc kubenswrapper[4492]: I1126 07:04:33.495831 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-flxtb\" (UniqueName: \"kubernetes.io/projected/b05f3cd2-b60c-4dc3-9455-bf23335b678a-kube-api-access-flxtb\") on node \"crc\" DevicePath \"\""
Nov 26 07:04:33 crc kubenswrapper[4492]: I1126 07:04:33.495844 4492 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b05f3cd2-b60c-4dc3-9455-bf23335b678a-config\") on node \"crc\" DevicePath \"\""
Nov 26 07:04:33 crc kubenswrapper[4492]: I1126 07:04:33.495888 4492 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/38aafecf-4ce9-460d-8a3d-aa42b566ab81-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 26 07:04:33 crc kubenswrapper[4492]: I1126 07:04:33.495899 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2rzzc\" (UniqueName: \"kubernetes.io/projected/7788ab37-0666-48ca-aff9-b145fc248e4c-kube-api-access-2rzzc\") on node \"crc\" DevicePath \"\""
Nov 26 07:04:33 crc kubenswrapper[4492]: I1126 07:04:33.495916 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-52n8j\" (UniqueName: \"kubernetes.io/projected/38aafecf-4ce9-460d-8a3d-aa42b566ab81-kube-api-access-52n8j\") on node \"crc\" DevicePath \"\""
Nov 26 07:04:33 crc kubenswrapper[4492]: I1126 07:04:33.495927 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gt4th\" (UniqueName: \"kubernetes.io/projected/0ff43c91-26eb-40da-a2cc-9d07c1e3be1c-kube-api-access-gt4th\") on node \"crc\" DevicePath \"\""
Nov 26 07:04:33 crc kubenswrapper[4492]: I1126 07:04:33.495936 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cnzv8\" (UniqueName: \"kubernetes.io/projected/af5046a4-bfe1-474f-8954-ada04116efa6-kube-api-access-cnzv8\") on node \"crc\" DevicePath \"\""
Nov 26 07:04:33 crc kubenswrapper[4492]: I1126 07:04:33.495945 4492 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b05f3cd2-b60c-4dc3-9455-bf23335b678a-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Nov 26 07:04:33 crc kubenswrapper[4492]: I1126 07:04:33.495958 4492 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b05f3cd2-b60c-4dc3-9455-bf23335b678a-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Nov 26 07:04:33 crc kubenswrapper[4492]: I1126 07:04:33.495966 4492 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b05f3cd2-b60c-4dc3-9455-bf23335b678a-dns-svc\") on node \"crc\" DevicePath \"\""
Nov 26 07:04:33 crc kubenswrapper[4492]: I1126 07:04:33.701999 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-19de-account-create-update-n48h2" event={"ID":"38aafecf-4ce9-460d-8a3d-aa42b566ab81","Type":"ContainerDied","Data":"cd1fbba9f6bdda84abbf6aa4ac70e11aa29310f0b5aa98a0544bae887cb3e455"}
Nov 26 07:04:33 crc kubenswrapper[4492]: I1126 07:04:33.702119 4492 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="cd1fbba9f6bdda84abbf6aa4ac70e11aa29310f0b5aa98a0544bae887cb3e455"
Nov 26 07:04:33 crc kubenswrapper[4492]: I1126 07:04:33.702028 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-19de-account-create-update-n48h2"
Nov 26 07:04:33 crc kubenswrapper[4492]: I1126 07:04:33.704220 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-kcsnv" event={"ID":"a4ff9a37-0e51-4210-8b06-9c10d0c1c3ba","Type":"ContainerStarted","Data":"5d1a95a642892281824c953077c28648621f41b19d73ef7d8b96b84e3468bf2f"}
Nov 26 07:04:33 crc kubenswrapper[4492]: I1126 07:04:33.706292 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-b651-account-create-update-w2d6t" event={"ID":"af5046a4-bfe1-474f-8954-ada04116efa6","Type":"ContainerDied","Data":"fe9aeb17be1184a1ff39f2dd2d25cb001ffb1f357cdce910ea7bc9469692e23f"}
Nov 26 07:04:33 crc kubenswrapper[4492]: I1126 07:04:33.706404 4492 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fe9aeb17be1184a1ff39f2dd2d25cb001ffb1f357cdce910ea7bc9469692e23f"
Nov 26 07:04:33 crc kubenswrapper[4492]: I1126 07:04:33.706321 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-b651-account-create-update-w2d6t"
Nov 26 07:04:33 crc kubenswrapper[4492]: I1126 07:04:33.708155 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-m9nq2" event={"ID":"7788ab37-0666-48ca-aff9-b145fc248e4c","Type":"ContainerDied","Data":"a7bdd5bb881fd447a00d29cd137f3ec738e684aa98994e54bbf570e765462f79"}
Nov 26 07:04:33 crc kubenswrapper[4492]: I1126 07:04:33.708198 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-m9nq2"
Nov 26 07:04:33 crc kubenswrapper[4492]: I1126 07:04:33.708218 4492 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a7bdd5bb881fd447a00d29cd137f3ec738e684aa98994e54bbf570e765462f79"
Nov 26 07:04:33 crc kubenswrapper[4492]: I1126 07:04:33.709736 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-mwl7j" event={"ID":"0ff43c91-26eb-40da-a2cc-9d07c1e3be1c","Type":"ContainerDied","Data":"06c8c31247fee6df23bb8c3a5844d686452fff45f2622326f946515dc17f61be"}
Nov 26 07:04:33 crc kubenswrapper[4492]: I1126 07:04:33.709747 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-mwl7j"
Nov 26 07:04:33 crc kubenswrapper[4492]: I1126 07:04:33.709759 4492 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="06c8c31247fee6df23bb8c3a5844d686452fff45f2622326f946515dc17f61be"
Nov 26 07:04:33 crc kubenswrapper[4492]: I1126 07:04:33.712735 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kh56n" event={"ID":"b9ccf1b8-a1eb-4501-b96c-929efdde7c47","Type":"ContainerDied","Data":"f5559be523f7106c5547a83ae4973770c22ca85f583ac8bc3b9659bf088b5377"}
Nov 26 07:04:33 crc kubenswrapper[4492]: I1126 07:04:33.712836 4492 scope.go:117] "RemoveContainer" containerID="c8fa541244d89f0dd6566869f9096b18ab4e232557c168d4547df0c00453667a"
Nov 26 07:04:33 crc kubenswrapper[4492]: I1126 07:04:33.712881 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-kh56n"
Nov 26 07:04:33 crc kubenswrapper[4492]: I1126 07:04:33.727560 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-ring-rebalance-kcsnv" podStartSLOduration=1.751361138 podStartE2EDuration="7.727547268s" podCreationTimestamp="2025-11-26 07:04:26 +0000 UTC" firstStartedPulling="2025-11-26 07:04:27.165307614 +0000 UTC m=+963.049195911" lastFinishedPulling="2025-11-26 07:04:33.141493753 +0000 UTC m=+969.025382041" observedRunningTime="2025-11-26 07:04:33.725220112 +0000 UTC m=+969.609108410" watchObservedRunningTime="2025-11-26 07:04:33.727547268 +0000 UTC m=+969.611435565"
Nov 26 07:04:33 crc kubenswrapper[4492]: I1126 07:04:33.729983 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-66f998579f-v5z4b" event={"ID":"b05f3cd2-b60c-4dc3-9455-bf23335b678a","Type":"ContainerDied","Data":"bb471aa8e4c11fa404fdbe26c149a12053cb31c01f54866a20863a420714761e"}
Nov 26 07:04:33 crc kubenswrapper[4492]: I1126 07:04:33.730335 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-66f998579f-v5z4b"
Nov 26 07:04:33 crc kubenswrapper[4492]: I1126 07:04:33.754888 4492 scope.go:117] "RemoveContainer" containerID="e71ca96ed9f545971c429ea0c754d993eb4b8ae1c656206017aabb06f50144e2"
Nov 26 07:04:33 crc kubenswrapper[4492]: I1126 07:04:33.790492 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-kh56n"]
Nov 26 07:04:33 crc kubenswrapper[4492]: I1126 07:04:33.799473 4492 scope.go:117] "RemoveContainer" containerID="2dd03be46eb8fb90ed6fc6a22a4ed1bd99a099ae388f17b75b01084b532f3929"
Nov 26 07:04:33 crc kubenswrapper[4492]: I1126 07:04:33.814187 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-kh56n"]
Nov 26 07:04:33 crc kubenswrapper[4492]: I1126 07:04:33.814244 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-66f998579f-v5z4b"]
Nov 26 07:04:33 crc kubenswrapper[4492]: I1126 07:04:33.818252 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-66f998579f-v5z4b"]
Nov 26 07:04:33 crc kubenswrapper[4492]: I1126 07:04:33.819672 4492 scope.go:117] "RemoveContainer" containerID="d3c188c850d87c7c792ed41ff0caf5ac9c8200733029aedb42a8ae459b0d664d"
Nov 26 07:04:33 crc kubenswrapper[4492]: I1126 07:04:33.852496 4492 scope.go:117] "RemoveContainer" containerID="54c7391888a2d826f38a2c3d2511eab9909b6c6eaf0432b604f32e3adcf0de5c"
Nov 26 07:04:34 crc kubenswrapper[4492]: I1126 07:04:34.040763 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-2nmf5"
Nov 26 07:04:34 crc kubenswrapper[4492]: I1126 07:04:34.119465 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-create-txt7d"]
Nov 26 07:04:34 crc kubenswrapper[4492]: E1126 07:04:34.119798 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b9ccf1b8-a1eb-4501-b96c-929efdde7c47" containerName="extract-utilities"
Nov 26 07:04:34 crc kubenswrapper[4492]: I1126 07:04:34.119811 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="b9ccf1b8-a1eb-4501-b96c-929efdde7c47" containerName="extract-utilities"
Nov 26 07:04:34 crc kubenswrapper[4492]: E1126 07:04:34.119823 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a883fb65-c766-4a13-bac0-177d4ffe2de2" containerName="extract-utilities"
Nov 26 07:04:34 crc kubenswrapper[4492]: I1126 07:04:34.119829 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="a883fb65-c766-4a13-bac0-177d4ffe2de2" containerName="extract-utilities"
Nov 26 07:04:34 crc kubenswrapper[4492]: E1126 07:04:34.119836 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b9ccf1b8-a1eb-4501-b96c-929efdde7c47" containerName="registry-server"
Nov 26 07:04:34 crc kubenswrapper[4492]: I1126 07:04:34.119843 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="b9ccf1b8-a1eb-4501-b96c-929efdde7c47" containerName="registry-server"
Nov 26 07:04:34 crc kubenswrapper[4492]: E1126 07:04:34.119852 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a883fb65-c766-4a13-bac0-177d4ffe2de2" containerName="extract-content"
Nov 26 07:04:34 crc kubenswrapper[4492]: I1126 07:04:34.119857 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="a883fb65-c766-4a13-bac0-177d4ffe2de2" containerName="extract-content"
Nov 26 07:04:34 crc kubenswrapper[4492]: E1126 07:04:34.119864 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a883fb65-c766-4a13-bac0-177d4ffe2de2" containerName="registry-server"
Nov 26 07:04:34 crc kubenswrapper[4492]: I1126 07:04:34.119869 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="a883fb65-c766-4a13-bac0-177d4ffe2de2" containerName="registry-server"
Nov 26 07:04:34 crc kubenswrapper[4492]: E1126 07:04:34.119875 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="38aafecf-4ce9-460d-8a3d-aa42b566ab81" containerName="mariadb-account-create-update"
Nov 26 07:04:34 crc kubenswrapper[4492]: I1126 07:04:34.119880 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="38aafecf-4ce9-460d-8a3d-aa42b566ab81" containerName="mariadb-account-create-update"
Nov 26 07:04:34 crc kubenswrapper[4492]: E1126 07:04:34.119888 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="af5046a4-bfe1-474f-8954-ada04116efa6" containerName="mariadb-account-create-update"
Nov 26 07:04:34 crc kubenswrapper[4492]: I1126 07:04:34.119893 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="af5046a4-bfe1-474f-8954-ada04116efa6" containerName="mariadb-account-create-update"
Nov 26 07:04:34 crc kubenswrapper[4492]: E1126 07:04:34.119900 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b05f3cd2-b60c-4dc3-9455-bf23335b678a" containerName="dnsmasq-dns"
Nov 26 07:04:34 crc kubenswrapper[4492]: I1126 07:04:34.119934 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="b05f3cd2-b60c-4dc3-9455-bf23335b678a" containerName="dnsmasq-dns"
Nov 26 07:04:34 crc kubenswrapper[4492]: E1126 07:04:34.119951 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7788ab37-0666-48ca-aff9-b145fc248e4c" containerName="mariadb-database-create"
Nov 26 07:04:34 crc kubenswrapper[4492]: I1126 07:04:34.119956 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="7788ab37-0666-48ca-aff9-b145fc248e4c" containerName="mariadb-database-create"
Nov 26 07:04:34 crc kubenswrapper[4492]: E1126 07:04:34.119962 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0ff43c91-26eb-40da-a2cc-9d07c1e3be1c" containerName="mariadb-database-create"
Nov 26 07:04:34 crc kubenswrapper[4492]: I1126 07:04:34.119967 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="0ff43c91-26eb-40da-a2cc-9d07c1e3be1c" containerName="mariadb-database-create"
Nov 26 07:04:34 crc kubenswrapper[4492]: E1126 07:04:34.119977 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b9ccf1b8-a1eb-4501-b96c-929efdde7c47" containerName="extract-content"
Nov 26 07:04:34 crc kubenswrapper[4492]: I1126 07:04:34.119982 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="b9ccf1b8-a1eb-4501-b96c-929efdde7c47" containerName="extract-content"
Nov 26 07:04:34 crc kubenswrapper[4492]: E1126 07:04:34.119998 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b05f3cd2-b60c-4dc3-9455-bf23335b678a" containerName="init"
Nov 26 07:04:34 crc kubenswrapper[4492]: I1126 07:04:34.120003 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="b05f3cd2-b60c-4dc3-9455-bf23335b678a" containerName="init"
Nov 26 07:04:34 crc kubenswrapper[4492]: I1126 07:04:34.120147 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="b9ccf1b8-a1eb-4501-b96c-929efdde7c47" containerName="registry-server"
Nov 26 07:04:34 crc kubenswrapper[4492]: I1126 07:04:34.120155 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="af5046a4-bfe1-474f-8954-ada04116efa6" containerName="mariadb-account-create-update"
Nov 26 07:04:34 crc kubenswrapper[4492]: I1126 07:04:34.120162 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="b05f3cd2-b60c-4dc3-9455-bf23335b678a" containerName="dnsmasq-dns"
Nov 26 07:04:34 crc kubenswrapper[4492]: I1126 07:04:34.120181 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="0ff43c91-26eb-40da-a2cc-9d07c1e3be1c" containerName="mariadb-database-create"
Nov 26 07:04:34 crc kubenswrapper[4492]: I1126 07:04:34.120192 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="a883fb65-c766-4a13-bac0-177d4ffe2de2" containerName="registry-server"
Nov 26 07:04:34 crc kubenswrapper[4492]: I1126 07:04:34.120200 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="38aafecf-4ce9-460d-8a3d-aa42b566ab81" containerName="mariadb-account-create-update"
Nov 26 07:04:34 crc kubenswrapper[4492]: I1126 07:04:34.120208 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="7788ab37-0666-48ca-aff9-b145fc248e4c" containerName="mariadb-database-create"
Nov 26 07:04:34 crc kubenswrapper[4492]: I1126 07:04:34.120717 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-txt7d"
Nov 26 07:04:34 crc kubenswrapper[4492]: I1126 07:04:34.122658 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7m4hd\" (UniqueName: \"kubernetes.io/projected/282cd276-6463-4ade-8c5a-a7682fc10269-kube-api-access-7m4hd\") pod \"glance-db-create-txt7d\" (UID: \"282cd276-6463-4ade-8c5a-a7682fc10269\") " pod="openstack/glance-db-create-txt7d"
Nov 26 07:04:34 crc kubenswrapper[4492]: I1126 07:04:34.122757 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/282cd276-6463-4ade-8c5a-a7682fc10269-operator-scripts\") pod \"glance-db-create-txt7d\" (UID: \"282cd276-6463-4ade-8c5a-a7682fc10269\") " pod="openstack/glance-db-create-txt7d"
Nov 26 07:04:34 crc kubenswrapper[4492]: I1126 07:04:34.136186 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-txt7d"]
Nov 26 07:04:34 crc kubenswrapper[4492]: I1126 07:04:34.162156 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-2nmf5"
Nov 26 07:04:34 crc kubenswrapper[4492]: I1126 07:04:34.223312 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-89b0-account-create-update-f54f8"]
Nov 26 07:04:34 crc kubenswrapper[4492]: I1126 07:04:34.224544 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-89b0-account-create-update-f54f8"
Nov 26 07:04:34 crc kubenswrapper[4492]: I1126 07:04:34.225463 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/282cd276-6463-4ade-8c5a-a7682fc10269-operator-scripts\") pod \"glance-db-create-txt7d\" (UID: \"282cd276-6463-4ade-8c5a-a7682fc10269\") " pod="openstack/glance-db-create-txt7d"
Nov 26 07:04:34 crc kubenswrapper[4492]: I1126 07:04:34.225599 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7m4hd\" (UniqueName: \"kubernetes.io/projected/282cd276-6463-4ade-8c5a-a7682fc10269-kube-api-access-7m4hd\") pod \"glance-db-create-txt7d\" (UID: \"282cd276-6463-4ade-8c5a-a7682fc10269\") " pod="openstack/glance-db-create-txt7d"
Nov 26 07:04:34 crc kubenswrapper[4492]: I1126 07:04:34.226256 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/282cd276-6463-4ade-8c5a-a7682fc10269-operator-scripts\") pod \"glance-db-create-txt7d\" (UID: \"282cd276-6463-4ade-8c5a-a7682fc10269\") " pod="openstack/glance-db-create-txt7d"
Nov 26 07:04:34 crc kubenswrapper[4492]: I1126 07:04:34.227855 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-db-secret"
Nov 26 07:04:34 crc kubenswrapper[4492]: I1126 07:04:34.238487 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-89b0-account-create-update-f54f8"]
Nov 26 07:04:34 crc kubenswrapper[4492]: I1126 07:04:34.248857 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7m4hd\" (UniqueName: \"kubernetes.io/projected/282cd276-6463-4ade-8c5a-a7682fc10269-kube-api-access-7m4hd\") pod \"glance-db-create-txt7d\" (UID: \"282cd276-6463-4ade-8c5a-a7682fc10269\") " pod="openstack/glance-db-create-txt7d"
Nov 26 07:04:34 crc kubenswrapper[4492]: I1126 07:04:34.327019 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vggpp\" (UniqueName: \"kubernetes.io/projected/35c76cd0-263a-4ad9-8d69-5fa9960652f3-kube-api-access-vggpp\") pod \"glance-89b0-account-create-update-f54f8\" (UID: \"35c76cd0-263a-4ad9-8d69-5fa9960652f3\") " pod="openstack/glance-89b0-account-create-update-f54f8"
Nov 26 07:04:34 crc kubenswrapper[4492]: I1126 07:04:34.327245 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/35c76cd0-263a-4ad9-8d69-5fa9960652f3-operator-scripts\") pod \"glance-89b0-account-create-update-f54f8\" (UID: \"35c76cd0-263a-4ad9-8d69-5fa9960652f3\") " pod="openstack/glance-89b0-account-create-update-f54f8"
Nov 26 07:04:34 crc kubenswrapper[4492]: I1126 07:04:34.429260 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vggpp\" (UniqueName: \"kubernetes.io/projected/35c76cd0-263a-4ad9-8d69-5fa9960652f3-kube-api-access-vggpp\") pod \"glance-89b0-account-create-update-f54f8\" (UID: \"35c76cd0-263a-4ad9-8d69-5fa9960652f3\") " pod="openstack/glance-89b0-account-create-update-f54f8"
Nov 26 07:04:34 crc kubenswrapper[4492]: I1126 07:04:34.429383 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/35c76cd0-263a-4ad9-8d69-5fa9960652f3-operator-scripts\") pod \"glance-89b0-account-create-update-f54f8\" (UID: \"35c76cd0-263a-4ad9-8d69-5fa9960652f3\") " pod="openstack/glance-89b0-account-create-update-f54f8"
Nov 26 07:04:34 crc kubenswrapper[4492]: I1126 07:04:34.430078 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/35c76cd0-263a-4ad9-8d69-5fa9960652f3-operator-scripts\") pod \"glance-89b0-account-create-update-f54f8\" (UID: \"35c76cd0-263a-4ad9-8d69-5fa9960652f3\") " pod="openstack/glance-89b0-account-create-update-f54f8"
Nov 26 07:04:34 crc kubenswrapper[4492]: I1126 07:04:34.453387 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vggpp\" (UniqueName: \"kubernetes.io/projected/35c76cd0-263a-4ad9-8d69-5fa9960652f3-kube-api-access-vggpp\") pod \"glance-89b0-account-create-update-f54f8\" (UID: \"35c76cd0-263a-4ad9-8d69-5fa9960652f3\") " pod="openstack/glance-89b0-account-create-update-f54f8"
Nov 26 07:04:34 crc kubenswrapper[4492]: I1126 07:04:34.459346 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-txt7d"
Nov 26 07:04:34 crc kubenswrapper[4492]: I1126 07:04:34.459587 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b05f3cd2-b60c-4dc3-9455-bf23335b678a" path="/var/lib/kubelet/pods/b05f3cd2-b60c-4dc3-9455-bf23335b678a/volumes"
Nov 26 07:04:34 crc kubenswrapper[4492]: I1126 07:04:34.461792 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b9ccf1b8-a1eb-4501-b96c-929efdde7c47" path="/var/lib/kubelet/pods/b9ccf1b8-a1eb-4501-b96c-929efdde7c47/volumes"
Nov 26 07:04:34 crc kubenswrapper[4492]: I1126 07:04:34.536062 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-89b0-account-create-update-f54f8"
Nov 26 07:04:34 crc kubenswrapper[4492]: I1126 07:04:34.850466 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-txt7d"]
Nov 26 07:04:34 crc kubenswrapper[4492]: W1126 07:04:34.852568 4492 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod282cd276_6463_4ade_8c5a_a7682fc10269.slice/crio-0a44d6444416a7752657bcd2ec206ef5f82739f1e8984a0ef5f554b7fabefff0 WatchSource:0}: Error finding container 0a44d6444416a7752657bcd2ec206ef5f82739f1e8984a0ef5f554b7fabefff0: Status 404 returned error can't find the container with id 0a44d6444416a7752657bcd2ec206ef5f82739f1e8984a0ef5f554b7fabefff0
Nov 26 07:04:35 crc kubenswrapper[4492]: I1126 07:04:35.006927 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-89b0-account-create-update-f54f8"]
Nov 26 07:04:35 crc kubenswrapper[4492]: W1126 07:04:35.015341 4492 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod35c76cd0_263a_4ad9_8d69_5fa9960652f3.slice/crio-7b519545ac9ae36e755a327ad8afa22333b59390484b007cbea795d557dad74c WatchSource:0}: Error finding container 7b519545ac9ae36e755a327ad8afa22333b59390484b007cbea795d557dad74c: Status 404 returned error can't find the container with id 7b519545ac9ae36e755a327ad8afa22333b59390484b007cbea795d557dad74c
Nov 26 07:04:35 crc kubenswrapper[4492]: I1126 07:04:35.767561 4492 generic.go:334] "Generic (PLEG): container finished" podID="35c76cd0-263a-4ad9-8d69-5fa9960652f3" containerID="d2f8d830e51b4d867cb451729dc1f9967f17e8cf5b60090c697ff70977a76be5" exitCode=0
Nov 26 07:04:35 crc
kubenswrapper[4492]: I1126 07:04:35.767682 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-89b0-account-create-update-f54f8" event={"ID":"35c76cd0-263a-4ad9-8d69-5fa9960652f3","Type":"ContainerDied","Data":"d2f8d830e51b4d867cb451729dc1f9967f17e8cf5b60090c697ff70977a76be5"} Nov 26 07:04:35 crc kubenswrapper[4492]: I1126 07:04:35.767780 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-89b0-account-create-update-f54f8" event={"ID":"35c76cd0-263a-4ad9-8d69-5fa9960652f3","Type":"ContainerStarted","Data":"7b519545ac9ae36e755a327ad8afa22333b59390484b007cbea795d557dad74c"} Nov 26 07:04:35 crc kubenswrapper[4492]: I1126 07:04:35.769600 4492 generic.go:334] "Generic (PLEG): container finished" podID="282cd276-6463-4ade-8c5a-a7682fc10269" containerID="48c8e66bb4cec5ab05350b6d4032ff9c8cd7464acdac7323636e8673eb2aad69" exitCode=0 Nov 26 07:04:35 crc kubenswrapper[4492]: I1126 07:04:35.769705 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-txt7d" event={"ID":"282cd276-6463-4ade-8c5a-a7682fc10269","Type":"ContainerDied","Data":"48c8e66bb4cec5ab05350b6d4032ff9c8cd7464acdac7323636e8673eb2aad69"} Nov 26 07:04:35 crc kubenswrapper[4492]: I1126 07:04:35.769802 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-txt7d" event={"ID":"282cd276-6463-4ade-8c5a-a7682fc10269","Type":"ContainerStarted","Data":"0a44d6444416a7752657bcd2ec206ef5f82739f1e8984a0ef5f554b7fabefff0"} Nov 26 07:04:36 crc kubenswrapper[4492]: I1126 07:04:36.424315 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-2nmf5"] Nov 26 07:04:36 crc kubenswrapper[4492]: I1126 07:04:36.425050 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-2nmf5" podUID="0fd9ab99-d364-4442-a203-ce8a7b838cf3" containerName="registry-server" containerID="cri-o://2ad589ae4993de38d6b48392104ac2e0cafe6527ca5f9030066e0b3d3d49e4a9" gracePeriod=2 Nov 26 07:04:36 crc kubenswrapper[4492]: I1126 07:04:36.780862 4492 generic.go:334] "Generic (PLEG): container finished" podID="0fd9ab99-d364-4442-a203-ce8a7b838cf3" containerID="2ad589ae4993de38d6b48392104ac2e0cafe6527ca5f9030066e0b3d3d49e4a9" exitCode=0 Nov 26 07:04:36 crc kubenswrapper[4492]: I1126 07:04:36.780947 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2nmf5" event={"ID":"0fd9ab99-d364-4442-a203-ce8a7b838cf3","Type":"ContainerDied","Data":"2ad589ae4993de38d6b48392104ac2e0cafe6527ca5f9030066e0b3d3d49e4a9"} Nov 26 07:04:36 crc kubenswrapper[4492]: I1126 07:04:36.870859 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-2nmf5" Nov 26 07:04:36 crc kubenswrapper[4492]: I1126 07:04:36.991375 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0fd9ab99-d364-4442-a203-ce8a7b838cf3-catalog-content\") pod \"0fd9ab99-d364-4442-a203-ce8a7b838cf3\" (UID: \"0fd9ab99-d364-4442-a203-ce8a7b838cf3\") " Nov 26 07:04:36 crc kubenswrapper[4492]: I1126 07:04:36.991453 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d49m6\" (UniqueName: \"kubernetes.io/projected/0fd9ab99-d364-4442-a203-ce8a7b838cf3-kube-api-access-d49m6\") pod \"0fd9ab99-d364-4442-a203-ce8a7b838cf3\" (UID: \"0fd9ab99-d364-4442-a203-ce8a7b838cf3\") " Nov 26 07:04:36 crc kubenswrapper[4492]: I1126 07:04:36.991626 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0fd9ab99-d364-4442-a203-ce8a7b838cf3-utilities\") pod \"0fd9ab99-d364-4442-a203-ce8a7b838cf3\" (UID: \"0fd9ab99-d364-4442-a203-ce8a7b838cf3\") " Nov 26 07:04:36 crc kubenswrapper[4492]: I1126 07:04:36.992815 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0fd9ab99-d364-4442-a203-ce8a7b838cf3-utilities" (OuterVolumeSpecName: "utilities") pod "0fd9ab99-d364-4442-a203-ce8a7b838cf3" (UID: "0fd9ab99-d364-4442-a203-ce8a7b838cf3"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:04:37 crc kubenswrapper[4492]: I1126 07:04:37.003804 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0fd9ab99-d364-4442-a203-ce8a7b838cf3-kube-api-access-d49m6" (OuterVolumeSpecName: "kube-api-access-d49m6") pod "0fd9ab99-d364-4442-a203-ce8a7b838cf3" (UID: "0fd9ab99-d364-4442-a203-ce8a7b838cf3"). InnerVolumeSpecName "kube-api-access-d49m6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:04:37 crc kubenswrapper[4492]: I1126 07:04:37.091810 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0fd9ab99-d364-4442-a203-ce8a7b838cf3-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0fd9ab99-d364-4442-a203-ce8a7b838cf3" (UID: "0fd9ab99-d364-4442-a203-ce8a7b838cf3"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:04:37 crc kubenswrapper[4492]: I1126 07:04:37.094042 4492 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0fd9ab99-d364-4442-a203-ce8a7b838cf3-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 07:04:37 crc kubenswrapper[4492]: I1126 07:04:37.094081 4492 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0fd9ab99-d364-4442-a203-ce8a7b838cf3-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 07:04:37 crc kubenswrapper[4492]: I1126 07:04:37.094095 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d49m6\" (UniqueName: \"kubernetes.io/projected/0fd9ab99-d364-4442-a203-ce8a7b838cf3-kube-api-access-d49m6\") on node \"crc\" DevicePath \"\"" Nov 26 07:04:37 crc kubenswrapper[4492]: I1126 07:04:37.145490 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-create-txt7d" Nov 26 07:04:37 crc kubenswrapper[4492]: I1126 07:04:37.154453 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-89b0-account-create-update-f54f8" Nov 26 07:04:37 crc kubenswrapper[4492]: I1126 07:04:37.298459 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/35c76cd0-263a-4ad9-8d69-5fa9960652f3-operator-scripts\") pod \"35c76cd0-263a-4ad9-8d69-5fa9960652f3\" (UID: \"35c76cd0-263a-4ad9-8d69-5fa9960652f3\") " Nov 26 07:04:37 crc kubenswrapper[4492]: I1126 07:04:37.298538 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7m4hd\" (UniqueName: \"kubernetes.io/projected/282cd276-6463-4ade-8c5a-a7682fc10269-kube-api-access-7m4hd\") pod \"282cd276-6463-4ade-8c5a-a7682fc10269\" (UID: \"282cd276-6463-4ade-8c5a-a7682fc10269\") " Nov 26 07:04:37 crc kubenswrapper[4492]: I1126 07:04:37.298579 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/282cd276-6463-4ade-8c5a-a7682fc10269-operator-scripts\") pod \"282cd276-6463-4ade-8c5a-a7682fc10269\" (UID: \"282cd276-6463-4ade-8c5a-a7682fc10269\") " Nov 26 07:04:37 crc kubenswrapper[4492]: I1126 07:04:37.298667 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vggpp\" (UniqueName: \"kubernetes.io/projected/35c76cd0-263a-4ad9-8d69-5fa9960652f3-kube-api-access-vggpp\") pod \"35c76cd0-263a-4ad9-8d69-5fa9960652f3\" (UID: \"35c76cd0-263a-4ad9-8d69-5fa9960652f3\") " Nov 26 07:04:37 crc kubenswrapper[4492]: I1126 07:04:37.300715 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/282cd276-6463-4ade-8c5a-a7682fc10269-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "282cd276-6463-4ade-8c5a-a7682fc10269" (UID: "282cd276-6463-4ade-8c5a-a7682fc10269"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:04:37 crc kubenswrapper[4492]: I1126 07:04:37.301151 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/35c76cd0-263a-4ad9-8d69-5fa9960652f3-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "35c76cd0-263a-4ad9-8d69-5fa9960652f3" (UID: "35c76cd0-263a-4ad9-8d69-5fa9960652f3"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:04:37 crc kubenswrapper[4492]: I1126 07:04:37.302413 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/35c76cd0-263a-4ad9-8d69-5fa9960652f3-kube-api-access-vggpp" (OuterVolumeSpecName: "kube-api-access-vggpp") pod "35c76cd0-263a-4ad9-8d69-5fa9960652f3" (UID: "35c76cd0-263a-4ad9-8d69-5fa9960652f3"). InnerVolumeSpecName "kube-api-access-vggpp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:04:37 crc kubenswrapper[4492]: I1126 07:04:37.305408 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/282cd276-6463-4ade-8c5a-a7682fc10269-kube-api-access-7m4hd" (OuterVolumeSpecName: "kube-api-access-7m4hd") pod "282cd276-6463-4ade-8c5a-a7682fc10269" (UID: "282cd276-6463-4ade-8c5a-a7682fc10269"). InnerVolumeSpecName "kube-api-access-7m4hd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:04:37 crc kubenswrapper[4492]: I1126 07:04:37.401479 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7m4hd\" (UniqueName: \"kubernetes.io/projected/282cd276-6463-4ade-8c5a-a7682fc10269-kube-api-access-7m4hd\") on node \"crc\" DevicePath \"\"" Nov 26 07:04:37 crc kubenswrapper[4492]: I1126 07:04:37.401512 4492 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/282cd276-6463-4ade-8c5a-a7682fc10269-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 07:04:37 crc kubenswrapper[4492]: I1126 07:04:37.401524 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vggpp\" (UniqueName: \"kubernetes.io/projected/35c76cd0-263a-4ad9-8d69-5fa9960652f3-kube-api-access-vggpp\") on node \"crc\" DevicePath \"\"" Nov 26 07:04:37 crc kubenswrapper[4492]: I1126 07:04:37.401535 4492 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/35c76cd0-263a-4ad9-8d69-5fa9960652f3-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 07:04:37 crc kubenswrapper[4492]: I1126 07:04:37.794345 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2nmf5" event={"ID":"0fd9ab99-d364-4442-a203-ce8a7b838cf3","Type":"ContainerDied","Data":"961d32c65dfc85e38bb0e078ba2bf3119c26fdbb6a049493ebe20792affaf3db"} Nov 26 07:04:37 crc kubenswrapper[4492]: I1126 07:04:37.794410 4492 scope.go:117] "RemoveContainer" containerID="2ad589ae4993de38d6b48392104ac2e0cafe6527ca5f9030066e0b3d3d49e4a9" Nov 26 07:04:37 crc kubenswrapper[4492]: I1126 07:04:37.794532 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-2nmf5" Nov 26 07:04:37 crc kubenswrapper[4492]: I1126 07:04:37.804321 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-89b0-account-create-update-f54f8" event={"ID":"35c76cd0-263a-4ad9-8d69-5fa9960652f3","Type":"ContainerDied","Data":"7b519545ac9ae36e755a327ad8afa22333b59390484b007cbea795d557dad74c"} Nov 26 07:04:37 crc kubenswrapper[4492]: I1126 07:04:37.804721 4492 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7b519545ac9ae36e755a327ad8afa22333b59390484b007cbea795d557dad74c" Nov 26 07:04:37 crc kubenswrapper[4492]: I1126 07:04:37.804759 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-89b0-account-create-update-f54f8" Nov 26 07:04:37 crc kubenswrapper[4492]: I1126 07:04:37.808811 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-txt7d" event={"ID":"282cd276-6463-4ade-8c5a-a7682fc10269","Type":"ContainerDied","Data":"0a44d6444416a7752657bcd2ec206ef5f82739f1e8984a0ef5f554b7fabefff0"} Nov 26 07:04:37 crc kubenswrapper[4492]: I1126 07:04:37.808861 4492 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0a44d6444416a7752657bcd2ec206ef5f82739f1e8984a0ef5f554b7fabefff0" Nov 26 07:04:37 crc kubenswrapper[4492]: I1126 07:04:37.808963 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-create-txt7d" Nov 26 07:04:37 crc kubenswrapper[4492]: I1126 07:04:37.840407 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-2nmf5"] Nov 26 07:04:37 crc kubenswrapper[4492]: I1126 07:04:37.840513 4492 scope.go:117] "RemoveContainer" containerID="6dfe90b3854d34ce2f548bdd89ca965bb159362e7e4057baab45fd705cb8e3dd" Nov 26 07:04:37 crc kubenswrapper[4492]: I1126 07:04:37.847316 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-2nmf5"] Nov 26 07:04:37 crc kubenswrapper[4492]: I1126 07:04:37.862039 4492 scope.go:117] "RemoveContainer" containerID="1cc614f5739d6759766904cec4202186d0d69f06c660fb5ce7216b80d255871e" Nov 26 07:04:37 crc kubenswrapper[4492]: I1126 07:04:37.911220 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/b96d68d3-ed92-40be-bfed-6143b3cdca02-etc-swift\") pod \"swift-storage-0\" (UID: \"b96d68d3-ed92-40be-bfed-6143b3cdca02\") " pod="openstack/swift-storage-0" Nov 26 07:04:37 crc kubenswrapper[4492]: E1126 07:04:37.911518 4492 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 26 07:04:37 crc kubenswrapper[4492]: E1126 07:04:37.911543 4492 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 26 07:04:37 crc kubenswrapper[4492]: E1126 07:04:37.911584 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/b96d68d3-ed92-40be-bfed-6143b3cdca02-etc-swift podName:b96d68d3-ed92-40be-bfed-6143b3cdca02 nodeName:}" failed. No retries permitted until 2025-11-26 07:04:53.911570053 +0000 UTC m=+989.795458350 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/b96d68d3-ed92-40be-bfed-6143b3cdca02-etc-swift") pod "swift-storage-0" (UID: "b96d68d3-ed92-40be-bfed-6143b3cdca02") : configmap "swift-ring-files" not found Nov 26 07:04:38 crc kubenswrapper[4492]: I1126 07:04:38.446033 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0fd9ab99-d364-4442-a203-ce8a7b838cf3" path="/var/lib/kubelet/pods/0fd9ab99-d364-4442-a203-ce8a7b838cf3/volumes" Nov 26 07:04:39 crc kubenswrapper[4492]: I1126 07:04:39.500489 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-sync-j8sgq"] Nov 26 07:04:39 crc kubenswrapper[4492]: E1126 07:04:39.500823 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="282cd276-6463-4ade-8c5a-a7682fc10269" containerName="mariadb-database-create" Nov 26 07:04:39 crc kubenswrapper[4492]: I1126 07:04:39.500838 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="282cd276-6463-4ade-8c5a-a7682fc10269" containerName="mariadb-database-create" Nov 26 07:04:39 crc kubenswrapper[4492]: E1126 07:04:39.500854 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0fd9ab99-d364-4442-a203-ce8a7b838cf3" containerName="extract-content" Nov 26 07:04:39 crc kubenswrapper[4492]: I1126 07:04:39.500859 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="0fd9ab99-d364-4442-a203-ce8a7b838cf3" containerName="extract-content" Nov 26 07:04:39 crc kubenswrapper[4492]: E1126 07:04:39.500876 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="35c76cd0-263a-4ad9-8d69-5fa9960652f3" containerName="mariadb-account-create-update" Nov 26 07:04:39 crc kubenswrapper[4492]: I1126 07:04:39.500882 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="35c76cd0-263a-4ad9-8d69-5fa9960652f3" containerName="mariadb-account-create-update" Nov 26 07:04:39 crc kubenswrapper[4492]: E1126 07:04:39.500890 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0fd9ab99-d364-4442-a203-ce8a7b838cf3" containerName="extract-utilities" Nov 26 07:04:39 crc kubenswrapper[4492]: I1126 07:04:39.500894 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="0fd9ab99-d364-4442-a203-ce8a7b838cf3" containerName="extract-utilities" Nov 26 07:04:39 crc kubenswrapper[4492]: E1126 07:04:39.500906 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0fd9ab99-d364-4442-a203-ce8a7b838cf3" containerName="registry-server" Nov 26 07:04:39 crc kubenswrapper[4492]: I1126 07:04:39.500921 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="0fd9ab99-d364-4442-a203-ce8a7b838cf3" containerName="registry-server" Nov 26 07:04:39 crc kubenswrapper[4492]: I1126 07:04:39.501073 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="0fd9ab99-d364-4442-a203-ce8a7b838cf3" containerName="registry-server" Nov 26 07:04:39 crc kubenswrapper[4492]: I1126 07:04:39.501087 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="282cd276-6463-4ade-8c5a-a7682fc10269" containerName="mariadb-database-create" Nov 26 07:04:39 crc kubenswrapper[4492]: I1126 07:04:39.501095 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="35c76cd0-263a-4ad9-8d69-5fa9960652f3" containerName="mariadb-account-create-update" Nov 26 07:04:39 crc kubenswrapper[4492]: I1126 07:04:39.501631 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-j8sgq" Nov 26 07:04:39 crc kubenswrapper[4492]: I1126 07:04:39.503879 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-config-data" Nov 26 07:04:39 crc kubenswrapper[4492]: I1126 07:04:39.507358 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-c7w8z" Nov 26 07:04:39 crc kubenswrapper[4492]: I1126 07:04:39.511049 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-j8sgq"] Nov 26 07:04:39 crc kubenswrapper[4492]: I1126 07:04:39.552294 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/17dcd8ee-932f-4f74-be85-653f6f94a213-combined-ca-bundle\") pod \"glance-db-sync-j8sgq\" (UID: \"17dcd8ee-932f-4f74-be85-653f6f94a213\") " pod="openstack/glance-db-sync-j8sgq" Nov 26 07:04:39 crc kubenswrapper[4492]: I1126 07:04:39.552456 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/17dcd8ee-932f-4f74-be85-653f6f94a213-config-data\") pod \"glance-db-sync-j8sgq\" (UID: \"17dcd8ee-932f-4f74-be85-653f6f94a213\") " pod="openstack/glance-db-sync-j8sgq" Nov 26 07:04:39 crc kubenswrapper[4492]: I1126 07:04:39.552501 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rlh42\" (UniqueName: \"kubernetes.io/projected/17dcd8ee-932f-4f74-be85-653f6f94a213-kube-api-access-rlh42\") pod \"glance-db-sync-j8sgq\" (UID: \"17dcd8ee-932f-4f74-be85-653f6f94a213\") " pod="openstack/glance-db-sync-j8sgq" Nov 26 07:04:39 crc kubenswrapper[4492]: I1126 07:04:39.552583 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/17dcd8ee-932f-4f74-be85-653f6f94a213-db-sync-config-data\") pod \"glance-db-sync-j8sgq\" (UID: \"17dcd8ee-932f-4f74-be85-653f6f94a213\") " pod="openstack/glance-db-sync-j8sgq" Nov 26 07:04:39 crc kubenswrapper[4492]: I1126 07:04:39.654967 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/17dcd8ee-932f-4f74-be85-653f6f94a213-config-data\") pod \"glance-db-sync-j8sgq\" (UID: \"17dcd8ee-932f-4f74-be85-653f6f94a213\") " pod="openstack/glance-db-sync-j8sgq" Nov 26 07:04:39 crc kubenswrapper[4492]: I1126 07:04:39.655094 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rlh42\" (UniqueName: \"kubernetes.io/projected/17dcd8ee-932f-4f74-be85-653f6f94a213-kube-api-access-rlh42\") pod \"glance-db-sync-j8sgq\" (UID: \"17dcd8ee-932f-4f74-be85-653f6f94a213\") " pod="openstack/glance-db-sync-j8sgq" Nov 26 07:04:39 crc kubenswrapper[4492]: I1126 07:04:39.655780 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/17dcd8ee-932f-4f74-be85-653f6f94a213-db-sync-config-data\") pod \"glance-db-sync-j8sgq\" (UID: \"17dcd8ee-932f-4f74-be85-653f6f94a213\") " pod="openstack/glance-db-sync-j8sgq" Nov 26 07:04:39 crc kubenswrapper[4492]: I1126 07:04:39.656596 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/17dcd8ee-932f-4f74-be85-653f6f94a213-combined-ca-bundle\") pod 
\"glance-db-sync-j8sgq\" (UID: \"17dcd8ee-932f-4f74-be85-653f6f94a213\") " pod="openstack/glance-db-sync-j8sgq" Nov 26 07:04:39 crc kubenswrapper[4492]: I1126 07:04:39.662513 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/17dcd8ee-932f-4f74-be85-653f6f94a213-db-sync-config-data\") pod \"glance-db-sync-j8sgq\" (UID: \"17dcd8ee-932f-4f74-be85-653f6f94a213\") " pod="openstack/glance-db-sync-j8sgq" Nov 26 07:04:39 crc kubenswrapper[4492]: I1126 07:04:39.662789 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/17dcd8ee-932f-4f74-be85-653f6f94a213-config-data\") pod \"glance-db-sync-j8sgq\" (UID: \"17dcd8ee-932f-4f74-be85-653f6f94a213\") " pod="openstack/glance-db-sync-j8sgq" Nov 26 07:04:39 crc kubenswrapper[4492]: I1126 07:04:39.670714 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/17dcd8ee-932f-4f74-be85-653f6f94a213-combined-ca-bundle\") pod \"glance-db-sync-j8sgq\" (UID: \"17dcd8ee-932f-4f74-be85-653f6f94a213\") " pod="openstack/glance-db-sync-j8sgq" Nov 26 07:04:39 crc kubenswrapper[4492]: I1126 07:04:39.671973 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rlh42\" (UniqueName: \"kubernetes.io/projected/17dcd8ee-932f-4f74-be85-653f6f94a213-kube-api-access-rlh42\") pod \"glance-db-sync-j8sgq\" (UID: \"17dcd8ee-932f-4f74-be85-653f6f94a213\") " pod="openstack/glance-db-sync-j8sgq" Nov 26 07:04:39 crc kubenswrapper[4492]: I1126 07:04:39.816311 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-j8sgq" Nov 26 07:04:39 crc kubenswrapper[4492]: I1126 07:04:39.828685 4492 generic.go:334] "Generic (PLEG): container finished" podID="a4ff9a37-0e51-4210-8b06-9c10d0c1c3ba" containerID="5d1a95a642892281824c953077c28648621f41b19d73ef7d8b96b84e3468bf2f" exitCode=0 Nov 26 07:04:39 crc kubenswrapper[4492]: I1126 07:04:39.828736 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-kcsnv" event={"ID":"a4ff9a37-0e51-4210-8b06-9c10d0c1c3ba","Type":"ContainerDied","Data":"5d1a95a642892281824c953077c28648621f41b19d73ef7d8b96b84e3468bf2f"} Nov 26 07:04:40 crc kubenswrapper[4492]: I1126 07:04:40.226369 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-northd-0" Nov 26 07:04:40 crc kubenswrapper[4492]: I1126 07:04:40.356252 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-j8sgq"] Nov 26 07:04:40 crc kubenswrapper[4492]: I1126 07:04:40.840783 4492 generic.go:334] "Generic (PLEG): container finished" podID="daa58280-e6a7-477f-bfdb-accd4f56ac4d" containerID="0608b29441266fad95d69d5b2720f135463681abce1d524e8aa820621905da40" exitCode=0 Nov 26 07:04:40 crc kubenswrapper[4492]: I1126 07:04:40.840868 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"daa58280-e6a7-477f-bfdb-accd4f56ac4d","Type":"ContainerDied","Data":"0608b29441266fad95d69d5b2720f135463681abce1d524e8aa820621905da40"} Nov 26 07:04:40 crc kubenswrapper[4492]: I1126 07:04:40.843009 4492 generic.go:334] "Generic (PLEG): container finished" podID="3bb75c38-10db-46c0-947c-3d91eca8f110" containerID="3fbd9cbd29985b4fcb600b83dde33b18c78aefc029c9d5ef728f9406cbc8ad4b" exitCode=0 Nov 26 07:04:40 crc kubenswrapper[4492]: I1126 07:04:40.843068 4492 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"3bb75c38-10db-46c0-947c-3d91eca8f110","Type":"ContainerDied","Data":"3fbd9cbd29985b4fcb600b83dde33b18c78aefc029c9d5ef728f9406cbc8ad4b"} Nov 26 07:04:40 crc kubenswrapper[4492]: I1126 07:04:40.844766 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-j8sgq" event={"ID":"17dcd8ee-932f-4f74-be85-653f6f94a213","Type":"ContainerStarted","Data":"e5ba151e783e26d24ff36d2e824b60322e3b8d76daab8a9ff6df2003590736f0"} Nov 26 07:04:41 crc kubenswrapper[4492]: I1126 07:04:41.314078 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-kcsnv" Nov 26 07:04:41 crc kubenswrapper[4492]: I1126 07:04:41.410552 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a4ff9a37-0e51-4210-8b06-9c10d0c1c3ba-scripts\") pod \"a4ff9a37-0e51-4210-8b06-9c10d0c1c3ba\" (UID: \"a4ff9a37-0e51-4210-8b06-9c10d0c1c3ba\") " Nov 26 07:04:41 crc kubenswrapper[4492]: I1126 07:04:41.410602 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a4ff9a37-0e51-4210-8b06-9c10d0c1c3ba-combined-ca-bundle\") pod \"a4ff9a37-0e51-4210-8b06-9c10d0c1c3ba\" (UID: \"a4ff9a37-0e51-4210-8b06-9c10d0c1c3ba\") " Nov 26 07:04:41 crc kubenswrapper[4492]: I1126 07:04:41.410681 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/a4ff9a37-0e51-4210-8b06-9c10d0c1c3ba-etc-swift\") pod \"a4ff9a37-0e51-4210-8b06-9c10d0c1c3ba\" (UID: \"a4ff9a37-0e51-4210-8b06-9c10d0c1c3ba\") " Nov 26 07:04:41 crc kubenswrapper[4492]: I1126 07:04:41.410717 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/a4ff9a37-0e51-4210-8b06-9c10d0c1c3ba-swiftconf\") pod \"a4ff9a37-0e51-4210-8b06-9c10d0c1c3ba\" (UID: \"a4ff9a37-0e51-4210-8b06-9c10d0c1c3ba\") " Nov 26 07:04:41 crc kubenswrapper[4492]: I1126 07:04:41.410777 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/a4ff9a37-0e51-4210-8b06-9c10d0c1c3ba-dispersionconf\") pod \"a4ff9a37-0e51-4210-8b06-9c10d0c1c3ba\" (UID: \"a4ff9a37-0e51-4210-8b06-9c10d0c1c3ba\") " Nov 26 07:04:41 crc kubenswrapper[4492]: I1126 07:04:41.410794 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wsg26\" (UniqueName: \"kubernetes.io/projected/a4ff9a37-0e51-4210-8b06-9c10d0c1c3ba-kube-api-access-wsg26\") pod \"a4ff9a37-0e51-4210-8b06-9c10d0c1c3ba\" (UID: \"a4ff9a37-0e51-4210-8b06-9c10d0c1c3ba\") " Nov 26 07:04:41 crc kubenswrapper[4492]: I1126 07:04:41.410820 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/a4ff9a37-0e51-4210-8b06-9c10d0c1c3ba-ring-data-devices\") pod \"a4ff9a37-0e51-4210-8b06-9c10d0c1c3ba\" (UID: \"a4ff9a37-0e51-4210-8b06-9c10d0c1c3ba\") " Nov 26 07:04:41 crc kubenswrapper[4492]: I1126 07:04:41.411452 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a4ff9a37-0e51-4210-8b06-9c10d0c1c3ba-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "a4ff9a37-0e51-4210-8b06-9c10d0c1c3ba" (UID: "a4ff9a37-0e51-4210-8b06-9c10d0c1c3ba"). InnerVolumeSpecName "etc-swift". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:04:41 crc kubenswrapper[4492]: I1126 07:04:41.411890 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a4ff9a37-0e51-4210-8b06-9c10d0c1c3ba-ring-data-devices" (OuterVolumeSpecName: "ring-data-devices") pod "a4ff9a37-0e51-4210-8b06-9c10d0c1c3ba" (UID: "a4ff9a37-0e51-4210-8b06-9c10d0c1c3ba"). InnerVolumeSpecName "ring-data-devices". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:04:41 crc kubenswrapper[4492]: I1126 07:04:41.416597 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a4ff9a37-0e51-4210-8b06-9c10d0c1c3ba-kube-api-access-wsg26" (OuterVolumeSpecName: "kube-api-access-wsg26") pod "a4ff9a37-0e51-4210-8b06-9c10d0c1c3ba" (UID: "a4ff9a37-0e51-4210-8b06-9c10d0c1c3ba"). InnerVolumeSpecName "kube-api-access-wsg26". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:04:41 crc kubenswrapper[4492]: I1126 07:04:41.429786 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a4ff9a37-0e51-4210-8b06-9c10d0c1c3ba-dispersionconf" (OuterVolumeSpecName: "dispersionconf") pod "a4ff9a37-0e51-4210-8b06-9c10d0c1c3ba" (UID: "a4ff9a37-0e51-4210-8b06-9c10d0c1c3ba"). InnerVolumeSpecName "dispersionconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:04:41 crc kubenswrapper[4492]: I1126 07:04:41.434423 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a4ff9a37-0e51-4210-8b06-9c10d0c1c3ba-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a4ff9a37-0e51-4210-8b06-9c10d0c1c3ba" (UID: "a4ff9a37-0e51-4210-8b06-9c10d0c1c3ba"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:04:41 crc kubenswrapper[4492]: I1126 07:04:41.436311 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a4ff9a37-0e51-4210-8b06-9c10d0c1c3ba-scripts" (OuterVolumeSpecName: "scripts") pod "a4ff9a37-0e51-4210-8b06-9c10d0c1c3ba" (UID: "a4ff9a37-0e51-4210-8b06-9c10d0c1c3ba"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:04:41 crc kubenswrapper[4492]: I1126 07:04:41.453000 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a4ff9a37-0e51-4210-8b06-9c10d0c1c3ba-swiftconf" (OuterVolumeSpecName: "swiftconf") pod "a4ff9a37-0e51-4210-8b06-9c10d0c1c3ba" (UID: "a4ff9a37-0e51-4210-8b06-9c10d0c1c3ba"). InnerVolumeSpecName "swiftconf". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:04:41 crc kubenswrapper[4492]: I1126 07:04:41.512637 4492 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a4ff9a37-0e51-4210-8b06-9c10d0c1c3ba-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 07:04:41 crc kubenswrapper[4492]: I1126 07:04:41.512665 4492 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a4ff9a37-0e51-4210-8b06-9c10d0c1c3ba-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:04:41 crc kubenswrapper[4492]: I1126 07:04:41.512677 4492 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/a4ff9a37-0e51-4210-8b06-9c10d0c1c3ba-etc-swift\") on node \"crc\" DevicePath \"\"" Nov 26 07:04:41 crc kubenswrapper[4492]: I1126 07:04:41.512689 4492 reconciler_common.go:293] "Volume detached for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/a4ff9a37-0e51-4210-8b06-9c10d0c1c3ba-swiftconf\") on node \"crc\" DevicePath \"\"" Nov 26 07:04:41 crc kubenswrapper[4492]: I1126 07:04:41.512698 4492 reconciler_common.go:293] "Volume detached for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/a4ff9a37-0e51-4210-8b06-9c10d0c1c3ba-dispersionconf\") on node \"crc\" DevicePath \"\"" Nov 26 07:04:41 crc kubenswrapper[4492]: I1126 07:04:41.512706 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wsg26\" (UniqueName: \"kubernetes.io/projected/a4ff9a37-0e51-4210-8b06-9c10d0c1c3ba-kube-api-access-wsg26\") on node \"crc\" DevicePath \"\"" Nov 26 07:04:41 crc kubenswrapper[4492]: I1126 07:04:41.512715 4492 reconciler_common.go:293] "Volume detached for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/a4ff9a37-0e51-4210-8b06-9c10d0c1c3ba-ring-data-devices\") on node \"crc\" DevicePath \"\"" Nov 26 07:04:41 crc kubenswrapper[4492]: I1126 07:04:41.861670 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-kcsnv" event={"ID":"a4ff9a37-0e51-4210-8b06-9c10d0c1c3ba","Type":"ContainerDied","Data":"8e9900afd35ad8a927239812ed9cb5de7cfcf70769647ac2b664166d74c08a14"} Nov 26 07:04:41 crc kubenswrapper[4492]: I1126 07:04:41.861732 4492 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8e9900afd35ad8a927239812ed9cb5de7cfcf70769647ac2b664166d74c08a14" Nov 26 07:04:41 crc kubenswrapper[4492]: I1126 07:04:41.861679 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-kcsnv" Nov 26 07:04:41 crc kubenswrapper[4492]: I1126 07:04:41.866650 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"3bb75c38-10db-46c0-947c-3d91eca8f110","Type":"ContainerStarted","Data":"ef4e6edef99d0971fed92d6c5ca10b85b27a007b8446b8eaf8288b22854460da"} Nov 26 07:04:41 crc kubenswrapper[4492]: I1126 07:04:41.867877 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Nov 26 07:04:41 crc kubenswrapper[4492]: I1126 07:04:41.869811 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"daa58280-e6a7-477f-bfdb-accd4f56ac4d","Type":"ContainerStarted","Data":"7da8e27aa6f0aa6e523745e3a62bc9ce4f5d3a4d4db083a0fa5f0d0f8afe014c"} Nov 26 07:04:41 crc kubenswrapper[4492]: I1126 07:04:41.869994 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Nov 26 07:04:41 crc kubenswrapper[4492]: I1126 07:04:41.910052 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=39.841184934 podStartE2EDuration="1m7.910037995s" podCreationTimestamp="2025-11-26 07:03:34 +0000 UTC" firstStartedPulling="2025-11-26 07:03:36.195892215 +0000 UTC m=+912.079780514" lastFinishedPulling="2025-11-26 07:04:04.264745276 +0000 UTC m=+940.148633575" observedRunningTime="2025-11-26 07:04:41.901541991 +0000 UTC m=+977.785430289" watchObservedRunningTime="2025-11-26 07:04:41.910037995 +0000 UTC m=+977.793926293" Nov 26 07:04:41 crc kubenswrapper[4492]: I1126 07:04:41.929977 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=40.079807367 podStartE2EDuration="1m7.929962106s" podCreationTimestamp="2025-11-26 07:03:34 +0000 UTC" firstStartedPulling="2025-11-26 07:03:36.509980908 +0000 UTC m=+912.393869206" lastFinishedPulling="2025-11-26 07:04:04.360135648 +0000 UTC m=+940.244023945" observedRunningTime="2025-11-26 07:04:41.927573985 +0000 UTC m=+977.811462283" watchObservedRunningTime="2025-11-26 07:04:41.929962106 +0000 UTC m=+977.813850404" Nov 26 07:04:50 crc kubenswrapper[4492]: I1126 07:04:50.741291 4492 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-djlnq" podUID="cd517ac8-3f42-4406-8bb2-dd7f1b87daf7" containerName="ovn-controller" probeResult="failure" output=< Nov 26 07:04:50 crc kubenswrapper[4492]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Nov 26 07:04:50 crc kubenswrapper[4492]: > Nov 26 07:04:50 crc kubenswrapper[4492]: I1126 07:04:50.811381 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-hrhlg" Nov 26 07:04:50 crc kubenswrapper[4492]: I1126 07:04:50.813009 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-hrhlg" Nov 26 07:04:51 crc kubenswrapper[4492]: I1126 07:04:51.031601 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-djlnq-config-ngrmm"] Nov 26 07:04:51 crc kubenswrapper[4492]: E1126 07:04:51.032284 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a4ff9a37-0e51-4210-8b06-9c10d0c1c3ba" containerName="swift-ring-rebalance" Nov 26 07:04:51 crc kubenswrapper[4492]: I1126 07:04:51.032317 4492 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="a4ff9a37-0e51-4210-8b06-9c10d0c1c3ba" containerName="swift-ring-rebalance" Nov 26 07:04:51 crc kubenswrapper[4492]: I1126 07:04:51.032613 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="a4ff9a37-0e51-4210-8b06-9c10d0c1c3ba" containerName="swift-ring-rebalance" Nov 26 07:04:51 crc kubenswrapper[4492]: I1126 07:04:51.033530 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-djlnq-config-ngrmm" Nov 26 07:04:51 crc kubenswrapper[4492]: I1126 07:04:51.044494 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts" Nov 26 07:04:51 crc kubenswrapper[4492]: I1126 07:04:51.058017 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-djlnq-config-ngrmm"] Nov 26 07:04:51 crc kubenswrapper[4492]: I1126 07:04:51.195540 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/4d9de6c4-365b-45e4-a8c8-380260efa4ed-var-run-ovn\") pod \"ovn-controller-djlnq-config-ngrmm\" (UID: \"4d9de6c4-365b-45e4-a8c8-380260efa4ed\") " pod="openstack/ovn-controller-djlnq-config-ngrmm" Nov 26 07:04:51 crc kubenswrapper[4492]: I1126 07:04:51.195608 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/4d9de6c4-365b-45e4-a8c8-380260efa4ed-var-run\") pod \"ovn-controller-djlnq-config-ngrmm\" (UID: \"4d9de6c4-365b-45e4-a8c8-380260efa4ed\") " pod="openstack/ovn-controller-djlnq-config-ngrmm" Nov 26 07:04:51 crc kubenswrapper[4492]: I1126 07:04:51.195773 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/4d9de6c4-365b-45e4-a8c8-380260efa4ed-var-log-ovn\") pod \"ovn-controller-djlnq-config-ngrmm\" (UID: \"4d9de6c4-365b-45e4-a8c8-380260efa4ed\") " pod="openstack/ovn-controller-djlnq-config-ngrmm" Nov 26 07:04:51 crc kubenswrapper[4492]: I1126 07:04:51.195818 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/4d9de6c4-365b-45e4-a8c8-380260efa4ed-additional-scripts\") pod \"ovn-controller-djlnq-config-ngrmm\" (UID: \"4d9de6c4-365b-45e4-a8c8-380260efa4ed\") " pod="openstack/ovn-controller-djlnq-config-ngrmm" Nov 26 07:04:51 crc kubenswrapper[4492]: I1126 07:04:51.195848 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4d9de6c4-365b-45e4-a8c8-380260efa4ed-scripts\") pod \"ovn-controller-djlnq-config-ngrmm\" (UID: \"4d9de6c4-365b-45e4-a8c8-380260efa4ed\") " pod="openstack/ovn-controller-djlnq-config-ngrmm" Nov 26 07:04:51 crc kubenswrapper[4492]: I1126 07:04:51.195873 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bwkp7\" (UniqueName: \"kubernetes.io/projected/4d9de6c4-365b-45e4-a8c8-380260efa4ed-kube-api-access-bwkp7\") pod \"ovn-controller-djlnq-config-ngrmm\" (UID: \"4d9de6c4-365b-45e4-a8c8-380260efa4ed\") " pod="openstack/ovn-controller-djlnq-config-ngrmm" Nov 26 07:04:51 crc kubenswrapper[4492]: I1126 07:04:51.301934 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bwkp7\" (UniqueName: 
\"kubernetes.io/projected/4d9de6c4-365b-45e4-a8c8-380260efa4ed-kube-api-access-bwkp7\") pod \"ovn-controller-djlnq-config-ngrmm\" (UID: \"4d9de6c4-365b-45e4-a8c8-380260efa4ed\") " pod="openstack/ovn-controller-djlnq-config-ngrmm" Nov 26 07:04:51 crc kubenswrapper[4492]: I1126 07:04:51.302019 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/4d9de6c4-365b-45e4-a8c8-380260efa4ed-var-run-ovn\") pod \"ovn-controller-djlnq-config-ngrmm\" (UID: \"4d9de6c4-365b-45e4-a8c8-380260efa4ed\") " pod="openstack/ovn-controller-djlnq-config-ngrmm" Nov 26 07:04:51 crc kubenswrapper[4492]: I1126 07:04:51.302060 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/4d9de6c4-365b-45e4-a8c8-380260efa4ed-var-run\") pod \"ovn-controller-djlnq-config-ngrmm\" (UID: \"4d9de6c4-365b-45e4-a8c8-380260efa4ed\") " pod="openstack/ovn-controller-djlnq-config-ngrmm" Nov 26 07:04:51 crc kubenswrapper[4492]: I1126 07:04:51.302161 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/4d9de6c4-365b-45e4-a8c8-380260efa4ed-var-log-ovn\") pod \"ovn-controller-djlnq-config-ngrmm\" (UID: \"4d9de6c4-365b-45e4-a8c8-380260efa4ed\") " pod="openstack/ovn-controller-djlnq-config-ngrmm" Nov 26 07:04:51 crc kubenswrapper[4492]: I1126 07:04:51.302219 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/4d9de6c4-365b-45e4-a8c8-380260efa4ed-additional-scripts\") pod \"ovn-controller-djlnq-config-ngrmm\" (UID: \"4d9de6c4-365b-45e4-a8c8-380260efa4ed\") " pod="openstack/ovn-controller-djlnq-config-ngrmm" Nov 26 07:04:51 crc kubenswrapper[4492]: I1126 07:04:51.302245 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4d9de6c4-365b-45e4-a8c8-380260efa4ed-scripts\") pod \"ovn-controller-djlnq-config-ngrmm\" (UID: \"4d9de6c4-365b-45e4-a8c8-380260efa4ed\") " pod="openstack/ovn-controller-djlnq-config-ngrmm" Nov 26 07:04:51 crc kubenswrapper[4492]: I1126 07:04:51.302473 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/4d9de6c4-365b-45e4-a8c8-380260efa4ed-var-run-ovn\") pod \"ovn-controller-djlnq-config-ngrmm\" (UID: \"4d9de6c4-365b-45e4-a8c8-380260efa4ed\") " pod="openstack/ovn-controller-djlnq-config-ngrmm" Nov 26 07:04:51 crc kubenswrapper[4492]: I1126 07:04:51.302484 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/4d9de6c4-365b-45e4-a8c8-380260efa4ed-var-run\") pod \"ovn-controller-djlnq-config-ngrmm\" (UID: \"4d9de6c4-365b-45e4-a8c8-380260efa4ed\") " pod="openstack/ovn-controller-djlnq-config-ngrmm" Nov 26 07:04:51 crc kubenswrapper[4492]: I1126 07:04:51.302556 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/4d9de6c4-365b-45e4-a8c8-380260efa4ed-var-log-ovn\") pod \"ovn-controller-djlnq-config-ngrmm\" (UID: \"4d9de6c4-365b-45e4-a8c8-380260efa4ed\") " pod="openstack/ovn-controller-djlnq-config-ngrmm" Nov 26 07:04:51 crc kubenswrapper[4492]: I1126 07:04:51.303217 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: 
\"kubernetes.io/configmap/4d9de6c4-365b-45e4-a8c8-380260efa4ed-additional-scripts\") pod \"ovn-controller-djlnq-config-ngrmm\" (UID: \"4d9de6c4-365b-45e4-a8c8-380260efa4ed\") " pod="openstack/ovn-controller-djlnq-config-ngrmm" Nov 26 07:04:51 crc kubenswrapper[4492]: I1126 07:04:51.304642 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4d9de6c4-365b-45e4-a8c8-380260efa4ed-scripts\") pod \"ovn-controller-djlnq-config-ngrmm\" (UID: \"4d9de6c4-365b-45e4-a8c8-380260efa4ed\") " pod="openstack/ovn-controller-djlnq-config-ngrmm" Nov 26 07:04:51 crc kubenswrapper[4492]: I1126 07:04:51.324947 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bwkp7\" (UniqueName: \"kubernetes.io/projected/4d9de6c4-365b-45e4-a8c8-380260efa4ed-kube-api-access-bwkp7\") pod \"ovn-controller-djlnq-config-ngrmm\" (UID: \"4d9de6c4-365b-45e4-a8c8-380260efa4ed\") " pod="openstack/ovn-controller-djlnq-config-ngrmm" Nov 26 07:04:51 crc kubenswrapper[4492]: I1126 07:04:51.355977 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-djlnq-config-ngrmm" Nov 26 07:04:53 crc kubenswrapper[4492]: I1126 07:04:53.972612 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/b96d68d3-ed92-40be-bfed-6143b3cdca02-etc-swift\") pod \"swift-storage-0\" (UID: \"b96d68d3-ed92-40be-bfed-6143b3cdca02\") " pod="openstack/swift-storage-0" Nov 26 07:04:53 crc kubenswrapper[4492]: I1126 07:04:53.980592 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/b96d68d3-ed92-40be-bfed-6143b3cdca02-etc-swift\") pod \"swift-storage-0\" (UID: \"b96d68d3-ed92-40be-bfed-6143b3cdca02\") " pod="openstack/swift-storage-0" Nov 26 07:04:54 crc kubenswrapper[4492]: I1126 07:04:54.253575 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-storage-0" Nov 26 07:04:55 crc kubenswrapper[4492]: I1126 07:04:55.658377 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Nov 26 07:04:55 crc kubenswrapper[4492]: I1126 07:04:55.740673 4492 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-djlnq" podUID="cd517ac8-3f42-4406-8bb2-dd7f1b87daf7" containerName="ovn-controller" probeResult="failure" output=< Nov 26 07:04:55 crc kubenswrapper[4492]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Nov 26 07:04:55 crc kubenswrapper[4492]: > Nov 26 07:04:55 crc kubenswrapper[4492]: I1126 07:04:55.933464 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Nov 26 07:04:57 crc kubenswrapper[4492]: I1126 07:04:57.712710 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-create-s6jw9"] Nov 26 07:04:57 crc kubenswrapper[4492]: I1126 07:04:57.719198 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-create-s6jw9" Nov 26 07:04:57 crc kubenswrapper[4492]: I1126 07:04:57.737708 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-s6jw9"] Nov 26 07:04:57 crc kubenswrapper[4492]: I1126 07:04:57.859227 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7b557b94-67b1-424a-9f45-84ea1183e728-operator-scripts\") pod \"cinder-db-create-s6jw9\" (UID: \"7b557b94-67b1-424a-9f45-84ea1183e728\") " pod="openstack/cinder-db-create-s6jw9" Nov 26 07:04:57 crc kubenswrapper[4492]: I1126 07:04:57.859310 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nv9cv\" (UniqueName: \"kubernetes.io/projected/7b557b94-67b1-424a-9f45-84ea1183e728-kube-api-access-nv9cv\") pod \"cinder-db-create-s6jw9\" (UID: \"7b557b94-67b1-424a-9f45-84ea1183e728\") " pod="openstack/cinder-db-create-s6jw9" Nov 26 07:04:57 crc kubenswrapper[4492]: I1126 07:04:57.870287 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-djlnq-config-ngrmm"] Nov 26 07:04:57 crc kubenswrapper[4492]: W1126 07:04:57.883474 4492 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4d9de6c4_365b_45e4_a8c8_380260efa4ed.slice/crio-0e1f59505a0a12f6e3fb17ef543c7a57231846b56dbafe9d031a12e5adbc6303 WatchSource:0}: Error finding container 0e1f59505a0a12f6e3fb17ef543c7a57231846b56dbafe9d031a12e5adbc6303: Status 404 returned error can't find the container with id 0e1f59505a0a12f6e3fb17ef543c7a57231846b56dbafe9d031a12e5adbc6303 Nov 26 07:04:57 crc kubenswrapper[4492]: I1126 07:04:57.887456 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-create-phkjq"] Nov 26 07:04:57 crc kubenswrapper[4492]: I1126 07:04:57.888653 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-phkjq" Nov 26 07:04:57 crc kubenswrapper[4492]: I1126 07:04:57.914307 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-phkjq"] Nov 26 07:04:57 crc kubenswrapper[4492]: I1126 07:04:57.945703 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-fccc-account-create-update-cxnnm"] Nov 26 07:04:57 crc kubenswrapper[4492]: I1126 07:04:57.946939 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-fccc-account-create-update-cxnnm" Nov 26 07:04:57 crc kubenswrapper[4492]: I1126 07:04:57.950493 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-fccc-account-create-update-cxnnm"] Nov 26 07:04:57 crc kubenswrapper[4492]: I1126 07:04:57.952450 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-db-secret" Nov 26 07:04:57 crc kubenswrapper[4492]: I1126 07:04:57.967081 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7b557b94-67b1-424a-9f45-84ea1183e728-operator-scripts\") pod \"cinder-db-create-s6jw9\" (UID: \"7b557b94-67b1-424a-9f45-84ea1183e728\") " pod="openstack/cinder-db-create-s6jw9" Nov 26 07:04:57 crc kubenswrapper[4492]: I1126 07:04:57.967154 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nv9cv\" (UniqueName: \"kubernetes.io/projected/7b557b94-67b1-424a-9f45-84ea1183e728-kube-api-access-nv9cv\") pod \"cinder-db-create-s6jw9\" (UID: \"7b557b94-67b1-424a-9f45-84ea1183e728\") " pod="openstack/cinder-db-create-s6jw9" Nov 26 07:04:57 crc kubenswrapper[4492]: I1126 07:04:57.967987 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7b557b94-67b1-424a-9f45-84ea1183e728-operator-scripts\") pod \"cinder-db-create-s6jw9\" (UID: \"7b557b94-67b1-424a-9f45-84ea1183e728\") " pod="openstack/cinder-db-create-s6jw9" Nov 26 07:04:58 crc kubenswrapper[4492]: I1126 07:04:58.011321 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nv9cv\" (UniqueName: \"kubernetes.io/projected/7b557b94-67b1-424a-9f45-84ea1183e728-kube-api-access-nv9cv\") pod \"cinder-db-create-s6jw9\" (UID: \"7b557b94-67b1-424a-9f45-84ea1183e728\") " pod="openstack/cinder-db-create-s6jw9" Nov 26 07:04:58 crc kubenswrapper[4492]: I1126 07:04:58.022941 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-djlnq-config-ngrmm" event={"ID":"4d9de6c4-365b-45e4-a8c8-380260efa4ed","Type":"ContainerStarted","Data":"0e1f59505a0a12f6e3fb17ef543c7a57231846b56dbafe9d031a12e5adbc6303"} Nov 26 07:04:58 crc kubenswrapper[4492]: I1126 07:04:58.026235 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-1cd3-account-create-update-hp2bv"] Nov 26 07:04:58 crc kubenswrapper[4492]: I1126 07:04:58.027381 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-1cd3-account-create-update-hp2bv" Nov 26 07:04:58 crc kubenswrapper[4492]: I1126 07:04:58.029886 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-db-secret" Nov 26 07:04:58 crc kubenswrapper[4492]: I1126 07:04:58.034785 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-1cd3-account-create-update-hp2bv"] Nov 26 07:04:58 crc kubenswrapper[4492]: I1126 07:04:58.054641 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-create-s6jw9" Nov 26 07:04:58 crc kubenswrapper[4492]: I1126 07:04:58.069643 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/45f05976-8325-42bb-a4ac-c49c0fd7a0c2-operator-scripts\") pod \"cinder-fccc-account-create-update-cxnnm\" (UID: \"45f05976-8325-42bb-a4ac-c49c0fd7a0c2\") " pod="openstack/cinder-fccc-account-create-update-cxnnm" Nov 26 07:04:58 crc kubenswrapper[4492]: I1126 07:04:58.069733 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xq6mt\" (UniqueName: \"kubernetes.io/projected/9770e190-04af-494f-9a6a-f3b242b9b9ad-kube-api-access-xq6mt\") pod \"barbican-db-create-phkjq\" (UID: \"9770e190-04af-494f-9a6a-f3b242b9b9ad\") " pod="openstack/barbican-db-create-phkjq" Nov 26 07:04:58 crc kubenswrapper[4492]: I1126 07:04:58.069846 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9770e190-04af-494f-9a6a-f3b242b9b9ad-operator-scripts\") pod \"barbican-db-create-phkjq\" (UID: \"9770e190-04af-494f-9a6a-f3b242b9b9ad\") " pod="openstack/barbican-db-create-phkjq" Nov 26 07:04:58 crc kubenswrapper[4492]: I1126 07:04:58.069884 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5p4pk\" (UniqueName: \"kubernetes.io/projected/45f05976-8325-42bb-a4ac-c49c0fd7a0c2-kube-api-access-5p4pk\") pod \"cinder-fccc-account-create-update-cxnnm\" (UID: \"45f05976-8325-42bb-a4ac-c49c0fd7a0c2\") " pod="openstack/cinder-fccc-account-create-update-cxnnm" Nov 26 07:04:58 crc kubenswrapper[4492]: I1126 07:04:58.109460 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-db-create-nl5zs"] Nov 26 07:04:58 crc kubenswrapper[4492]: I1126 07:04:58.110608 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-db-create-nl5zs" Nov 26 07:04:58 crc kubenswrapper[4492]: I1126 07:04:58.123451 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Nov 26 07:04:58 crc kubenswrapper[4492]: I1126 07:04:58.142096 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-create-nl5zs"] Nov 26 07:04:58 crc kubenswrapper[4492]: I1126 07:04:58.172010 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xq6mt\" (UniqueName: \"kubernetes.io/projected/9770e190-04af-494f-9a6a-f3b242b9b9ad-kube-api-access-xq6mt\") pod \"barbican-db-create-phkjq\" (UID: \"9770e190-04af-494f-9a6a-f3b242b9b9ad\") " pod="openstack/barbican-db-create-phkjq" Nov 26 07:04:58 crc kubenswrapper[4492]: I1126 07:04:58.172357 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2szk4\" (UniqueName: \"kubernetes.io/projected/7ac9fc7f-8feb-41e2-b61b-084a9efd4512-kube-api-access-2szk4\") pod \"barbican-1cd3-account-create-update-hp2bv\" (UID: \"7ac9fc7f-8feb-41e2-b61b-084a9efd4512\") " pod="openstack/barbican-1cd3-account-create-update-hp2bv" Nov 26 07:04:58 crc kubenswrapper[4492]: I1126 07:04:58.172395 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9770e190-04af-494f-9a6a-f3b242b9b9ad-operator-scripts\") pod \"barbican-db-create-phkjq\" (UID: \"9770e190-04af-494f-9a6a-f3b242b9b9ad\") " pod="openstack/barbican-db-create-phkjq" Nov 26 07:04:58 crc kubenswrapper[4492]: I1126 07:04:58.172421 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5p4pk\" (UniqueName: \"kubernetes.io/projected/45f05976-8325-42bb-a4ac-c49c0fd7a0c2-kube-api-access-5p4pk\") pod \"cinder-fccc-account-create-update-cxnnm\" (UID: \"45f05976-8325-42bb-a4ac-c49c0fd7a0c2\") " pod="openstack/cinder-fccc-account-create-update-cxnnm" Nov 26 07:04:58 crc kubenswrapper[4492]: I1126 07:04:58.172462 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7ac9fc7f-8feb-41e2-b61b-084a9efd4512-operator-scripts\") pod \"barbican-1cd3-account-create-update-hp2bv\" (UID: \"7ac9fc7f-8feb-41e2-b61b-084a9efd4512\") " pod="openstack/barbican-1cd3-account-create-update-hp2bv" Nov 26 07:04:58 crc kubenswrapper[4492]: I1126 07:04:58.172494 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/45f05976-8325-42bb-a4ac-c49c0fd7a0c2-operator-scripts\") pod \"cinder-fccc-account-create-update-cxnnm\" (UID: \"45f05976-8325-42bb-a4ac-c49c0fd7a0c2\") " pod="openstack/cinder-fccc-account-create-update-cxnnm" Nov 26 07:04:58 crc kubenswrapper[4492]: I1126 07:04:58.173055 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/45f05976-8325-42bb-a4ac-c49c0fd7a0c2-operator-scripts\") pod \"cinder-fccc-account-create-update-cxnnm\" (UID: \"45f05976-8325-42bb-a4ac-c49c0fd7a0c2\") " pod="openstack/cinder-fccc-account-create-update-cxnnm" Nov 26 07:04:58 crc kubenswrapper[4492]: I1126 07:04:58.173607 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9770e190-04af-494f-9a6a-f3b242b9b9ad-operator-scripts\") pod 
\"barbican-db-create-phkjq\" (UID: \"9770e190-04af-494f-9a6a-f3b242b9b9ad\") " pod="openstack/barbican-db-create-phkjq" Nov 26 07:04:58 crc kubenswrapper[4492]: I1126 07:04:58.196768 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xq6mt\" (UniqueName: \"kubernetes.io/projected/9770e190-04af-494f-9a6a-f3b242b9b9ad-kube-api-access-xq6mt\") pod \"barbican-db-create-phkjq\" (UID: \"9770e190-04af-494f-9a6a-f3b242b9b9ad\") " pod="openstack/barbican-db-create-phkjq" Nov 26 07:04:58 crc kubenswrapper[4492]: I1126 07:04:58.198863 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5p4pk\" (UniqueName: \"kubernetes.io/projected/45f05976-8325-42bb-a4ac-c49c0fd7a0c2-kube-api-access-5p4pk\") pod \"cinder-fccc-account-create-update-cxnnm\" (UID: \"45f05976-8325-42bb-a4ac-c49c0fd7a0c2\") " pod="openstack/cinder-fccc-account-create-update-cxnnm" Nov 26 07:04:58 crc kubenswrapper[4492]: I1126 07:04:58.202468 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-phkjq" Nov 26 07:04:58 crc kubenswrapper[4492]: I1126 07:04:58.212312 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-sync-c6d4d"] Nov 26 07:04:58 crc kubenswrapper[4492]: I1126 07:04:58.213275 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-c6d4d" Nov 26 07:04:58 crc kubenswrapper[4492]: I1126 07:04:58.216829 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 26 07:04:58 crc kubenswrapper[4492]: I1126 07:04:58.217074 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 26 07:04:58 crc kubenswrapper[4492]: I1126 07:04:58.217281 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-t7xkz" Nov 26 07:04:58 crc kubenswrapper[4492]: I1126 07:04:58.217386 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 26 07:04:58 crc kubenswrapper[4492]: I1126 07:04:58.234993 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-c6d4d"] Nov 26 07:04:58 crc kubenswrapper[4492]: I1126 07:04:58.286906 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-fccc-account-create-update-cxnnm" Nov 26 07:04:58 crc kubenswrapper[4492]: I1126 07:04:58.289865 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-62tsv\" (UniqueName: \"kubernetes.io/projected/d934c9b1-8adf-41d3-9501-d55fffe02fd7-kube-api-access-62tsv\") pod \"keystone-db-sync-c6d4d\" (UID: \"d934c9b1-8adf-41d3-9501-d55fffe02fd7\") " pod="openstack/keystone-db-sync-c6d4d" Nov 26 07:04:58 crc kubenswrapper[4492]: I1126 07:04:58.289904 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mtkbc\" (UniqueName: \"kubernetes.io/projected/98538850-301a-4da8-aa72-7df26932b307-kube-api-access-mtkbc\") pod \"heat-db-create-nl5zs\" (UID: \"98538850-301a-4da8-aa72-7df26932b307\") " pod="openstack/heat-db-create-nl5zs" Nov 26 07:04:58 crc kubenswrapper[4492]: I1126 07:04:58.289961 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2szk4\" (UniqueName: \"kubernetes.io/projected/7ac9fc7f-8feb-41e2-b61b-084a9efd4512-kube-api-access-2szk4\") pod \"barbican-1cd3-account-create-update-hp2bv\" (UID: \"7ac9fc7f-8feb-41e2-b61b-084a9efd4512\") " pod="openstack/barbican-1cd3-account-create-update-hp2bv" Nov 26 07:04:58 crc kubenswrapper[4492]: I1126 07:04:58.290010 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d934c9b1-8adf-41d3-9501-d55fffe02fd7-config-data\") pod \"keystone-db-sync-c6d4d\" (UID: \"d934c9b1-8adf-41d3-9501-d55fffe02fd7\") " pod="openstack/keystone-db-sync-c6d4d" Nov 26 07:04:58 crc kubenswrapper[4492]: I1126 07:04:58.290042 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7ac9fc7f-8feb-41e2-b61b-084a9efd4512-operator-scripts\") pod \"barbican-1cd3-account-create-update-hp2bv\" (UID: \"7ac9fc7f-8feb-41e2-b61b-084a9efd4512\") " pod="openstack/barbican-1cd3-account-create-update-hp2bv" Nov 26 07:04:58 crc kubenswrapper[4492]: I1126 07:04:58.290073 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/98538850-301a-4da8-aa72-7df26932b307-operator-scripts\") pod \"heat-db-create-nl5zs\" (UID: \"98538850-301a-4da8-aa72-7df26932b307\") " pod="openstack/heat-db-create-nl5zs" Nov 26 07:04:58 crc kubenswrapper[4492]: I1126 07:04:58.290114 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d934c9b1-8adf-41d3-9501-d55fffe02fd7-combined-ca-bundle\") pod \"keystone-db-sync-c6d4d\" (UID: \"d934c9b1-8adf-41d3-9501-d55fffe02fd7\") " pod="openstack/keystone-db-sync-c6d4d" Nov 26 07:04:58 crc kubenswrapper[4492]: I1126 07:04:58.290940 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7ac9fc7f-8feb-41e2-b61b-084a9efd4512-operator-scripts\") pod \"barbican-1cd3-account-create-update-hp2bv\" (UID: \"7ac9fc7f-8feb-41e2-b61b-084a9efd4512\") " pod="openstack/barbican-1cd3-account-create-update-hp2bv" Nov 26 07:04:58 crc kubenswrapper[4492]: I1126 07:04:58.350495 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2szk4\" (UniqueName: 
\"kubernetes.io/projected/7ac9fc7f-8feb-41e2-b61b-084a9efd4512-kube-api-access-2szk4\") pod \"barbican-1cd3-account-create-update-hp2bv\" (UID: \"7ac9fc7f-8feb-41e2-b61b-084a9efd4512\") " pod="openstack/barbican-1cd3-account-create-update-hp2bv" Nov 26 07:04:58 crc kubenswrapper[4492]: I1126 07:04:58.350707 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-create-tdf78"] Nov 26 07:04:58 crc kubenswrapper[4492]: I1126 07:04:58.352428 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-tdf78" Nov 26 07:04:58 crc kubenswrapper[4492]: I1126 07:04:58.361668 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-1cd3-account-create-update-hp2bv" Nov 26 07:04:58 crc kubenswrapper[4492]: I1126 07:04:58.374530 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-tdf78"] Nov 26 07:04:58 crc kubenswrapper[4492]: I1126 07:04:58.391301 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4b193a3f-89f3-467c-93db-3c84e3d9272d-operator-scripts\") pod \"neutron-db-create-tdf78\" (UID: \"4b193a3f-89f3-467c-93db-3c84e3d9272d\") " pod="openstack/neutron-db-create-tdf78" Nov 26 07:04:58 crc kubenswrapper[4492]: I1126 07:04:58.391363 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d934c9b1-8adf-41d3-9501-d55fffe02fd7-config-data\") pod \"keystone-db-sync-c6d4d\" (UID: \"d934c9b1-8adf-41d3-9501-d55fffe02fd7\") " pod="openstack/keystone-db-sync-c6d4d" Nov 26 07:04:58 crc kubenswrapper[4492]: I1126 07:04:58.391410 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b7mh2\" (UniqueName: \"kubernetes.io/projected/4b193a3f-89f3-467c-93db-3c84e3d9272d-kube-api-access-b7mh2\") pod \"neutron-db-create-tdf78\" (UID: \"4b193a3f-89f3-467c-93db-3c84e3d9272d\") " pod="openstack/neutron-db-create-tdf78" Nov 26 07:04:58 crc kubenswrapper[4492]: I1126 07:04:58.391432 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/98538850-301a-4da8-aa72-7df26932b307-operator-scripts\") pod \"heat-db-create-nl5zs\" (UID: \"98538850-301a-4da8-aa72-7df26932b307\") " pod="openstack/heat-db-create-nl5zs" Nov 26 07:04:58 crc kubenswrapper[4492]: I1126 07:04:58.391477 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d934c9b1-8adf-41d3-9501-d55fffe02fd7-combined-ca-bundle\") pod \"keystone-db-sync-c6d4d\" (UID: \"d934c9b1-8adf-41d3-9501-d55fffe02fd7\") " pod="openstack/keystone-db-sync-c6d4d" Nov 26 07:04:58 crc kubenswrapper[4492]: I1126 07:04:58.391506 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-62tsv\" (UniqueName: \"kubernetes.io/projected/d934c9b1-8adf-41d3-9501-d55fffe02fd7-kube-api-access-62tsv\") pod \"keystone-db-sync-c6d4d\" (UID: \"d934c9b1-8adf-41d3-9501-d55fffe02fd7\") " pod="openstack/keystone-db-sync-c6d4d" Nov 26 07:04:58 crc kubenswrapper[4492]: I1126 07:04:58.391532 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mtkbc\" (UniqueName: \"kubernetes.io/projected/98538850-301a-4da8-aa72-7df26932b307-kube-api-access-mtkbc\") pod 
\"heat-db-create-nl5zs\" (UID: \"98538850-301a-4da8-aa72-7df26932b307\") " pod="openstack/heat-db-create-nl5zs" Nov 26 07:04:58 crc kubenswrapper[4492]: I1126 07:04:58.392340 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/98538850-301a-4da8-aa72-7df26932b307-operator-scripts\") pod \"heat-db-create-nl5zs\" (UID: \"98538850-301a-4da8-aa72-7df26932b307\") " pod="openstack/heat-db-create-nl5zs" Nov 26 07:04:58 crc kubenswrapper[4492]: I1126 07:04:58.403897 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d934c9b1-8adf-41d3-9501-d55fffe02fd7-combined-ca-bundle\") pod \"keystone-db-sync-c6d4d\" (UID: \"d934c9b1-8adf-41d3-9501-d55fffe02fd7\") " pod="openstack/keystone-db-sync-c6d4d" Nov 26 07:04:58 crc kubenswrapper[4492]: I1126 07:04:58.415580 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d934c9b1-8adf-41d3-9501-d55fffe02fd7-config-data\") pod \"keystone-db-sync-c6d4d\" (UID: \"d934c9b1-8adf-41d3-9501-d55fffe02fd7\") " pod="openstack/keystone-db-sync-c6d4d" Nov 26 07:04:58 crc kubenswrapper[4492]: I1126 07:04:58.440572 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mtkbc\" (UniqueName: \"kubernetes.io/projected/98538850-301a-4da8-aa72-7df26932b307-kube-api-access-mtkbc\") pod \"heat-db-create-nl5zs\" (UID: \"98538850-301a-4da8-aa72-7df26932b307\") " pod="openstack/heat-db-create-nl5zs" Nov 26 07:04:58 crc kubenswrapper[4492]: I1126 07:04:58.447533 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-62tsv\" (UniqueName: \"kubernetes.io/projected/d934c9b1-8adf-41d3-9501-d55fffe02fd7-kube-api-access-62tsv\") pod \"keystone-db-sync-c6d4d\" (UID: \"d934c9b1-8adf-41d3-9501-d55fffe02fd7\") " pod="openstack/keystone-db-sync-c6d4d" Nov 26 07:04:58 crc kubenswrapper[4492]: I1126 07:04:58.450285 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-136d-account-create-update-pslhl"] Nov 26 07:04:58 crc kubenswrapper[4492]: I1126 07:04:58.451291 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-136d-account-create-update-pslhl" Nov 26 07:04:58 crc kubenswrapper[4492]: I1126 07:04:58.467130 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-db-secret" Nov 26 07:04:58 crc kubenswrapper[4492]: I1126 07:04:58.476349 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-136d-account-create-update-pslhl"] Nov 26 07:04:58 crc kubenswrapper[4492]: I1126 07:04:58.498549 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b7mh2\" (UniqueName: \"kubernetes.io/projected/4b193a3f-89f3-467c-93db-3c84e3d9272d-kube-api-access-b7mh2\") pod \"neutron-db-create-tdf78\" (UID: \"4b193a3f-89f3-467c-93db-3c84e3d9272d\") " pod="openstack/neutron-db-create-tdf78" Nov 26 07:04:58 crc kubenswrapper[4492]: I1126 07:04:58.498693 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4b193a3f-89f3-467c-93db-3c84e3d9272d-operator-scripts\") pod \"neutron-db-create-tdf78\" (UID: \"4b193a3f-89f3-467c-93db-3c84e3d9272d\") " pod="openstack/neutron-db-create-tdf78" Nov 26 07:04:58 crc kubenswrapper[4492]: I1126 07:04:58.499312 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4b193a3f-89f3-467c-93db-3c84e3d9272d-operator-scripts\") pod \"neutron-db-create-tdf78\" (UID: \"4b193a3f-89f3-467c-93db-3c84e3d9272d\") " pod="openstack/neutron-db-create-tdf78" Nov 26 07:04:58 crc kubenswrapper[4492]: I1126 07:04:58.524957 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-d139-account-create-update-c7sz4"] Nov 26 07:04:58 crc kubenswrapper[4492]: I1126 07:04:58.526227 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-d139-account-create-update-c7sz4" Nov 26 07:04:58 crc kubenswrapper[4492]: I1126 07:04:58.533499 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b7mh2\" (UniqueName: \"kubernetes.io/projected/4b193a3f-89f3-467c-93db-3c84e3d9272d-kube-api-access-b7mh2\") pod \"neutron-db-create-tdf78\" (UID: \"4b193a3f-89f3-467c-93db-3c84e3d9272d\") " pod="openstack/neutron-db-create-tdf78" Nov 26 07:04:58 crc kubenswrapper[4492]: I1126 07:04:58.534987 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-db-secret" Nov 26 07:04:58 crc kubenswrapper[4492]: I1126 07:04:58.542622 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-d139-account-create-update-c7sz4"] Nov 26 07:04:58 crc kubenswrapper[4492]: I1126 07:04:58.546123 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-c6d4d" Nov 26 07:04:58 crc kubenswrapper[4492]: I1126 07:04:58.609990 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m4zvd\" (UniqueName: \"kubernetes.io/projected/27e7bb7f-22c2-4852-b264-371eeaa3907d-kube-api-access-m4zvd\") pod \"neutron-136d-account-create-update-pslhl\" (UID: \"27e7bb7f-22c2-4852-b264-371eeaa3907d\") " pod="openstack/neutron-136d-account-create-update-pslhl" Nov 26 07:04:58 crc kubenswrapper[4492]: I1126 07:04:58.610168 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sjqz4\" (UniqueName: \"kubernetes.io/projected/c5ed7a55-ffae-4dfd-b384-1f7eaa41a221-kube-api-access-sjqz4\") pod \"heat-d139-account-create-update-c7sz4\" (UID: \"c5ed7a55-ffae-4dfd-b384-1f7eaa41a221\") " pod="openstack/heat-d139-account-create-update-c7sz4" Nov 26 07:04:58 crc kubenswrapper[4492]: I1126 07:04:58.610213 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c5ed7a55-ffae-4dfd-b384-1f7eaa41a221-operator-scripts\") pod \"heat-d139-account-create-update-c7sz4\" (UID: \"c5ed7a55-ffae-4dfd-b384-1f7eaa41a221\") " pod="openstack/heat-d139-account-create-update-c7sz4" Nov 26 07:04:58 crc kubenswrapper[4492]: I1126 07:04:58.610274 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/27e7bb7f-22c2-4852-b264-371eeaa3907d-operator-scripts\") pod \"neutron-136d-account-create-update-pslhl\" (UID: \"27e7bb7f-22c2-4852-b264-371eeaa3907d\") " pod="openstack/neutron-136d-account-create-update-pslhl" Nov 26 07:04:58 crc kubenswrapper[4492]: I1126 07:04:58.716768 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sjqz4\" (UniqueName: \"kubernetes.io/projected/c5ed7a55-ffae-4dfd-b384-1f7eaa41a221-kube-api-access-sjqz4\") pod \"heat-d139-account-create-update-c7sz4\" (UID: \"c5ed7a55-ffae-4dfd-b384-1f7eaa41a221\") " pod="openstack/heat-d139-account-create-update-c7sz4" Nov 26 07:04:58 crc kubenswrapper[4492]: I1126 07:04:58.716823 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c5ed7a55-ffae-4dfd-b384-1f7eaa41a221-operator-scripts\") pod \"heat-d139-account-create-update-c7sz4\" (UID: \"c5ed7a55-ffae-4dfd-b384-1f7eaa41a221\") " pod="openstack/heat-d139-account-create-update-c7sz4" Nov 26 07:04:58 crc kubenswrapper[4492]: I1126 07:04:58.716860 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/27e7bb7f-22c2-4852-b264-371eeaa3907d-operator-scripts\") pod \"neutron-136d-account-create-update-pslhl\" (UID: \"27e7bb7f-22c2-4852-b264-371eeaa3907d\") " pod="openstack/neutron-136d-account-create-update-pslhl" Nov 26 07:04:58 crc kubenswrapper[4492]: I1126 07:04:58.716936 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m4zvd\" (UniqueName: \"kubernetes.io/projected/27e7bb7f-22c2-4852-b264-371eeaa3907d-kube-api-access-m4zvd\") pod \"neutron-136d-account-create-update-pslhl\" (UID: \"27e7bb7f-22c2-4852-b264-371eeaa3907d\") " pod="openstack/neutron-136d-account-create-update-pslhl" Nov 26 07:04:58 crc kubenswrapper[4492]: I1126 07:04:58.717581 
4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c5ed7a55-ffae-4dfd-b384-1f7eaa41a221-operator-scripts\") pod \"heat-d139-account-create-update-c7sz4\" (UID: \"c5ed7a55-ffae-4dfd-b384-1f7eaa41a221\") " pod="openstack/heat-d139-account-create-update-c7sz4" Nov 26 07:04:58 crc kubenswrapper[4492]: I1126 07:04:58.718012 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/27e7bb7f-22c2-4852-b264-371eeaa3907d-operator-scripts\") pod \"neutron-136d-account-create-update-pslhl\" (UID: \"27e7bb7f-22c2-4852-b264-371eeaa3907d\") " pod="openstack/neutron-136d-account-create-update-pslhl" Nov 26 07:04:58 crc kubenswrapper[4492]: I1126 07:04:58.729296 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-create-nl5zs" Nov 26 07:04:58 crc kubenswrapper[4492]: I1126 07:04:58.738952 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m4zvd\" (UniqueName: \"kubernetes.io/projected/27e7bb7f-22c2-4852-b264-371eeaa3907d-kube-api-access-m4zvd\") pod \"neutron-136d-account-create-update-pslhl\" (UID: \"27e7bb7f-22c2-4852-b264-371eeaa3907d\") " pod="openstack/neutron-136d-account-create-update-pslhl" Nov 26 07:04:58 crc kubenswrapper[4492]: I1126 07:04:58.759690 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sjqz4\" (UniqueName: \"kubernetes.io/projected/c5ed7a55-ffae-4dfd-b384-1f7eaa41a221-kube-api-access-sjqz4\") pod \"heat-d139-account-create-update-c7sz4\" (UID: \"c5ed7a55-ffae-4dfd-b384-1f7eaa41a221\") " pod="openstack/heat-d139-account-create-update-c7sz4" Nov 26 07:04:58 crc kubenswrapper[4492]: I1126 07:04:58.798780 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-tdf78" Nov 26 07:04:58 crc kubenswrapper[4492]: I1126 07:04:58.819937 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-136d-account-create-update-pslhl" Nov 26 07:04:58 crc kubenswrapper[4492]: I1126 07:04:58.858715 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-phkjq"] Nov 26 07:04:58 crc kubenswrapper[4492]: I1126 07:04:58.876783 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-d139-account-create-update-c7sz4" Nov 26 07:04:58 crc kubenswrapper[4492]: I1126 07:04:58.904274 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-fccc-account-create-update-cxnnm"] Nov 26 07:04:58 crc kubenswrapper[4492]: I1126 07:04:58.924208 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-s6jw9"] Nov 26 07:04:59 crc kubenswrapper[4492]: I1126 07:04:59.065316 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-djlnq-config-ngrmm" event={"ID":"4d9de6c4-365b-45e4-a8c8-380260efa4ed","Type":"ContainerStarted","Data":"814da5467c57a748e0a0a4ae27b20c030fe0e98b7bc01e07f4e9a6d4c2fdc83d"} Nov 26 07:04:59 crc kubenswrapper[4492]: I1126 07:04:59.074830 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-fccc-account-create-update-cxnnm" event={"ID":"45f05976-8325-42bb-a4ac-c49c0fd7a0c2","Type":"ContainerStarted","Data":"a5911e8b8e4f0cfccb57f9db24625a8011d93c2b088a8df6833c4b24de16492b"} Nov 26 07:04:59 crc kubenswrapper[4492]: I1126 07:04:59.084431 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-s6jw9" event={"ID":"7b557b94-67b1-424a-9f45-84ea1183e728","Type":"ContainerStarted","Data":"d2a59921393f891df152cc870e1875f1a024610d3f76c296eba9fefa9aa5c9ad"} Nov 26 07:04:59 crc kubenswrapper[4492]: I1126 07:04:59.101160 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-j8sgq" event={"ID":"17dcd8ee-932f-4f74-be85-653f6f94a213","Type":"ContainerStarted","Data":"ec64bde6351f0bc46940eccfd21a44db6e39b6048f6be897def604f723a3d7df"} Nov 26 07:04:59 crc kubenswrapper[4492]: I1126 07:04:59.131445 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-phkjq" event={"ID":"9770e190-04af-494f-9a6a-f3b242b9b9ad","Type":"ContainerStarted","Data":"83ea4a8623af405c71bf02c2d84f34dac60e66bb24f98e869c3298d73fb7be02"} Nov 26 07:04:59 crc kubenswrapper[4492]: I1126 07:04:59.149435 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"b96d68d3-ed92-40be-bfed-6143b3cdca02","Type":"ContainerStarted","Data":"a8ee6f2618bb71c2a9e39a4c2c96d8879dcc864693ff370d2f8c32d75fb2282c"} Nov 26 07:04:59 crc kubenswrapper[4492]: I1126 07:04:59.153593 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-djlnq-config-ngrmm" podStartSLOduration=8.153581873 podStartE2EDuration="8.153581873s" podCreationTimestamp="2025-11-26 07:04:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:04:59.136094514 +0000 UTC m=+995.019982812" watchObservedRunningTime="2025-11-26 07:04:59.153581873 +0000 UTC m=+995.037470171" Nov 26 07:04:59 crc kubenswrapper[4492]: I1126 07:04:59.179881 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-sync-j8sgq" podStartSLOduration=3.216368353 podStartE2EDuration="20.179871934s" podCreationTimestamp="2025-11-26 07:04:39 +0000 UTC" firstStartedPulling="2025-11-26 07:04:40.35808923 +0000 UTC m=+976.241977527" lastFinishedPulling="2025-11-26 07:04:57.32159281 +0000 UTC m=+993.205481108" observedRunningTime="2025-11-26 07:04:59.175238011 +0000 UTC m=+995.059126298" watchObservedRunningTime="2025-11-26 07:04:59.179871934 +0000 UTC m=+995.063760232" Nov 26 07:04:59 crc kubenswrapper[4492]: I1126 07:04:59.288564 4492 
kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-1cd3-account-create-update-hp2bv"] Nov 26 07:05:00 crc kubenswrapper[4492]: I1126 07:04:59.542622 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-136d-account-create-update-pslhl"] Nov 26 07:05:00 crc kubenswrapper[4492]: I1126 07:04:59.611079 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-d139-account-create-update-c7sz4"] Nov 26 07:05:00 crc kubenswrapper[4492]: I1126 07:04:59.651570 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-c6d4d"] Nov 26 07:05:00 crc kubenswrapper[4492]: I1126 07:04:59.839285 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-create-nl5zs"] Nov 26 07:05:00 crc kubenswrapper[4492]: I1126 07:05:00.195304 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-fccc-account-create-update-cxnnm" event={"ID":"45f05976-8325-42bb-a4ac-c49c0fd7a0c2","Type":"ContainerStarted","Data":"cc6a8a9aa2fe8c761d0f115f7232d5a289e9fa4ee909c92a6c4b7aa2f10c7ef0"} Nov 26 07:05:00 crc kubenswrapper[4492]: I1126 07:05:00.211613 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-c6d4d" event={"ID":"d934c9b1-8adf-41d3-9501-d55fffe02fd7","Type":"ContainerStarted","Data":"4c923373ee28abfdc6e5f73bb265c04a73aae21d025950c80c52d5a8437542db"} Nov 26 07:05:00 crc kubenswrapper[4492]: I1126 07:05:00.217055 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-create-nl5zs" event={"ID":"98538850-301a-4da8-aa72-7df26932b307","Type":"ContainerStarted","Data":"7698fb7d13adafa08be05b998815f6835e47d6a54533969b70914f57ec198d83"} Nov 26 07:05:00 crc kubenswrapper[4492]: I1126 07:05:00.228432 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-fccc-account-create-update-cxnnm" podStartSLOduration=3.228418387 podStartE2EDuration="3.228418387s" podCreationTimestamp="2025-11-26 07:04:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:05:00.223104266 +0000 UTC m=+996.106992563" watchObservedRunningTime="2025-11-26 07:05:00.228418387 +0000 UTC m=+996.112306685" Nov 26 07:05:00 crc kubenswrapper[4492]: I1126 07:05:00.237572 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-d139-account-create-update-c7sz4" event={"ID":"c5ed7a55-ffae-4dfd-b384-1f7eaa41a221","Type":"ContainerStarted","Data":"297923635a8a1396cf3500ac2ae4aab939fb95aeaaa2e93c3e59669d382c50a4"} Nov 26 07:05:00 crc kubenswrapper[4492]: I1126 07:05:00.237618 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-d139-account-create-update-c7sz4" event={"ID":"c5ed7a55-ffae-4dfd-b384-1f7eaa41a221","Type":"ContainerStarted","Data":"19d3e57802734228cc18f9f1003bba41498d69ec418e671cef4ef700bf43123d"} Nov 26 07:05:00 crc kubenswrapper[4492]: I1126 07:05:00.250534 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-d139-account-create-update-c7sz4" podStartSLOduration=2.25052385 podStartE2EDuration="2.25052385s" podCreationTimestamp="2025-11-26 07:04:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:05:00.249361675 +0000 UTC m=+996.133249972" watchObservedRunningTime="2025-11-26 07:05:00.25052385 +0000 UTC m=+996.134412148" Nov 26 07:05:00 crc 
kubenswrapper[4492]: I1126 07:05:00.281572 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-s6jw9" event={"ID":"7b557b94-67b1-424a-9f45-84ea1183e728","Type":"ContainerStarted","Data":"9225b5f86521510ad56d9c5a1a04de3248c0aaceac3d8bf8d9bf2a940f9eb5da"} Nov 26 07:05:00 crc kubenswrapper[4492]: I1126 07:05:00.309624 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-create-s6jw9" podStartSLOduration=3.30961385 podStartE2EDuration="3.30961385s" podCreationTimestamp="2025-11-26 07:04:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:05:00.307647273 +0000 UTC m=+996.191535571" watchObservedRunningTime="2025-11-26 07:05:00.30961385 +0000 UTC m=+996.193502148" Nov 26 07:05:00 crc kubenswrapper[4492]: I1126 07:05:00.332541 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-phkjq" event={"ID":"9770e190-04af-494f-9a6a-f3b242b9b9ad","Type":"ContainerStarted","Data":"0a033c207259e67f04802c08de54ed5b21711c1159d120320b22c73a1c05aed6"} Nov 26 07:05:00 crc kubenswrapper[4492]: I1126 07:05:00.347809 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-136d-account-create-update-pslhl" event={"ID":"27e7bb7f-22c2-4852-b264-371eeaa3907d","Type":"ContainerStarted","Data":"d04a9f1e1e2d04c91f008ca508af213e2e49acb67d3f9f2397ed841d76f309d3"} Nov 26 07:05:00 crc kubenswrapper[4492]: I1126 07:05:00.347854 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-136d-account-create-update-pslhl" event={"ID":"27e7bb7f-22c2-4852-b264-371eeaa3907d","Type":"ContainerStarted","Data":"296db8c1c62f35aa604cd302823eddebe2acf2b4ed530351ee563746b51ecaf3"} Nov 26 07:05:00 crc kubenswrapper[4492]: I1126 07:05:00.357006 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-1cd3-account-create-update-hp2bv" event={"ID":"7ac9fc7f-8feb-41e2-b61b-084a9efd4512","Type":"ContainerStarted","Data":"bc36adc448fff470d1848e0d272ffb71681450bfc5bcddb8fdb90aef70c6cd6a"} Nov 26 07:05:00 crc kubenswrapper[4492]: I1126 07:05:00.357040 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-1cd3-account-create-update-hp2bv" event={"ID":"7ac9fc7f-8feb-41e2-b61b-084a9efd4512","Type":"ContainerStarted","Data":"c8fe1f08af902e98369a116e83876b3fe303eb8b68eac6563248f13a5f337603"} Nov 26 07:05:00 crc kubenswrapper[4492]: I1126 07:05:00.368002 4492 generic.go:334] "Generic (PLEG): container finished" podID="4d9de6c4-365b-45e4-a8c8-380260efa4ed" containerID="814da5467c57a748e0a0a4ae27b20c030fe0e98b7bc01e07f4e9a6d4c2fdc83d" exitCode=0 Nov 26 07:05:00 crc kubenswrapper[4492]: I1126 07:05:00.368632 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-djlnq-config-ngrmm" event={"ID":"4d9de6c4-365b-45e4-a8c8-380260efa4ed","Type":"ContainerDied","Data":"814da5467c57a748e0a0a4ae27b20c030fe0e98b7bc01e07f4e9a6d4c2fdc83d"} Nov 26 07:05:00 crc kubenswrapper[4492]: I1126 07:05:00.393382 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-1cd3-account-create-update-hp2bv" podStartSLOduration=3.39337145 podStartE2EDuration="3.39337145s" podCreationTimestamp="2025-11-26 07:04:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:05:00.392728612 +0000 UTC m=+996.276616910" 
watchObservedRunningTime="2025-11-26 07:05:00.39337145 +0000 UTC m=+996.277259749" Nov 26 07:05:00 crc kubenswrapper[4492]: I1126 07:05:00.439872 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-136d-account-create-update-pslhl" podStartSLOduration=2.4398510509999998 podStartE2EDuration="2.439851051s" podCreationTimestamp="2025-11-26 07:04:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:05:00.419401371 +0000 UTC m=+996.303289669" watchObservedRunningTime="2025-11-26 07:05:00.439851051 +0000 UTC m=+996.323739349" Nov 26 07:05:00 crc kubenswrapper[4492]: I1126 07:05:00.747597 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-djlnq" Nov 26 07:05:00 crc kubenswrapper[4492]: I1126 07:05:00.802496 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-tdf78"] Nov 26 07:05:00 crc kubenswrapper[4492]: W1126 07:05:00.812435 4492 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4b193a3f_89f3_467c_93db_3c84e3d9272d.slice/crio-83968b3e8b20fdec38cb94524fa6e9d0e303606bd86e9ee3cf0a8f118296e769 WatchSource:0}: Error finding container 83968b3e8b20fdec38cb94524fa6e9d0e303606bd86e9ee3cf0a8f118296e769: Status 404 returned error can't find the container with id 83968b3e8b20fdec38cb94524fa6e9d0e303606bd86e9ee3cf0a8f118296e769 Nov 26 07:05:01 crc kubenswrapper[4492]: I1126 07:05:01.382837 4492 generic.go:334] "Generic (PLEG): container finished" podID="98538850-301a-4da8-aa72-7df26932b307" containerID="55c9f99473c91b275fbc5b09ad9542a9dedfff61abeb72e11f1a43b64d37b6a7" exitCode=0 Nov 26 07:05:01 crc kubenswrapper[4492]: I1126 07:05:01.382890 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-create-nl5zs" event={"ID":"98538850-301a-4da8-aa72-7df26932b307","Type":"ContainerDied","Data":"55c9f99473c91b275fbc5b09ad9542a9dedfff61abeb72e11f1a43b64d37b6a7"} Nov 26 07:05:01 crc kubenswrapper[4492]: I1126 07:05:01.386016 4492 generic.go:334] "Generic (PLEG): container finished" podID="9770e190-04af-494f-9a6a-f3b242b9b9ad" containerID="0a033c207259e67f04802c08de54ed5b21711c1159d120320b22c73a1c05aed6" exitCode=0 Nov 26 07:05:01 crc kubenswrapper[4492]: I1126 07:05:01.386089 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-phkjq" event={"ID":"9770e190-04af-494f-9a6a-f3b242b9b9ad","Type":"ContainerDied","Data":"0a033c207259e67f04802c08de54ed5b21711c1159d120320b22c73a1c05aed6"} Nov 26 07:05:01 crc kubenswrapper[4492]: I1126 07:05:01.397250 4492 generic.go:334] "Generic (PLEG): container finished" podID="27e7bb7f-22c2-4852-b264-371eeaa3907d" containerID="d04a9f1e1e2d04c91f008ca508af213e2e49acb67d3f9f2397ed841d76f309d3" exitCode=0 Nov 26 07:05:01 crc kubenswrapper[4492]: I1126 07:05:01.397361 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-136d-account-create-update-pslhl" event={"ID":"27e7bb7f-22c2-4852-b264-371eeaa3907d","Type":"ContainerDied","Data":"d04a9f1e1e2d04c91f008ca508af213e2e49acb67d3f9f2397ed841d76f309d3"} Nov 26 07:05:01 crc kubenswrapper[4492]: I1126 07:05:01.405391 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"b96d68d3-ed92-40be-bfed-6143b3cdca02","Type":"ContainerStarted","Data":"66586c75e7131f7397862f287b2fcad7d1bdecf770e001cce82c4a1fa0a0bb2d"} Nov 
26 07:05:01 crc kubenswrapper[4492]: I1126 07:05:01.405435 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"b96d68d3-ed92-40be-bfed-6143b3cdca02","Type":"ContainerStarted","Data":"3167db2ba48b51518b169be94b67ab6f1344490387552cef029f02d4215329ee"} Nov 26 07:05:01 crc kubenswrapper[4492]: I1126 07:05:01.409440 4492 generic.go:334] "Generic (PLEG): container finished" podID="7ac9fc7f-8feb-41e2-b61b-084a9efd4512" containerID="bc36adc448fff470d1848e0d272ffb71681450bfc5bcddb8fdb90aef70c6cd6a" exitCode=0 Nov 26 07:05:01 crc kubenswrapper[4492]: I1126 07:05:01.409528 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-1cd3-account-create-update-hp2bv" event={"ID":"7ac9fc7f-8feb-41e2-b61b-084a9efd4512","Type":"ContainerDied","Data":"bc36adc448fff470d1848e0d272ffb71681450bfc5bcddb8fdb90aef70c6cd6a"} Nov 26 07:05:01 crc kubenswrapper[4492]: I1126 07:05:01.412149 4492 generic.go:334] "Generic (PLEG): container finished" podID="c5ed7a55-ffae-4dfd-b384-1f7eaa41a221" containerID="297923635a8a1396cf3500ac2ae4aab939fb95aeaaa2e93c3e59669d382c50a4" exitCode=0 Nov 26 07:05:01 crc kubenswrapper[4492]: I1126 07:05:01.412337 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-d139-account-create-update-c7sz4" event={"ID":"c5ed7a55-ffae-4dfd-b384-1f7eaa41a221","Type":"ContainerDied","Data":"297923635a8a1396cf3500ac2ae4aab939fb95aeaaa2e93c3e59669d382c50a4"} Nov 26 07:05:01 crc kubenswrapper[4492]: I1126 07:05:01.414968 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-tdf78" event={"ID":"4b193a3f-89f3-467c-93db-3c84e3d9272d","Type":"ContainerDied","Data":"378c8ec908fbbdb4c51886bb6a2aae385f73667b1f051ba4e0ce2b4b30191a58"} Nov 26 07:05:01 crc kubenswrapper[4492]: I1126 07:05:01.414745 4492 generic.go:334] "Generic (PLEG): container finished" podID="4b193a3f-89f3-467c-93db-3c84e3d9272d" containerID="378c8ec908fbbdb4c51886bb6a2aae385f73667b1f051ba4e0ce2b4b30191a58" exitCode=0 Nov 26 07:05:01 crc kubenswrapper[4492]: I1126 07:05:01.418707 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-tdf78" event={"ID":"4b193a3f-89f3-467c-93db-3c84e3d9272d","Type":"ContainerStarted","Data":"83968b3e8b20fdec38cb94524fa6e9d0e303606bd86e9ee3cf0a8f118296e769"} Nov 26 07:05:01 crc kubenswrapper[4492]: I1126 07:05:01.432892 4492 generic.go:334] "Generic (PLEG): container finished" podID="45f05976-8325-42bb-a4ac-c49c0fd7a0c2" containerID="cc6a8a9aa2fe8c761d0f115f7232d5a289e9fa4ee909c92a6c4b7aa2f10c7ef0" exitCode=0 Nov 26 07:05:01 crc kubenswrapper[4492]: I1126 07:05:01.433057 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-fccc-account-create-update-cxnnm" event={"ID":"45f05976-8325-42bb-a4ac-c49c0fd7a0c2","Type":"ContainerDied","Data":"cc6a8a9aa2fe8c761d0f115f7232d5a289e9fa4ee909c92a6c4b7aa2f10c7ef0"} Nov 26 07:05:01 crc kubenswrapper[4492]: I1126 07:05:01.435679 4492 generic.go:334] "Generic (PLEG): container finished" podID="7b557b94-67b1-424a-9f45-84ea1183e728" containerID="9225b5f86521510ad56d9c5a1a04de3248c0aaceac3d8bf8d9bf2a940f9eb5da" exitCode=0 Nov 26 07:05:01 crc kubenswrapper[4492]: I1126 07:05:01.436018 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-s6jw9" event={"ID":"7b557b94-67b1-424a-9f45-84ea1183e728","Type":"ContainerDied","Data":"9225b5f86521510ad56d9c5a1a04de3248c0aaceac3d8bf8d9bf2a940f9eb5da"} Nov 26 07:05:01 crc kubenswrapper[4492]: I1126 07:05:01.799684 4492 util.go:48] "No 
ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-phkjq" Nov 26 07:05:01 crc kubenswrapper[4492]: I1126 07:05:01.876512 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-djlnq-config-ngrmm" Nov 26 07:05:01 crc kubenswrapper[4492]: I1126 07:05:01.898333 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9770e190-04af-494f-9a6a-f3b242b9b9ad-operator-scripts\") pod \"9770e190-04af-494f-9a6a-f3b242b9b9ad\" (UID: \"9770e190-04af-494f-9a6a-f3b242b9b9ad\") " Nov 26 07:05:01 crc kubenswrapper[4492]: I1126 07:05:01.898663 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xq6mt\" (UniqueName: \"kubernetes.io/projected/9770e190-04af-494f-9a6a-f3b242b9b9ad-kube-api-access-xq6mt\") pod \"9770e190-04af-494f-9a6a-f3b242b9b9ad\" (UID: \"9770e190-04af-494f-9a6a-f3b242b9b9ad\") " Nov 26 07:05:01 crc kubenswrapper[4492]: I1126 07:05:01.899008 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9770e190-04af-494f-9a6a-f3b242b9b9ad-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "9770e190-04af-494f-9a6a-f3b242b9b9ad" (UID: "9770e190-04af-494f-9a6a-f3b242b9b9ad"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:05:01 crc kubenswrapper[4492]: I1126 07:05:01.899542 4492 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9770e190-04af-494f-9a6a-f3b242b9b9ad-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 07:05:01 crc kubenswrapper[4492]: I1126 07:05:01.907318 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9770e190-04af-494f-9a6a-f3b242b9b9ad-kube-api-access-xq6mt" (OuterVolumeSpecName: "kube-api-access-xq6mt") pod "9770e190-04af-494f-9a6a-f3b242b9b9ad" (UID: "9770e190-04af-494f-9a6a-f3b242b9b9ad"). InnerVolumeSpecName "kube-api-access-xq6mt". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:05:02 crc kubenswrapper[4492]: I1126 07:05:02.001124 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/4d9de6c4-365b-45e4-a8c8-380260efa4ed-var-run\") pod \"4d9de6c4-365b-45e4-a8c8-380260efa4ed\" (UID: \"4d9de6c4-365b-45e4-a8c8-380260efa4ed\") " Nov 26 07:05:02 crc kubenswrapper[4492]: I1126 07:05:02.001594 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/4d9de6c4-365b-45e4-a8c8-380260efa4ed-var-log-ovn\") pod \"4d9de6c4-365b-45e4-a8c8-380260efa4ed\" (UID: \"4d9de6c4-365b-45e4-a8c8-380260efa4ed\") " Nov 26 07:05:02 crc kubenswrapper[4492]: I1126 07:05:02.001676 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4d9de6c4-365b-45e4-a8c8-380260efa4ed-scripts\") pod \"4d9de6c4-365b-45e4-a8c8-380260efa4ed\" (UID: \"4d9de6c4-365b-45e4-a8c8-380260efa4ed\") " Nov 26 07:05:02 crc kubenswrapper[4492]: I1126 07:05:02.001848 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bwkp7\" (UniqueName: \"kubernetes.io/projected/4d9de6c4-365b-45e4-a8c8-380260efa4ed-kube-api-access-bwkp7\") pod \"4d9de6c4-365b-45e4-a8c8-380260efa4ed\" (UID: \"4d9de6c4-365b-45e4-a8c8-380260efa4ed\") " Nov 26 07:05:02 crc kubenswrapper[4492]: I1126 07:05:02.001953 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/4d9de6c4-365b-45e4-a8c8-380260efa4ed-additional-scripts\") pod \"4d9de6c4-365b-45e4-a8c8-380260efa4ed\" (UID: \"4d9de6c4-365b-45e4-a8c8-380260efa4ed\") " Nov 26 07:05:02 crc kubenswrapper[4492]: I1126 07:05:02.002077 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/4d9de6c4-365b-45e4-a8c8-380260efa4ed-var-run-ovn\") pod \"4d9de6c4-365b-45e4-a8c8-380260efa4ed\" (UID: \"4d9de6c4-365b-45e4-a8c8-380260efa4ed\") " Nov 26 07:05:02 crc kubenswrapper[4492]: I1126 07:05:02.002612 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xq6mt\" (UniqueName: \"kubernetes.io/projected/9770e190-04af-494f-9a6a-f3b242b9b9ad-kube-api-access-xq6mt\") on node \"crc\" DevicePath \"\"" Nov 26 07:05:02 crc kubenswrapper[4492]: I1126 07:05:02.001270 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4d9de6c4-365b-45e4-a8c8-380260efa4ed-var-run" (OuterVolumeSpecName: "var-run") pod "4d9de6c4-365b-45e4-a8c8-380260efa4ed" (UID: "4d9de6c4-365b-45e4-a8c8-380260efa4ed"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 07:05:02 crc kubenswrapper[4492]: I1126 07:05:02.002676 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4d9de6c4-365b-45e4-a8c8-380260efa4ed-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "4d9de6c4-365b-45e4-a8c8-380260efa4ed" (UID: "4d9de6c4-365b-45e4-a8c8-380260efa4ed"). InnerVolumeSpecName "var-run-ovn". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 07:05:02 crc kubenswrapper[4492]: I1126 07:05:02.002731 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4d9de6c4-365b-45e4-a8c8-380260efa4ed-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "4d9de6c4-365b-45e4-a8c8-380260efa4ed" (UID: "4d9de6c4-365b-45e4-a8c8-380260efa4ed"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 07:05:02 crc kubenswrapper[4492]: I1126 07:05:02.003700 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4d9de6c4-365b-45e4-a8c8-380260efa4ed-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "4d9de6c4-365b-45e4-a8c8-380260efa4ed" (UID: "4d9de6c4-365b-45e4-a8c8-380260efa4ed"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:05:02 crc kubenswrapper[4492]: I1126 07:05:02.003865 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4d9de6c4-365b-45e4-a8c8-380260efa4ed-scripts" (OuterVolumeSpecName: "scripts") pod "4d9de6c4-365b-45e4-a8c8-380260efa4ed" (UID: "4d9de6c4-365b-45e4-a8c8-380260efa4ed"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:05:02 crc kubenswrapper[4492]: I1126 07:05:02.008387 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4d9de6c4-365b-45e4-a8c8-380260efa4ed-kube-api-access-bwkp7" (OuterVolumeSpecName: "kube-api-access-bwkp7") pod "4d9de6c4-365b-45e4-a8c8-380260efa4ed" (UID: "4d9de6c4-365b-45e4-a8c8-380260efa4ed"). InnerVolumeSpecName "kube-api-access-bwkp7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:05:02 crc kubenswrapper[4492]: I1126 07:05:02.105077 4492 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/4d9de6c4-365b-45e4-a8c8-380260efa4ed-additional-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 07:05:02 crc kubenswrapper[4492]: I1126 07:05:02.105118 4492 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/4d9de6c4-365b-45e4-a8c8-380260efa4ed-var-run-ovn\") on node \"crc\" DevicePath \"\"" Nov 26 07:05:02 crc kubenswrapper[4492]: I1126 07:05:02.105134 4492 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/4d9de6c4-365b-45e4-a8c8-380260efa4ed-var-run\") on node \"crc\" DevicePath \"\"" Nov 26 07:05:02 crc kubenswrapper[4492]: I1126 07:05:02.105149 4492 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/4d9de6c4-365b-45e4-a8c8-380260efa4ed-var-log-ovn\") on node \"crc\" DevicePath \"\"" Nov 26 07:05:02 crc kubenswrapper[4492]: I1126 07:05:02.105158 4492 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4d9de6c4-365b-45e4-a8c8-380260efa4ed-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 07:05:02 crc kubenswrapper[4492]: I1126 07:05:02.105184 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bwkp7\" (UniqueName: \"kubernetes.io/projected/4d9de6c4-365b-45e4-a8c8-380260efa4ed-kube-api-access-bwkp7\") on node \"crc\" DevicePath \"\"" Nov 26 07:05:02 crc kubenswrapper[4492]: I1126 07:05:02.279552 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openstack/ovn-controller-djlnq-config-ngrmm"] Nov 26 07:05:02 crc kubenswrapper[4492]: I1126 07:05:02.284724 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-djlnq-config-ngrmm"] Nov 26 07:05:02 crc kubenswrapper[4492]: I1126 07:05:02.403553 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-djlnq-config-q79vx"] Nov 26 07:05:02 crc kubenswrapper[4492]: E1126 07:05:02.403895 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4d9de6c4-365b-45e4-a8c8-380260efa4ed" containerName="ovn-config" Nov 26 07:05:02 crc kubenswrapper[4492]: I1126 07:05:02.403913 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="4d9de6c4-365b-45e4-a8c8-380260efa4ed" containerName="ovn-config" Nov 26 07:05:02 crc kubenswrapper[4492]: E1126 07:05:02.403943 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9770e190-04af-494f-9a6a-f3b242b9b9ad" containerName="mariadb-database-create" Nov 26 07:05:02 crc kubenswrapper[4492]: I1126 07:05:02.403950 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="9770e190-04af-494f-9a6a-f3b242b9b9ad" containerName="mariadb-database-create" Nov 26 07:05:02 crc kubenswrapper[4492]: I1126 07:05:02.404088 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="4d9de6c4-365b-45e4-a8c8-380260efa4ed" containerName="ovn-config" Nov 26 07:05:02 crc kubenswrapper[4492]: I1126 07:05:02.404126 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="9770e190-04af-494f-9a6a-f3b242b9b9ad" containerName="mariadb-database-create" Nov 26 07:05:02 crc kubenswrapper[4492]: I1126 07:05:02.404612 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-djlnq-config-q79vx" Nov 26 07:05:02 crc kubenswrapper[4492]: I1126 07:05:02.416767 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-djlnq-config-q79vx"] Nov 26 07:05:02 crc kubenswrapper[4492]: I1126 07:05:02.473842 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4d9de6c4-365b-45e4-a8c8-380260efa4ed" path="/var/lib/kubelet/pods/4d9de6c4-365b-45e4-a8c8-380260efa4ed/volumes" Nov 26 07:05:02 crc kubenswrapper[4492]: I1126 07:05:02.490050 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-create-phkjq" Nov 26 07:05:02 crc kubenswrapper[4492]: I1126 07:05:02.493254 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-phkjq" event={"ID":"9770e190-04af-494f-9a6a-f3b242b9b9ad","Type":"ContainerDied","Data":"83ea4a8623af405c71bf02c2d84f34dac60e66bb24f98e869c3298d73fb7be02"} Nov 26 07:05:02 crc kubenswrapper[4492]: I1126 07:05:02.493295 4492 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="83ea4a8623af405c71bf02c2d84f34dac60e66bb24f98e869c3298d73fb7be02" Nov 26 07:05:02 crc kubenswrapper[4492]: I1126 07:05:02.512833 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bjfpb\" (UniqueName: \"kubernetes.io/projected/ee7058e9-1aef-482a-9309-ac36996e9c82-kube-api-access-bjfpb\") pod \"ovn-controller-djlnq-config-q79vx\" (UID: \"ee7058e9-1aef-482a-9309-ac36996e9c82\") " pod="openstack/ovn-controller-djlnq-config-q79vx" Nov 26 07:05:02 crc kubenswrapper[4492]: I1126 07:05:02.512931 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ee7058e9-1aef-482a-9309-ac36996e9c82-scripts\") pod \"ovn-controller-djlnq-config-q79vx\" (UID: \"ee7058e9-1aef-482a-9309-ac36996e9c82\") " pod="openstack/ovn-controller-djlnq-config-q79vx" Nov 26 07:05:02 crc kubenswrapper[4492]: I1126 07:05:02.513042 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/ee7058e9-1aef-482a-9309-ac36996e9c82-var-run\") pod \"ovn-controller-djlnq-config-q79vx\" (UID: \"ee7058e9-1aef-482a-9309-ac36996e9c82\") " pod="openstack/ovn-controller-djlnq-config-q79vx" Nov 26 07:05:02 crc kubenswrapper[4492]: I1126 07:05:02.513097 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/ee7058e9-1aef-482a-9309-ac36996e9c82-additional-scripts\") pod \"ovn-controller-djlnq-config-q79vx\" (UID: \"ee7058e9-1aef-482a-9309-ac36996e9c82\") " pod="openstack/ovn-controller-djlnq-config-q79vx" Nov 26 07:05:02 crc kubenswrapper[4492]: I1126 07:05:02.513126 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/ee7058e9-1aef-482a-9309-ac36996e9c82-var-run-ovn\") pod \"ovn-controller-djlnq-config-q79vx\" (UID: \"ee7058e9-1aef-482a-9309-ac36996e9c82\") " pod="openstack/ovn-controller-djlnq-config-q79vx" Nov 26 07:05:02 crc kubenswrapper[4492]: I1126 07:05:02.513206 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/ee7058e9-1aef-482a-9309-ac36996e9c82-var-log-ovn\") pod \"ovn-controller-djlnq-config-q79vx\" (UID: \"ee7058e9-1aef-482a-9309-ac36996e9c82\") " pod="openstack/ovn-controller-djlnq-config-q79vx" Nov 26 07:05:02 crc kubenswrapper[4492]: I1126 07:05:02.518377 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"b96d68d3-ed92-40be-bfed-6143b3cdca02","Type":"ContainerStarted","Data":"0ca99faacce972513601c832c10a379fb161796bdea8ffb8835ca11e9c4fc4a7"} Nov 26 07:05:02 crc kubenswrapper[4492]: I1126 07:05:02.518424 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" 
event={"ID":"b96d68d3-ed92-40be-bfed-6143b3cdca02","Type":"ContainerStarted","Data":"97ac8db112490251685cb0f95b01ba921b4303e6ca6c37e5d045f238aef7e0e5"} Nov 26 07:05:02 crc kubenswrapper[4492]: I1126 07:05:02.530154 4492 scope.go:117] "RemoveContainer" containerID="814da5467c57a748e0a0a4ae27b20c030fe0e98b7bc01e07f4e9a6d4c2fdc83d" Nov 26 07:05:02 crc kubenswrapper[4492]: I1126 07:05:02.530361 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-djlnq-config-ngrmm" Nov 26 07:05:02 crc kubenswrapper[4492]: I1126 07:05:02.615116 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bjfpb\" (UniqueName: \"kubernetes.io/projected/ee7058e9-1aef-482a-9309-ac36996e9c82-kube-api-access-bjfpb\") pod \"ovn-controller-djlnq-config-q79vx\" (UID: \"ee7058e9-1aef-482a-9309-ac36996e9c82\") " pod="openstack/ovn-controller-djlnq-config-q79vx" Nov 26 07:05:02 crc kubenswrapper[4492]: I1126 07:05:02.615206 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ee7058e9-1aef-482a-9309-ac36996e9c82-scripts\") pod \"ovn-controller-djlnq-config-q79vx\" (UID: \"ee7058e9-1aef-482a-9309-ac36996e9c82\") " pod="openstack/ovn-controller-djlnq-config-q79vx" Nov 26 07:05:02 crc kubenswrapper[4492]: I1126 07:05:02.615253 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/ee7058e9-1aef-482a-9309-ac36996e9c82-var-run\") pod \"ovn-controller-djlnq-config-q79vx\" (UID: \"ee7058e9-1aef-482a-9309-ac36996e9c82\") " pod="openstack/ovn-controller-djlnq-config-q79vx" Nov 26 07:05:02 crc kubenswrapper[4492]: I1126 07:05:02.615285 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/ee7058e9-1aef-482a-9309-ac36996e9c82-additional-scripts\") pod \"ovn-controller-djlnq-config-q79vx\" (UID: \"ee7058e9-1aef-482a-9309-ac36996e9c82\") " pod="openstack/ovn-controller-djlnq-config-q79vx" Nov 26 07:05:02 crc kubenswrapper[4492]: I1126 07:05:02.615302 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/ee7058e9-1aef-482a-9309-ac36996e9c82-var-run-ovn\") pod \"ovn-controller-djlnq-config-q79vx\" (UID: \"ee7058e9-1aef-482a-9309-ac36996e9c82\") " pod="openstack/ovn-controller-djlnq-config-q79vx" Nov 26 07:05:02 crc kubenswrapper[4492]: I1126 07:05:02.615336 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/ee7058e9-1aef-482a-9309-ac36996e9c82-var-log-ovn\") pod \"ovn-controller-djlnq-config-q79vx\" (UID: \"ee7058e9-1aef-482a-9309-ac36996e9c82\") " pod="openstack/ovn-controller-djlnq-config-q79vx" Nov 26 07:05:02 crc kubenswrapper[4492]: I1126 07:05:02.616911 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/ee7058e9-1aef-482a-9309-ac36996e9c82-var-run\") pod \"ovn-controller-djlnq-config-q79vx\" (UID: \"ee7058e9-1aef-482a-9309-ac36996e9c82\") " pod="openstack/ovn-controller-djlnq-config-q79vx" Nov 26 07:05:02 crc kubenswrapper[4492]: I1126 07:05:02.617068 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/ee7058e9-1aef-482a-9309-ac36996e9c82-var-log-ovn\") pod 
\"ovn-controller-djlnq-config-q79vx\" (UID: \"ee7058e9-1aef-482a-9309-ac36996e9c82\") " pod="openstack/ovn-controller-djlnq-config-q79vx" Nov 26 07:05:02 crc kubenswrapper[4492]: I1126 07:05:02.622326 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/ee7058e9-1aef-482a-9309-ac36996e9c82-additional-scripts\") pod \"ovn-controller-djlnq-config-q79vx\" (UID: \"ee7058e9-1aef-482a-9309-ac36996e9c82\") " pod="openstack/ovn-controller-djlnq-config-q79vx" Nov 26 07:05:02 crc kubenswrapper[4492]: I1126 07:05:02.622735 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ee7058e9-1aef-482a-9309-ac36996e9c82-scripts\") pod \"ovn-controller-djlnq-config-q79vx\" (UID: \"ee7058e9-1aef-482a-9309-ac36996e9c82\") " pod="openstack/ovn-controller-djlnq-config-q79vx" Nov 26 07:05:02 crc kubenswrapper[4492]: I1126 07:05:02.624389 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/ee7058e9-1aef-482a-9309-ac36996e9c82-var-run-ovn\") pod \"ovn-controller-djlnq-config-q79vx\" (UID: \"ee7058e9-1aef-482a-9309-ac36996e9c82\") " pod="openstack/ovn-controller-djlnq-config-q79vx" Nov 26 07:05:02 crc kubenswrapper[4492]: I1126 07:05:02.653269 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bjfpb\" (UniqueName: \"kubernetes.io/projected/ee7058e9-1aef-482a-9309-ac36996e9c82-kube-api-access-bjfpb\") pod \"ovn-controller-djlnq-config-q79vx\" (UID: \"ee7058e9-1aef-482a-9309-ac36996e9c82\") " pod="openstack/ovn-controller-djlnq-config-q79vx" Nov 26 07:05:02 crc kubenswrapper[4492]: I1126 07:05:02.719294 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-djlnq-config-q79vx" Nov 26 07:05:02 crc kubenswrapper[4492]: I1126 07:05:02.880810 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-create-nl5zs" Nov 26 07:05:03 crc kubenswrapper[4492]: I1126 07:05:03.027252 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mtkbc\" (UniqueName: \"kubernetes.io/projected/98538850-301a-4da8-aa72-7df26932b307-kube-api-access-mtkbc\") pod \"98538850-301a-4da8-aa72-7df26932b307\" (UID: \"98538850-301a-4da8-aa72-7df26932b307\") " Nov 26 07:05:03 crc kubenswrapper[4492]: I1126 07:05:03.027465 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/98538850-301a-4da8-aa72-7df26932b307-operator-scripts\") pod \"98538850-301a-4da8-aa72-7df26932b307\" (UID: \"98538850-301a-4da8-aa72-7df26932b307\") " Nov 26 07:05:03 crc kubenswrapper[4492]: I1126 07:05:03.030700 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/98538850-301a-4da8-aa72-7df26932b307-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "98538850-301a-4da8-aa72-7df26932b307" (UID: "98538850-301a-4da8-aa72-7df26932b307"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:05:03 crc kubenswrapper[4492]: I1126 07:05:03.035564 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/98538850-301a-4da8-aa72-7df26932b307-kube-api-access-mtkbc" (OuterVolumeSpecName: "kube-api-access-mtkbc") pod "98538850-301a-4da8-aa72-7df26932b307" (UID: "98538850-301a-4da8-aa72-7df26932b307"). InnerVolumeSpecName "kube-api-access-mtkbc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:05:03 crc kubenswrapper[4492]: I1126 07:05:03.134838 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mtkbc\" (UniqueName: \"kubernetes.io/projected/98538850-301a-4da8-aa72-7df26932b307-kube-api-access-mtkbc\") on node \"crc\" DevicePath \"\"" Nov 26 07:05:03 crc kubenswrapper[4492]: I1126 07:05:03.135115 4492 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/98538850-301a-4da8-aa72-7df26932b307-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 07:05:03 crc kubenswrapper[4492]: I1126 07:05:03.148068 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-1cd3-account-create-update-hp2bv" Nov 26 07:05:03 crc kubenswrapper[4492]: I1126 07:05:03.180005 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-d139-account-create-update-c7sz4" Nov 26 07:05:03 crc kubenswrapper[4492]: I1126 07:05:03.181763 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-fccc-account-create-update-cxnnm" Nov 26 07:05:03 crc kubenswrapper[4492]: I1126 07:05:03.188389 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-136d-account-create-update-pslhl" Nov 26 07:05:03 crc kubenswrapper[4492]: I1126 07:05:03.240877 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7ac9fc7f-8feb-41e2-b61b-084a9efd4512-operator-scripts\") pod \"7ac9fc7f-8feb-41e2-b61b-084a9efd4512\" (UID: \"7ac9fc7f-8feb-41e2-b61b-084a9efd4512\") " Nov 26 07:05:03 crc kubenswrapper[4492]: I1126 07:05:03.241018 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2szk4\" (UniqueName: \"kubernetes.io/projected/7ac9fc7f-8feb-41e2-b61b-084a9efd4512-kube-api-access-2szk4\") pod \"7ac9fc7f-8feb-41e2-b61b-084a9efd4512\" (UID: \"7ac9fc7f-8feb-41e2-b61b-084a9efd4512\") " Nov 26 07:05:03 crc kubenswrapper[4492]: I1126 07:05:03.241379 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7ac9fc7f-8feb-41e2-b61b-084a9efd4512-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "7ac9fc7f-8feb-41e2-b61b-084a9efd4512" (UID: "7ac9fc7f-8feb-41e2-b61b-084a9efd4512"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:05:03 crc kubenswrapper[4492]: I1126 07:05:03.241860 4492 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7ac9fc7f-8feb-41e2-b61b-084a9efd4512-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 07:05:03 crc kubenswrapper[4492]: I1126 07:05:03.246403 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7ac9fc7f-8feb-41e2-b61b-084a9efd4512-kube-api-access-2szk4" (OuterVolumeSpecName: "kube-api-access-2szk4") pod "7ac9fc7f-8feb-41e2-b61b-084a9efd4512" (UID: "7ac9fc7f-8feb-41e2-b61b-084a9efd4512"). InnerVolumeSpecName "kube-api-access-2szk4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:05:03 crc kubenswrapper[4492]: I1126 07:05:03.278554 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-tdf78" Nov 26 07:05:03 crc kubenswrapper[4492]: I1126 07:05:03.280726 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-s6jw9" Nov 26 07:05:03 crc kubenswrapper[4492]: I1126 07:05:03.345834 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/27e7bb7f-22c2-4852-b264-371eeaa3907d-operator-scripts\") pod \"27e7bb7f-22c2-4852-b264-371eeaa3907d\" (UID: \"27e7bb7f-22c2-4852-b264-371eeaa3907d\") " Nov 26 07:05:03 crc kubenswrapper[4492]: I1126 07:05:03.345898 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m4zvd\" (UniqueName: \"kubernetes.io/projected/27e7bb7f-22c2-4852-b264-371eeaa3907d-kube-api-access-m4zvd\") pod \"27e7bb7f-22c2-4852-b264-371eeaa3907d\" (UID: \"27e7bb7f-22c2-4852-b264-371eeaa3907d\") " Nov 26 07:05:03 crc kubenswrapper[4492]: I1126 07:05:03.345961 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sjqz4\" (UniqueName: \"kubernetes.io/projected/c5ed7a55-ffae-4dfd-b384-1f7eaa41a221-kube-api-access-sjqz4\") pod \"c5ed7a55-ffae-4dfd-b384-1f7eaa41a221\" (UID: \"c5ed7a55-ffae-4dfd-b384-1f7eaa41a221\") " Nov 26 07:05:03 crc kubenswrapper[4492]: I1126 07:05:03.346015 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/45f05976-8325-42bb-a4ac-c49c0fd7a0c2-operator-scripts\") pod \"45f05976-8325-42bb-a4ac-c49c0fd7a0c2\" (UID: \"45f05976-8325-42bb-a4ac-c49c0fd7a0c2\") " Nov 26 07:05:03 crc kubenswrapper[4492]: I1126 07:05:03.346144 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5p4pk\" (UniqueName: \"kubernetes.io/projected/45f05976-8325-42bb-a4ac-c49c0fd7a0c2-kube-api-access-5p4pk\") pod \"45f05976-8325-42bb-a4ac-c49c0fd7a0c2\" (UID: \"45f05976-8325-42bb-a4ac-c49c0fd7a0c2\") " Nov 26 07:05:03 crc kubenswrapper[4492]: I1126 07:05:03.346193 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c5ed7a55-ffae-4dfd-b384-1f7eaa41a221-operator-scripts\") pod \"c5ed7a55-ffae-4dfd-b384-1f7eaa41a221\" (UID: \"c5ed7a55-ffae-4dfd-b384-1f7eaa41a221\") " Nov 26 07:05:03 crc kubenswrapper[4492]: I1126 07:05:03.346600 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2szk4\" (UniqueName: 
\"kubernetes.io/projected/7ac9fc7f-8feb-41e2-b61b-084a9efd4512-kube-api-access-2szk4\") on node \"crc\" DevicePath \"\"" Nov 26 07:05:03 crc kubenswrapper[4492]: I1126 07:05:03.348528 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c5ed7a55-ffae-4dfd-b384-1f7eaa41a221-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "c5ed7a55-ffae-4dfd-b384-1f7eaa41a221" (UID: "c5ed7a55-ffae-4dfd-b384-1f7eaa41a221"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:05:03 crc kubenswrapper[4492]: I1126 07:05:03.348947 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/27e7bb7f-22c2-4852-b264-371eeaa3907d-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "27e7bb7f-22c2-4852-b264-371eeaa3907d" (UID: "27e7bb7f-22c2-4852-b264-371eeaa3907d"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:05:03 crc kubenswrapper[4492]: I1126 07:05:03.355904 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/27e7bb7f-22c2-4852-b264-371eeaa3907d-kube-api-access-m4zvd" (OuterVolumeSpecName: "kube-api-access-m4zvd") pod "27e7bb7f-22c2-4852-b264-371eeaa3907d" (UID: "27e7bb7f-22c2-4852-b264-371eeaa3907d"). InnerVolumeSpecName "kube-api-access-m4zvd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:05:03 crc kubenswrapper[4492]: I1126 07:05:03.362329 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c5ed7a55-ffae-4dfd-b384-1f7eaa41a221-kube-api-access-sjqz4" (OuterVolumeSpecName: "kube-api-access-sjqz4") pod "c5ed7a55-ffae-4dfd-b384-1f7eaa41a221" (UID: "c5ed7a55-ffae-4dfd-b384-1f7eaa41a221"). InnerVolumeSpecName "kube-api-access-sjqz4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:05:03 crc kubenswrapper[4492]: I1126 07:05:03.362661 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/45f05976-8325-42bb-a4ac-c49c0fd7a0c2-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "45f05976-8325-42bb-a4ac-c49c0fd7a0c2" (UID: "45f05976-8325-42bb-a4ac-c49c0fd7a0c2"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:05:03 crc kubenswrapper[4492]: I1126 07:05:03.365194 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/45f05976-8325-42bb-a4ac-c49c0fd7a0c2-kube-api-access-5p4pk" (OuterVolumeSpecName: "kube-api-access-5p4pk") pod "45f05976-8325-42bb-a4ac-c49c0fd7a0c2" (UID: "45f05976-8325-42bb-a4ac-c49c0fd7a0c2"). InnerVolumeSpecName "kube-api-access-5p4pk". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:05:03 crc kubenswrapper[4492]: I1126 07:05:03.447505 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4b193a3f-89f3-467c-93db-3c84e3d9272d-operator-scripts\") pod \"4b193a3f-89f3-467c-93db-3c84e3d9272d\" (UID: \"4b193a3f-89f3-467c-93db-3c84e3d9272d\") " Nov 26 07:05:03 crc kubenswrapper[4492]: I1126 07:05:03.447599 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7b557b94-67b1-424a-9f45-84ea1183e728-operator-scripts\") pod \"7b557b94-67b1-424a-9f45-84ea1183e728\" (UID: \"7b557b94-67b1-424a-9f45-84ea1183e728\") " Nov 26 07:05:03 crc kubenswrapper[4492]: I1126 07:05:03.447890 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4b193a3f-89f3-467c-93db-3c84e3d9272d-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "4b193a3f-89f3-467c-93db-3c84e3d9272d" (UID: "4b193a3f-89f3-467c-93db-3c84e3d9272d"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:05:03 crc kubenswrapper[4492]: I1126 07:05:03.447938 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7b557b94-67b1-424a-9f45-84ea1183e728-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "7b557b94-67b1-424a-9f45-84ea1183e728" (UID: "7b557b94-67b1-424a-9f45-84ea1183e728"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:05:03 crc kubenswrapper[4492]: I1126 07:05:03.448072 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b7mh2\" (UniqueName: \"kubernetes.io/projected/4b193a3f-89f3-467c-93db-3c84e3d9272d-kube-api-access-b7mh2\") pod \"4b193a3f-89f3-467c-93db-3c84e3d9272d\" (UID: \"4b193a3f-89f3-467c-93db-3c84e3d9272d\") " Nov 26 07:05:03 crc kubenswrapper[4492]: I1126 07:05:03.448367 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nv9cv\" (UniqueName: \"kubernetes.io/projected/7b557b94-67b1-424a-9f45-84ea1183e728-kube-api-access-nv9cv\") pod \"7b557b94-67b1-424a-9f45-84ea1183e728\" (UID: \"7b557b94-67b1-424a-9f45-84ea1183e728\") " Nov 26 07:05:03 crc kubenswrapper[4492]: I1126 07:05:03.448751 4492 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7b557b94-67b1-424a-9f45-84ea1183e728-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 07:05:03 crc kubenswrapper[4492]: I1126 07:05:03.448768 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5p4pk\" (UniqueName: \"kubernetes.io/projected/45f05976-8325-42bb-a4ac-c49c0fd7a0c2-kube-api-access-5p4pk\") on node \"crc\" DevicePath \"\"" Nov 26 07:05:03 crc kubenswrapper[4492]: I1126 07:05:03.448779 4492 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c5ed7a55-ffae-4dfd-b384-1f7eaa41a221-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 07:05:03 crc kubenswrapper[4492]: I1126 07:05:03.448787 4492 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/27e7bb7f-22c2-4852-b264-371eeaa3907d-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 07:05:03 crc kubenswrapper[4492]: I1126 
07:05:03.448795 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m4zvd\" (UniqueName: \"kubernetes.io/projected/27e7bb7f-22c2-4852-b264-371eeaa3907d-kube-api-access-m4zvd\") on node \"crc\" DevicePath \"\"" Nov 26 07:05:03 crc kubenswrapper[4492]: I1126 07:05:03.448802 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sjqz4\" (UniqueName: \"kubernetes.io/projected/c5ed7a55-ffae-4dfd-b384-1f7eaa41a221-kube-api-access-sjqz4\") on node \"crc\" DevicePath \"\"" Nov 26 07:05:03 crc kubenswrapper[4492]: I1126 07:05:03.448809 4492 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/45f05976-8325-42bb-a4ac-c49c0fd7a0c2-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 07:05:03 crc kubenswrapper[4492]: I1126 07:05:03.448817 4492 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4b193a3f-89f3-467c-93db-3c84e3d9272d-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 07:05:03 crc kubenswrapper[4492]: I1126 07:05:03.454510 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7b557b94-67b1-424a-9f45-84ea1183e728-kube-api-access-nv9cv" (OuterVolumeSpecName: "kube-api-access-nv9cv") pod "7b557b94-67b1-424a-9f45-84ea1183e728" (UID: "7b557b94-67b1-424a-9f45-84ea1183e728"). InnerVolumeSpecName "kube-api-access-nv9cv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:05:03 crc kubenswrapper[4492]: I1126 07:05:03.455244 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4b193a3f-89f3-467c-93db-3c84e3d9272d-kube-api-access-b7mh2" (OuterVolumeSpecName: "kube-api-access-b7mh2") pod "4b193a3f-89f3-467c-93db-3c84e3d9272d" (UID: "4b193a3f-89f3-467c-93db-3c84e3d9272d"). InnerVolumeSpecName "kube-api-access-b7mh2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:05:03 crc kubenswrapper[4492]: I1126 07:05:03.523974 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-djlnq-config-q79vx"] Nov 26 07:05:03 crc kubenswrapper[4492]: I1126 07:05:03.551362 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b7mh2\" (UniqueName: \"kubernetes.io/projected/4b193a3f-89f3-467c-93db-3c84e3d9272d-kube-api-access-b7mh2\") on node \"crc\" DevicePath \"\"" Nov 26 07:05:03 crc kubenswrapper[4492]: I1126 07:05:03.551395 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nv9cv\" (UniqueName: \"kubernetes.io/projected/7b557b94-67b1-424a-9f45-84ea1183e728-kube-api-access-nv9cv\") on node \"crc\" DevicePath \"\"" Nov 26 07:05:03 crc kubenswrapper[4492]: I1126 07:05:03.554857 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-d139-account-create-update-c7sz4" Nov 26 07:05:03 crc kubenswrapper[4492]: I1126 07:05:03.554840 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-d139-account-create-update-c7sz4" event={"ID":"c5ed7a55-ffae-4dfd-b384-1f7eaa41a221","Type":"ContainerDied","Data":"19d3e57802734228cc18f9f1003bba41498d69ec418e671cef4ef700bf43123d"} Nov 26 07:05:03 crc kubenswrapper[4492]: I1126 07:05:03.554997 4492 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="19d3e57802734228cc18f9f1003bba41498d69ec418e671cef4ef700bf43123d" Nov 26 07:05:03 crc kubenswrapper[4492]: I1126 07:05:03.556391 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-tdf78" Nov 26 07:05:03 crc kubenswrapper[4492]: I1126 07:05:03.556440 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-tdf78" event={"ID":"4b193a3f-89f3-467c-93db-3c84e3d9272d","Type":"ContainerDied","Data":"83968b3e8b20fdec38cb94524fa6e9d0e303606bd86e9ee3cf0a8f118296e769"} Nov 26 07:05:03 crc kubenswrapper[4492]: I1126 07:05:03.556481 4492 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="83968b3e8b20fdec38cb94524fa6e9d0e303606bd86e9ee3cf0a8f118296e769" Nov 26 07:05:03 crc kubenswrapper[4492]: I1126 07:05:03.558194 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-fccc-account-create-update-cxnnm" event={"ID":"45f05976-8325-42bb-a4ac-c49c0fd7a0c2","Type":"ContainerDied","Data":"a5911e8b8e4f0cfccb57f9db24625a8011d93c2b088a8df6833c4b24de16492b"} Nov 26 07:05:03 crc kubenswrapper[4492]: I1126 07:05:03.558221 4492 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a5911e8b8e4f0cfccb57f9db24625a8011d93c2b088a8df6833c4b24de16492b" Nov 26 07:05:03 crc kubenswrapper[4492]: I1126 07:05:03.558276 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-fccc-account-create-update-cxnnm" Nov 26 07:05:03 crc kubenswrapper[4492]: I1126 07:05:03.561443 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-s6jw9" event={"ID":"7b557b94-67b1-424a-9f45-84ea1183e728","Type":"ContainerDied","Data":"d2a59921393f891df152cc870e1875f1a024610d3f76c296eba9fefa9aa5c9ad"} Nov 26 07:05:03 crc kubenswrapper[4492]: I1126 07:05:03.561487 4492 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d2a59921393f891df152cc870e1875f1a024610d3f76c296eba9fefa9aa5c9ad" Nov 26 07:05:03 crc kubenswrapper[4492]: I1126 07:05:03.561551 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-s6jw9" Nov 26 07:05:03 crc kubenswrapper[4492]: I1126 07:05:03.569726 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-136d-account-create-update-pslhl" Nov 26 07:05:03 crc kubenswrapper[4492]: I1126 07:05:03.569720 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-136d-account-create-update-pslhl" event={"ID":"27e7bb7f-22c2-4852-b264-371eeaa3907d","Type":"ContainerDied","Data":"296db8c1c62f35aa604cd302823eddebe2acf2b4ed530351ee563746b51ecaf3"} Nov 26 07:05:03 crc kubenswrapper[4492]: I1126 07:05:03.569832 4492 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="296db8c1c62f35aa604cd302823eddebe2acf2b4ed530351ee563746b51ecaf3" Nov 26 07:05:03 crc kubenswrapper[4492]: I1126 07:05:03.574672 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-create-nl5zs" event={"ID":"98538850-301a-4da8-aa72-7df26932b307","Type":"ContainerDied","Data":"7698fb7d13adafa08be05b998815f6835e47d6a54533969b70914f57ec198d83"} Nov 26 07:05:03 crc kubenswrapper[4492]: I1126 07:05:03.574709 4492 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7698fb7d13adafa08be05b998815f6835e47d6a54533969b70914f57ec198d83" Nov 26 07:05:03 crc kubenswrapper[4492]: I1126 07:05:03.574753 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-create-nl5zs" Nov 26 07:05:03 crc kubenswrapper[4492]: I1126 07:05:03.580957 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-1cd3-account-create-update-hp2bv" event={"ID":"7ac9fc7f-8feb-41e2-b61b-084a9efd4512","Type":"ContainerDied","Data":"c8fe1f08af902e98369a116e83876b3fe303eb8b68eac6563248f13a5f337603"} Nov 26 07:05:03 crc kubenswrapper[4492]: I1126 07:05:03.580991 4492 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c8fe1f08af902e98369a116e83876b3fe303eb8b68eac6563248f13a5f337603" Nov 26 07:05:03 crc kubenswrapper[4492]: I1126 07:05:03.581045 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-1cd3-account-create-update-hp2bv" Nov 26 07:05:03 crc kubenswrapper[4492]: W1126 07:05:03.833952 4492 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podee7058e9_1aef_482a_9309_ac36996e9c82.slice/crio-6a29234ff1a2e216c70d4f713fbe64db2ae90a63906d797602d91de7370031e9 WatchSource:0}: Error finding container 6a29234ff1a2e216c70d4f713fbe64db2ae90a63906d797602d91de7370031e9: Status 404 returned error can't find the container with id 6a29234ff1a2e216c70d4f713fbe64db2ae90a63906d797602d91de7370031e9 Nov 26 07:05:04 crc kubenswrapper[4492]: I1126 07:05:04.595691 4492 generic.go:334] "Generic (PLEG): container finished" podID="ee7058e9-1aef-482a-9309-ac36996e9c82" containerID="1b096e66db6b7b4cd6cc6e1d50b3955fff54d6894ade0cf6c6ae975af77b78d0" exitCode=0 Nov 26 07:05:04 crc kubenswrapper[4492]: I1126 07:05:04.595905 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-djlnq-config-q79vx" event={"ID":"ee7058e9-1aef-482a-9309-ac36996e9c82","Type":"ContainerDied","Data":"1b096e66db6b7b4cd6cc6e1d50b3955fff54d6894ade0cf6c6ae975af77b78d0"} Nov 26 07:05:04 crc kubenswrapper[4492]: I1126 07:05:04.596391 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-djlnq-config-q79vx" event={"ID":"ee7058e9-1aef-482a-9309-ac36996e9c82","Type":"ContainerStarted","Data":"6a29234ff1a2e216c70d4f713fbe64db2ae90a63906d797602d91de7370031e9"} Nov 26 07:05:04 crc kubenswrapper[4492]: I1126 07:05:04.605135 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"b96d68d3-ed92-40be-bfed-6143b3cdca02","Type":"ContainerStarted","Data":"0e3e5bd252b1518addce3f39fe1fa2c6f02f78374c70b5f42a7e43626fb1c6d0"} Nov 26 07:05:07 crc kubenswrapper[4492]: I1126 07:05:07.367893 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-djlnq-config-q79vx" Nov 26 07:05:07 crc kubenswrapper[4492]: I1126 07:05:07.418995 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/ee7058e9-1aef-482a-9309-ac36996e9c82-var-run-ovn\") pod \"ee7058e9-1aef-482a-9309-ac36996e9c82\" (UID: \"ee7058e9-1aef-482a-9309-ac36996e9c82\") " Nov 26 07:05:07 crc kubenswrapper[4492]: I1126 07:05:07.419083 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bjfpb\" (UniqueName: \"kubernetes.io/projected/ee7058e9-1aef-482a-9309-ac36996e9c82-kube-api-access-bjfpb\") pod \"ee7058e9-1aef-482a-9309-ac36996e9c82\" (UID: \"ee7058e9-1aef-482a-9309-ac36996e9c82\") " Nov 26 07:05:07 crc kubenswrapper[4492]: I1126 07:05:07.419165 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/ee7058e9-1aef-482a-9309-ac36996e9c82-var-log-ovn\") pod \"ee7058e9-1aef-482a-9309-ac36996e9c82\" (UID: \"ee7058e9-1aef-482a-9309-ac36996e9c82\") " Nov 26 07:05:07 crc kubenswrapper[4492]: I1126 07:05:07.419208 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ee7058e9-1aef-482a-9309-ac36996e9c82-scripts\") pod \"ee7058e9-1aef-482a-9309-ac36996e9c82\" (UID: \"ee7058e9-1aef-482a-9309-ac36996e9c82\") " Nov 26 07:05:07 crc kubenswrapper[4492]: I1126 07:05:07.419267 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/ee7058e9-1aef-482a-9309-ac36996e9c82-var-run\") pod \"ee7058e9-1aef-482a-9309-ac36996e9c82\" (UID: \"ee7058e9-1aef-482a-9309-ac36996e9c82\") " Nov 26 07:05:07 crc kubenswrapper[4492]: I1126 07:05:07.419344 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ee7058e9-1aef-482a-9309-ac36996e9c82-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "ee7058e9-1aef-482a-9309-ac36996e9c82" (UID: "ee7058e9-1aef-482a-9309-ac36996e9c82"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 07:05:07 crc kubenswrapper[4492]: I1126 07:05:07.419367 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/ee7058e9-1aef-482a-9309-ac36996e9c82-additional-scripts\") pod \"ee7058e9-1aef-482a-9309-ac36996e9c82\" (UID: \"ee7058e9-1aef-482a-9309-ac36996e9c82\") " Nov 26 07:05:07 crc kubenswrapper[4492]: I1126 07:05:07.419624 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ee7058e9-1aef-482a-9309-ac36996e9c82-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "ee7058e9-1aef-482a-9309-ac36996e9c82" (UID: "ee7058e9-1aef-482a-9309-ac36996e9c82"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 07:05:07 crc kubenswrapper[4492]: I1126 07:05:07.419654 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ee7058e9-1aef-482a-9309-ac36996e9c82-var-run" (OuterVolumeSpecName: "var-run") pod "ee7058e9-1aef-482a-9309-ac36996e9c82" (UID: "ee7058e9-1aef-482a-9309-ac36996e9c82"). InnerVolumeSpecName "var-run". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 07:05:07 crc kubenswrapper[4492]: I1126 07:05:07.420299 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ee7058e9-1aef-482a-9309-ac36996e9c82-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "ee7058e9-1aef-482a-9309-ac36996e9c82" (UID: "ee7058e9-1aef-482a-9309-ac36996e9c82"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:05:07 crc kubenswrapper[4492]: I1126 07:05:07.420440 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ee7058e9-1aef-482a-9309-ac36996e9c82-scripts" (OuterVolumeSpecName: "scripts") pod "ee7058e9-1aef-482a-9309-ac36996e9c82" (UID: "ee7058e9-1aef-482a-9309-ac36996e9c82"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:05:07 crc kubenswrapper[4492]: I1126 07:05:07.420734 4492 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/ee7058e9-1aef-482a-9309-ac36996e9c82-additional-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 07:05:07 crc kubenswrapper[4492]: I1126 07:05:07.420748 4492 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/ee7058e9-1aef-482a-9309-ac36996e9c82-var-run-ovn\") on node \"crc\" DevicePath \"\"" Nov 26 07:05:07 crc kubenswrapper[4492]: I1126 07:05:07.420758 4492 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/ee7058e9-1aef-482a-9309-ac36996e9c82-var-log-ovn\") on node \"crc\" DevicePath \"\"" Nov 26 07:05:07 crc kubenswrapper[4492]: I1126 07:05:07.420766 4492 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ee7058e9-1aef-482a-9309-ac36996e9c82-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 07:05:07 crc kubenswrapper[4492]: I1126 07:05:07.420774 4492 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/ee7058e9-1aef-482a-9309-ac36996e9c82-var-run\") on node \"crc\" DevicePath \"\"" Nov 26 07:05:07 crc kubenswrapper[4492]: I1126 07:05:07.422812 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ee7058e9-1aef-482a-9309-ac36996e9c82-kube-api-access-bjfpb" (OuterVolumeSpecName: "kube-api-access-bjfpb") pod "ee7058e9-1aef-482a-9309-ac36996e9c82" (UID: "ee7058e9-1aef-482a-9309-ac36996e9c82"). InnerVolumeSpecName "kube-api-access-bjfpb". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:05:07 crc kubenswrapper[4492]: I1126 07:05:07.548009 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bjfpb\" (UniqueName: \"kubernetes.io/projected/ee7058e9-1aef-482a-9309-ac36996e9c82-kube-api-access-bjfpb\") on node \"crc\" DevicePath \"\"" Nov 26 07:05:07 crc kubenswrapper[4492]: I1126 07:05:07.662348 4492 generic.go:334] "Generic (PLEG): container finished" podID="17dcd8ee-932f-4f74-be85-653f6f94a213" containerID="ec64bde6351f0bc46940eccfd21a44db6e39b6048f6be897def604f723a3d7df" exitCode=0 Nov 26 07:05:07 crc kubenswrapper[4492]: I1126 07:05:07.662442 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-j8sgq" event={"ID":"17dcd8ee-932f-4f74-be85-653f6f94a213","Type":"ContainerDied","Data":"ec64bde6351f0bc46940eccfd21a44db6e39b6048f6be897def604f723a3d7df"} Nov 26 07:05:07 crc kubenswrapper[4492]: I1126 07:05:07.669269 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-c6d4d" event={"ID":"d934c9b1-8adf-41d3-9501-d55fffe02fd7","Type":"ContainerStarted","Data":"a7da5b7c9163e0278ea4c98c0b4679a9f780720bd8fe822e5d4bd5e3c2246515"} Nov 26 07:05:07 crc kubenswrapper[4492]: I1126 07:05:07.686531 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"b96d68d3-ed92-40be-bfed-6143b3cdca02","Type":"ContainerStarted","Data":"e8a1d782b6504d0ff7504b1e1a2efa9091ede63eec78e4c75df883f51f62dd36"} Nov 26 07:05:07 crc kubenswrapper[4492]: I1126 07:05:07.686575 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"b96d68d3-ed92-40be-bfed-6143b3cdca02","Type":"ContainerStarted","Data":"1edd1af828a8d630a7eeec4e4b59b94ca355b8fff96abfafc2dc8d313fbf84ba"} Nov 26 07:05:07 crc kubenswrapper[4492]: I1126 07:05:07.688054 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-djlnq-config-q79vx" event={"ID":"ee7058e9-1aef-482a-9309-ac36996e9c82","Type":"ContainerDied","Data":"6a29234ff1a2e216c70d4f713fbe64db2ae90a63906d797602d91de7370031e9"} Nov 26 07:05:07 crc kubenswrapper[4492]: I1126 07:05:07.688152 4492 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6a29234ff1a2e216c70d4f713fbe64db2ae90a63906d797602d91de7370031e9" Nov 26 07:05:07 crc kubenswrapper[4492]: I1126 07:05:07.688287 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-djlnq-config-q79vx" Nov 26 07:05:07 crc kubenswrapper[4492]: I1126 07:05:07.699338 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-sync-c6d4d" podStartSLOduration=2.183781322 podStartE2EDuration="9.69932262s" podCreationTimestamp="2025-11-26 07:04:58 +0000 UTC" firstStartedPulling="2025-11-26 07:04:59.711486277 +0000 UTC m=+995.595374576" lastFinishedPulling="2025-11-26 07:05:07.227027576 +0000 UTC m=+1003.110915874" observedRunningTime="2025-11-26 07:05:07.698563373 +0000 UTC m=+1003.582451670" watchObservedRunningTime="2025-11-26 07:05:07.69932262 +0000 UTC m=+1003.583210918" Nov 26 07:05:08 crc kubenswrapper[4492]: I1126 07:05:08.464220 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-djlnq-config-q79vx"] Nov 26 07:05:08 crc kubenswrapper[4492]: I1126 07:05:08.469585 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-djlnq-config-q79vx"] Nov 26 07:05:08 crc kubenswrapper[4492]: I1126 07:05:08.704666 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"b96d68d3-ed92-40be-bfed-6143b3cdca02","Type":"ContainerStarted","Data":"9530d8426a565ecca3ccde283e79690dbad59ddf66dfafaabc98487d3148d46a"} Nov 26 07:05:09 crc kubenswrapper[4492]: I1126 07:05:09.320705 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-j8sgq" Nov 26 07:05:09 crc kubenswrapper[4492]: I1126 07:05:09.503943 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/17dcd8ee-932f-4f74-be85-653f6f94a213-config-data\") pod \"17dcd8ee-932f-4f74-be85-653f6f94a213\" (UID: \"17dcd8ee-932f-4f74-be85-653f6f94a213\") " Nov 26 07:05:09 crc kubenswrapper[4492]: I1126 07:05:09.504392 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/17dcd8ee-932f-4f74-be85-653f6f94a213-combined-ca-bundle\") pod \"17dcd8ee-932f-4f74-be85-653f6f94a213\" (UID: \"17dcd8ee-932f-4f74-be85-653f6f94a213\") " Nov 26 07:05:09 crc kubenswrapper[4492]: I1126 07:05:09.504670 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rlh42\" (UniqueName: \"kubernetes.io/projected/17dcd8ee-932f-4f74-be85-653f6f94a213-kube-api-access-rlh42\") pod \"17dcd8ee-932f-4f74-be85-653f6f94a213\" (UID: \"17dcd8ee-932f-4f74-be85-653f6f94a213\") " Nov 26 07:05:09 crc kubenswrapper[4492]: I1126 07:05:09.504751 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/17dcd8ee-932f-4f74-be85-653f6f94a213-db-sync-config-data\") pod \"17dcd8ee-932f-4f74-be85-653f6f94a213\" (UID: \"17dcd8ee-932f-4f74-be85-653f6f94a213\") " Nov 26 07:05:09 crc kubenswrapper[4492]: I1126 07:05:09.523934 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/17dcd8ee-932f-4f74-be85-653f6f94a213-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "17dcd8ee-932f-4f74-be85-653f6f94a213" (UID: "17dcd8ee-932f-4f74-be85-653f6f94a213"). InnerVolumeSpecName "db-sync-config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:05:09 crc kubenswrapper[4492]: I1126 07:05:09.524927 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/17dcd8ee-932f-4f74-be85-653f6f94a213-kube-api-access-rlh42" (OuterVolumeSpecName: "kube-api-access-rlh42") pod "17dcd8ee-932f-4f74-be85-653f6f94a213" (UID: "17dcd8ee-932f-4f74-be85-653f6f94a213"). InnerVolumeSpecName "kube-api-access-rlh42". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:05:09 crc kubenswrapper[4492]: I1126 07:05:09.545659 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/17dcd8ee-932f-4f74-be85-653f6f94a213-config-data" (OuterVolumeSpecName: "config-data") pod "17dcd8ee-932f-4f74-be85-653f6f94a213" (UID: "17dcd8ee-932f-4f74-be85-653f6f94a213"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:05:09 crc kubenswrapper[4492]: I1126 07:05:09.548144 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/17dcd8ee-932f-4f74-be85-653f6f94a213-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "17dcd8ee-932f-4f74-be85-653f6f94a213" (UID: "17dcd8ee-932f-4f74-be85-653f6f94a213"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:05:09 crc kubenswrapper[4492]: I1126 07:05:09.607369 4492 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/17dcd8ee-932f-4f74-be85-653f6f94a213-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 07:05:09 crc kubenswrapper[4492]: I1126 07:05:09.608397 4492 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/17dcd8ee-932f-4f74-be85-653f6f94a213-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:05:09 crc kubenswrapper[4492]: I1126 07:05:09.608433 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rlh42\" (UniqueName: \"kubernetes.io/projected/17dcd8ee-932f-4f74-be85-653f6f94a213-kube-api-access-rlh42\") on node \"crc\" DevicePath \"\"" Nov 26 07:05:09 crc kubenswrapper[4492]: I1126 07:05:09.608450 4492 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/17dcd8ee-932f-4f74-be85-653f6f94a213-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 07:05:09 crc kubenswrapper[4492]: I1126 07:05:09.722719 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-j8sgq" event={"ID":"17dcd8ee-932f-4f74-be85-653f6f94a213","Type":"ContainerDied","Data":"e5ba151e783e26d24ff36d2e824b60322e3b8d76daab8a9ff6df2003590736f0"} Nov 26 07:05:09 crc kubenswrapper[4492]: I1126 07:05:09.722757 4492 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e5ba151e783e26d24ff36d2e824b60322e3b8d76daab8a9ff6df2003590736f0" Nov 26 07:05:09 crc kubenswrapper[4492]: I1126 07:05:09.722773 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-j8sgq" Nov 26 07:05:10 crc kubenswrapper[4492]: I1126 07:05:10.083554 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7b9fb5b88f-5mpj7"] Nov 26 07:05:10 crc kubenswrapper[4492]: E1126 07:05:10.083863 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="17dcd8ee-932f-4f74-be85-653f6f94a213" containerName="glance-db-sync" Nov 26 07:05:10 crc kubenswrapper[4492]: I1126 07:05:10.083880 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="17dcd8ee-932f-4f74-be85-653f6f94a213" containerName="glance-db-sync" Nov 26 07:05:10 crc kubenswrapper[4492]: E1126 07:05:10.083894 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="27e7bb7f-22c2-4852-b264-371eeaa3907d" containerName="mariadb-account-create-update" Nov 26 07:05:10 crc kubenswrapper[4492]: I1126 07:05:10.083900 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="27e7bb7f-22c2-4852-b264-371eeaa3907d" containerName="mariadb-account-create-update" Nov 26 07:05:10 crc kubenswrapper[4492]: E1126 07:05:10.083909 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ee7058e9-1aef-482a-9309-ac36996e9c82" containerName="ovn-config" Nov 26 07:05:10 crc kubenswrapper[4492]: I1126 07:05:10.083924 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="ee7058e9-1aef-482a-9309-ac36996e9c82" containerName="ovn-config" Nov 26 07:05:10 crc kubenswrapper[4492]: E1126 07:05:10.083934 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7b557b94-67b1-424a-9f45-84ea1183e728" containerName="mariadb-database-create" Nov 26 07:05:10 crc kubenswrapper[4492]: I1126 07:05:10.083939 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="7b557b94-67b1-424a-9f45-84ea1183e728" containerName="mariadb-database-create" Nov 26 07:05:10 crc kubenswrapper[4492]: E1126 07:05:10.083948 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="98538850-301a-4da8-aa72-7df26932b307" containerName="mariadb-database-create" Nov 26 07:05:10 crc kubenswrapper[4492]: I1126 07:05:10.083953 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="98538850-301a-4da8-aa72-7df26932b307" containerName="mariadb-database-create" Nov 26 07:05:10 crc kubenswrapper[4492]: E1126 07:05:10.083962 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7ac9fc7f-8feb-41e2-b61b-084a9efd4512" containerName="mariadb-account-create-update" Nov 26 07:05:10 crc kubenswrapper[4492]: I1126 07:05:10.083967 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="7ac9fc7f-8feb-41e2-b61b-084a9efd4512" containerName="mariadb-account-create-update" Nov 26 07:05:10 crc kubenswrapper[4492]: E1126 07:05:10.083979 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="45f05976-8325-42bb-a4ac-c49c0fd7a0c2" containerName="mariadb-account-create-update" Nov 26 07:05:10 crc kubenswrapper[4492]: I1126 07:05:10.083985 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="45f05976-8325-42bb-a4ac-c49c0fd7a0c2" containerName="mariadb-account-create-update" Nov 26 07:05:10 crc kubenswrapper[4492]: E1126 07:05:10.083995 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4b193a3f-89f3-467c-93db-3c84e3d9272d" containerName="mariadb-database-create" Nov 26 07:05:10 crc kubenswrapper[4492]: I1126 07:05:10.084001 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="4b193a3f-89f3-467c-93db-3c84e3d9272d" containerName="mariadb-database-create" Nov 26 07:05:10 crc 
kubenswrapper[4492]: E1126 07:05:10.084019 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c5ed7a55-ffae-4dfd-b384-1f7eaa41a221" containerName="mariadb-account-create-update" Nov 26 07:05:10 crc kubenswrapper[4492]: I1126 07:05:10.084025 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="c5ed7a55-ffae-4dfd-b384-1f7eaa41a221" containerName="mariadb-account-create-update" Nov 26 07:05:10 crc kubenswrapper[4492]: I1126 07:05:10.084163 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="27e7bb7f-22c2-4852-b264-371eeaa3907d" containerName="mariadb-account-create-update" Nov 26 07:05:10 crc kubenswrapper[4492]: I1126 07:05:10.084193 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="98538850-301a-4da8-aa72-7df26932b307" containerName="mariadb-database-create" Nov 26 07:05:10 crc kubenswrapper[4492]: I1126 07:05:10.084209 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="ee7058e9-1aef-482a-9309-ac36996e9c82" containerName="ovn-config" Nov 26 07:05:10 crc kubenswrapper[4492]: I1126 07:05:10.084222 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="c5ed7a55-ffae-4dfd-b384-1f7eaa41a221" containerName="mariadb-account-create-update" Nov 26 07:05:10 crc kubenswrapper[4492]: I1126 07:05:10.084232 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="7ac9fc7f-8feb-41e2-b61b-084a9efd4512" containerName="mariadb-account-create-update" Nov 26 07:05:10 crc kubenswrapper[4492]: I1126 07:05:10.084239 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="7b557b94-67b1-424a-9f45-84ea1183e728" containerName="mariadb-database-create" Nov 26 07:05:10 crc kubenswrapper[4492]: I1126 07:05:10.084246 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="45f05976-8325-42bb-a4ac-c49c0fd7a0c2" containerName="mariadb-account-create-update" Nov 26 07:05:10 crc kubenswrapper[4492]: I1126 07:05:10.084257 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="17dcd8ee-932f-4f74-be85-653f6f94a213" containerName="glance-db-sync" Nov 26 07:05:10 crc kubenswrapper[4492]: I1126 07:05:10.084265 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="4b193a3f-89f3-467c-93db-3c84e3d9272d" containerName="mariadb-database-create" Nov 26 07:05:10 crc kubenswrapper[4492]: I1126 07:05:10.084974 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7b9fb5b88f-5mpj7" Nov 26 07:05:10 crc kubenswrapper[4492]: I1126 07:05:10.096145 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7b9fb5b88f-5mpj7"] Nov 26 07:05:10 crc kubenswrapper[4492]: I1126 07:05:10.220139 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2f5bbf95-5ea6-4d2f-becb-7d45e5a07613-ovsdbserver-nb\") pod \"dnsmasq-dns-7b9fb5b88f-5mpj7\" (UID: \"2f5bbf95-5ea6-4d2f-becb-7d45e5a07613\") " pod="openstack/dnsmasq-dns-7b9fb5b88f-5mpj7" Nov 26 07:05:10 crc kubenswrapper[4492]: I1126 07:05:10.220411 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q46bg\" (UniqueName: \"kubernetes.io/projected/2f5bbf95-5ea6-4d2f-becb-7d45e5a07613-kube-api-access-q46bg\") pod \"dnsmasq-dns-7b9fb5b88f-5mpj7\" (UID: \"2f5bbf95-5ea6-4d2f-becb-7d45e5a07613\") " pod="openstack/dnsmasq-dns-7b9fb5b88f-5mpj7" Nov 26 07:05:10 crc kubenswrapper[4492]: I1126 07:05:10.220535 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2f5bbf95-5ea6-4d2f-becb-7d45e5a07613-dns-svc\") pod \"dnsmasq-dns-7b9fb5b88f-5mpj7\" (UID: \"2f5bbf95-5ea6-4d2f-becb-7d45e5a07613\") " pod="openstack/dnsmasq-dns-7b9fb5b88f-5mpj7" Nov 26 07:05:10 crc kubenswrapper[4492]: I1126 07:05:10.220611 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2f5bbf95-5ea6-4d2f-becb-7d45e5a07613-ovsdbserver-sb\") pod \"dnsmasq-dns-7b9fb5b88f-5mpj7\" (UID: \"2f5bbf95-5ea6-4d2f-becb-7d45e5a07613\") " pod="openstack/dnsmasq-dns-7b9fb5b88f-5mpj7" Nov 26 07:05:10 crc kubenswrapper[4492]: I1126 07:05:10.220834 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2f5bbf95-5ea6-4d2f-becb-7d45e5a07613-config\") pod \"dnsmasq-dns-7b9fb5b88f-5mpj7\" (UID: \"2f5bbf95-5ea6-4d2f-becb-7d45e5a07613\") " pod="openstack/dnsmasq-dns-7b9fb5b88f-5mpj7" Nov 26 07:05:10 crc kubenswrapper[4492]: I1126 07:05:10.323104 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2f5bbf95-5ea6-4d2f-becb-7d45e5a07613-config\") pod \"dnsmasq-dns-7b9fb5b88f-5mpj7\" (UID: \"2f5bbf95-5ea6-4d2f-becb-7d45e5a07613\") " pod="openstack/dnsmasq-dns-7b9fb5b88f-5mpj7" Nov 26 07:05:10 crc kubenswrapper[4492]: I1126 07:05:10.323483 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2f5bbf95-5ea6-4d2f-becb-7d45e5a07613-ovsdbserver-nb\") pod \"dnsmasq-dns-7b9fb5b88f-5mpj7\" (UID: \"2f5bbf95-5ea6-4d2f-becb-7d45e5a07613\") " pod="openstack/dnsmasq-dns-7b9fb5b88f-5mpj7" Nov 26 07:05:10 crc kubenswrapper[4492]: I1126 07:05:10.323559 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q46bg\" (UniqueName: \"kubernetes.io/projected/2f5bbf95-5ea6-4d2f-becb-7d45e5a07613-kube-api-access-q46bg\") pod \"dnsmasq-dns-7b9fb5b88f-5mpj7\" (UID: \"2f5bbf95-5ea6-4d2f-becb-7d45e5a07613\") " pod="openstack/dnsmasq-dns-7b9fb5b88f-5mpj7" Nov 26 07:05:10 crc kubenswrapper[4492]: I1126 07:05:10.323614 4492 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2f5bbf95-5ea6-4d2f-becb-7d45e5a07613-dns-svc\") pod \"dnsmasq-dns-7b9fb5b88f-5mpj7\" (UID: \"2f5bbf95-5ea6-4d2f-becb-7d45e5a07613\") " pod="openstack/dnsmasq-dns-7b9fb5b88f-5mpj7" Nov 26 07:05:10 crc kubenswrapper[4492]: I1126 07:05:10.323652 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2f5bbf95-5ea6-4d2f-becb-7d45e5a07613-ovsdbserver-sb\") pod \"dnsmasq-dns-7b9fb5b88f-5mpj7\" (UID: \"2f5bbf95-5ea6-4d2f-becb-7d45e5a07613\") " pod="openstack/dnsmasq-dns-7b9fb5b88f-5mpj7" Nov 26 07:05:10 crc kubenswrapper[4492]: I1126 07:05:10.323980 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2f5bbf95-5ea6-4d2f-becb-7d45e5a07613-config\") pod \"dnsmasq-dns-7b9fb5b88f-5mpj7\" (UID: \"2f5bbf95-5ea6-4d2f-becb-7d45e5a07613\") " pod="openstack/dnsmasq-dns-7b9fb5b88f-5mpj7" Nov 26 07:05:10 crc kubenswrapper[4492]: I1126 07:05:10.324366 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2f5bbf95-5ea6-4d2f-becb-7d45e5a07613-ovsdbserver-nb\") pod \"dnsmasq-dns-7b9fb5b88f-5mpj7\" (UID: \"2f5bbf95-5ea6-4d2f-becb-7d45e5a07613\") " pod="openstack/dnsmasq-dns-7b9fb5b88f-5mpj7" Nov 26 07:05:10 crc kubenswrapper[4492]: I1126 07:05:10.324626 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2f5bbf95-5ea6-4d2f-becb-7d45e5a07613-dns-svc\") pod \"dnsmasq-dns-7b9fb5b88f-5mpj7\" (UID: \"2f5bbf95-5ea6-4d2f-becb-7d45e5a07613\") " pod="openstack/dnsmasq-dns-7b9fb5b88f-5mpj7" Nov 26 07:05:10 crc kubenswrapper[4492]: I1126 07:05:10.324671 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2f5bbf95-5ea6-4d2f-becb-7d45e5a07613-ovsdbserver-sb\") pod \"dnsmasq-dns-7b9fb5b88f-5mpj7\" (UID: \"2f5bbf95-5ea6-4d2f-becb-7d45e5a07613\") " pod="openstack/dnsmasq-dns-7b9fb5b88f-5mpj7" Nov 26 07:05:10 crc kubenswrapper[4492]: I1126 07:05:10.355393 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q46bg\" (UniqueName: \"kubernetes.io/projected/2f5bbf95-5ea6-4d2f-becb-7d45e5a07613-kube-api-access-q46bg\") pod \"dnsmasq-dns-7b9fb5b88f-5mpj7\" (UID: \"2f5bbf95-5ea6-4d2f-becb-7d45e5a07613\") " pod="openstack/dnsmasq-dns-7b9fb5b88f-5mpj7" Nov 26 07:05:10 crc kubenswrapper[4492]: I1126 07:05:10.404336 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7b9fb5b88f-5mpj7" Nov 26 07:05:10 crc kubenswrapper[4492]: I1126 07:05:10.479852 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ee7058e9-1aef-482a-9309-ac36996e9c82" path="/var/lib/kubelet/pods/ee7058e9-1aef-482a-9309-ac36996e9c82/volumes" Nov 26 07:05:10 crc kubenswrapper[4492]: I1126 07:05:10.746640 4492 generic.go:334] "Generic (PLEG): container finished" podID="d934c9b1-8adf-41d3-9501-d55fffe02fd7" containerID="a7da5b7c9163e0278ea4c98c0b4679a9f780720bd8fe822e5d4bd5e3c2246515" exitCode=0 Nov 26 07:05:10 crc kubenswrapper[4492]: I1126 07:05:10.746729 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-c6d4d" event={"ID":"d934c9b1-8adf-41d3-9501-d55fffe02fd7","Type":"ContainerDied","Data":"a7da5b7c9163e0278ea4c98c0b4679a9f780720bd8fe822e5d4bd5e3c2246515"} Nov 26 07:05:10 crc kubenswrapper[4492]: I1126 07:05:10.758129 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"b96d68d3-ed92-40be-bfed-6143b3cdca02","Type":"ContainerStarted","Data":"5aa555a35ca7b3b3707bb56732066cbb86b16ac0095f80936dd4102c08b6cf2e"} Nov 26 07:05:10 crc kubenswrapper[4492]: I1126 07:05:10.758186 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"b96d68d3-ed92-40be-bfed-6143b3cdca02","Type":"ContainerStarted","Data":"d3c992324d2a75a6349ab1fa1b52e6aca5a75e3ce829691363acf83c751a7e34"} Nov 26 07:05:10 crc kubenswrapper[4492]: I1126 07:05:10.758207 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"b96d68d3-ed92-40be-bfed-6143b3cdca02","Type":"ContainerStarted","Data":"0612a7cde2e26d78af4e06c22627297db5dbdb58596b395d31d86ca8ba74344c"} Nov 26 07:05:10 crc kubenswrapper[4492]: I1126 07:05:10.758220 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"b96d68d3-ed92-40be-bfed-6143b3cdca02","Type":"ContainerStarted","Data":"e6f8df044d0fc0060c443a28835ba6d81e798c32f466808551c35781cbbeeee8"} Nov 26 07:05:10 crc kubenswrapper[4492]: I1126 07:05:10.911805 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7b9fb5b88f-5mpj7"] Nov 26 07:05:10 crc kubenswrapper[4492]: W1126 07:05:10.919167 4492 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2f5bbf95_5ea6_4d2f_becb_7d45e5a07613.slice/crio-e88edd4a7dc2328ebcc4a2c26f8752ca251462588dd0e47de5c7d81f37b48c08 WatchSource:0}: Error finding container e88edd4a7dc2328ebcc4a2c26f8752ca251462588dd0e47de5c7d81f37b48c08: Status 404 returned error can't find the container with id e88edd4a7dc2328ebcc4a2c26f8752ca251462588dd0e47de5c7d81f37b48c08 Nov 26 07:05:11 crc kubenswrapper[4492]: I1126 07:05:11.782860 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"b96d68d3-ed92-40be-bfed-6143b3cdca02","Type":"ContainerStarted","Data":"3dbe2ab9285d4f26c72fb745076b80523b81b85f6c86477f1b0bc90902719cc3"} Nov 26 07:05:11 crc kubenswrapper[4492]: I1126 07:05:11.783355 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"b96d68d3-ed92-40be-bfed-6143b3cdca02","Type":"ContainerStarted","Data":"b5a50d774efe45cda18ac7411a4abaf71414fe27985dc27d18d5817468435a85"} Nov 26 07:05:11 crc kubenswrapper[4492]: I1126 07:05:11.784797 4492 generic.go:334] "Generic (PLEG): container finished" 
podID="2f5bbf95-5ea6-4d2f-becb-7d45e5a07613" containerID="ffb7b8adffc27d54c06d5e60fcd5abc5fd41cb9180f49fb20d2b63de60416bfb" exitCode=0 Nov 26 07:05:11 crc kubenswrapper[4492]: I1126 07:05:11.785278 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7b9fb5b88f-5mpj7" event={"ID":"2f5bbf95-5ea6-4d2f-becb-7d45e5a07613","Type":"ContainerDied","Data":"ffb7b8adffc27d54c06d5e60fcd5abc5fd41cb9180f49fb20d2b63de60416bfb"} Nov 26 07:05:11 crc kubenswrapper[4492]: I1126 07:05:11.785329 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7b9fb5b88f-5mpj7" event={"ID":"2f5bbf95-5ea6-4d2f-becb-7d45e5a07613","Type":"ContainerStarted","Data":"e88edd4a7dc2328ebcc4a2c26f8752ca251462588dd0e47de5c7d81f37b48c08"} Nov 26 07:05:12 crc kubenswrapper[4492]: I1126 07:05:12.021313 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-c6d4d" Nov 26 07:05:12 crc kubenswrapper[4492]: I1126 07:05:12.162593 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d934c9b1-8adf-41d3-9501-d55fffe02fd7-combined-ca-bundle\") pod \"d934c9b1-8adf-41d3-9501-d55fffe02fd7\" (UID: \"d934c9b1-8adf-41d3-9501-d55fffe02fd7\") " Nov 26 07:05:12 crc kubenswrapper[4492]: I1126 07:05:12.162676 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d934c9b1-8adf-41d3-9501-d55fffe02fd7-config-data\") pod \"d934c9b1-8adf-41d3-9501-d55fffe02fd7\" (UID: \"d934c9b1-8adf-41d3-9501-d55fffe02fd7\") " Nov 26 07:05:12 crc kubenswrapper[4492]: I1126 07:05:12.162713 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-62tsv\" (UniqueName: \"kubernetes.io/projected/d934c9b1-8adf-41d3-9501-d55fffe02fd7-kube-api-access-62tsv\") pod \"d934c9b1-8adf-41d3-9501-d55fffe02fd7\" (UID: \"d934c9b1-8adf-41d3-9501-d55fffe02fd7\") " Nov 26 07:05:12 crc kubenswrapper[4492]: I1126 07:05:12.169495 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d934c9b1-8adf-41d3-9501-d55fffe02fd7-kube-api-access-62tsv" (OuterVolumeSpecName: "kube-api-access-62tsv") pod "d934c9b1-8adf-41d3-9501-d55fffe02fd7" (UID: "d934c9b1-8adf-41d3-9501-d55fffe02fd7"). InnerVolumeSpecName "kube-api-access-62tsv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:05:12 crc kubenswrapper[4492]: I1126 07:05:12.190667 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d934c9b1-8adf-41d3-9501-d55fffe02fd7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d934c9b1-8adf-41d3-9501-d55fffe02fd7" (UID: "d934c9b1-8adf-41d3-9501-d55fffe02fd7"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:05:12 crc kubenswrapper[4492]: I1126 07:05:12.207281 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d934c9b1-8adf-41d3-9501-d55fffe02fd7-config-data" (OuterVolumeSpecName: "config-data") pod "d934c9b1-8adf-41d3-9501-d55fffe02fd7" (UID: "d934c9b1-8adf-41d3-9501-d55fffe02fd7"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:05:12 crc kubenswrapper[4492]: I1126 07:05:12.265316 4492 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d934c9b1-8adf-41d3-9501-d55fffe02fd7-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:05:12 crc kubenswrapper[4492]: I1126 07:05:12.265365 4492 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d934c9b1-8adf-41d3-9501-d55fffe02fd7-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 07:05:12 crc kubenswrapper[4492]: I1126 07:05:12.265380 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-62tsv\" (UniqueName: \"kubernetes.io/projected/d934c9b1-8adf-41d3-9501-d55fffe02fd7-kube-api-access-62tsv\") on node \"crc\" DevicePath \"\"" Nov 26 07:05:12 crc kubenswrapper[4492]: I1126 07:05:12.795200 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-c6d4d" event={"ID":"d934c9b1-8adf-41d3-9501-d55fffe02fd7","Type":"ContainerDied","Data":"4c923373ee28abfdc6e5f73bb265c04a73aae21d025950c80c52d5a8437542db"} Nov 26 07:05:12 crc kubenswrapper[4492]: I1126 07:05:12.795240 4492 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4c923373ee28abfdc6e5f73bb265c04a73aae21d025950c80c52d5a8437542db" Nov 26 07:05:12 crc kubenswrapper[4492]: I1126 07:05:12.795299 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-c6d4d" Nov 26 07:05:12 crc kubenswrapper[4492]: I1126 07:05:12.803539 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"b96d68d3-ed92-40be-bfed-6143b3cdca02","Type":"ContainerStarted","Data":"8ed5b5bcf19bc058bd069190f42b6e2824a2136e191c2360d734f8265791dad1"} Nov 26 07:05:12 crc kubenswrapper[4492]: I1126 07:05:12.806885 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7b9fb5b88f-5mpj7" event={"ID":"2f5bbf95-5ea6-4d2f-becb-7d45e5a07613","Type":"ContainerStarted","Data":"b1d6ba808b58c654020edf9ee389faca3ae9ecdf18cf7ff3e818ee81d9761ed8"} Nov 26 07:05:12 crc kubenswrapper[4492]: I1126 07:05:12.807794 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-7b9fb5b88f-5mpj7" Nov 26 07:05:12 crc kubenswrapper[4492]: I1126 07:05:12.842568 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-storage-0" podStartSLOduration=40.369620261 podStartE2EDuration="51.842552357s" podCreationTimestamp="2025-11-26 07:04:21 +0000 UTC" firstStartedPulling="2025-11-26 07:04:58.134359554 +0000 UTC m=+994.018247851" lastFinishedPulling="2025-11-26 07:05:09.607291649 +0000 UTC m=+1005.491179947" observedRunningTime="2025-11-26 07:05:12.835797657 +0000 UTC m=+1008.719685954" watchObservedRunningTime="2025-11-26 07:05:12.842552357 +0000 UTC m=+1008.726440654" Nov 26 07:05:12 crc kubenswrapper[4492]: I1126 07:05:12.988828 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-7b9fb5b88f-5mpj7" podStartSLOduration=2.988798176 podStartE2EDuration="2.988798176s" podCreationTimestamp="2025-11-26 07:05:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:05:12.855721557 +0000 UTC m=+1008.739609845" watchObservedRunningTime="2025-11-26 07:05:12.988798176 +0000 UTC m=+1008.872686474" 
Nov 26 07:05:12 crc kubenswrapper[4492]: I1126 07:05:12.996892 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7b9fb5b88f-5mpj7"] Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.065115 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7dffd5bb99-5tdmw"] Nov 26 07:05:13 crc kubenswrapper[4492]: E1126 07:05:13.065642 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d934c9b1-8adf-41d3-9501-d55fffe02fd7" containerName="keystone-db-sync" Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.065667 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="d934c9b1-8adf-41d3-9501-d55fffe02fd7" containerName="keystone-db-sync" Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.065893 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="d934c9b1-8adf-41d3-9501-d55fffe02fd7" containerName="keystone-db-sync" Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.067009 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7dffd5bb99-5tdmw" Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.072432 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-gzdq2"] Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.076252 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-gzdq2" Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.087655 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-t7xkz" Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.087947 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.088093 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.090100 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.091003 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.129583 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-gzdq2"] Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.190526 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/7696e706-4bee-4acb-9c67-58a83178c948-fernet-keys\") pod \"keystone-bootstrap-gzdq2\" (UID: \"7696e706-4bee-4acb-9c67-58a83178c948\") " pod="openstack/keystone-bootstrap-gzdq2" Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.190613 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b2rnf\" (UniqueName: \"kubernetes.io/projected/7696e706-4bee-4acb-9c67-58a83178c948-kube-api-access-b2rnf\") pod \"keystone-bootstrap-gzdq2\" (UID: \"7696e706-4bee-4acb-9c67-58a83178c948\") " pod="openstack/keystone-bootstrap-gzdq2" Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.190639 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fe2b9f14-cbbd-47d8-8c43-116aecf08b1b-config\") pod \"dnsmasq-dns-7dffd5bb99-5tdmw\" (UID: 
\"fe2b9f14-cbbd-47d8-8c43-116aecf08b1b\") " pod="openstack/dnsmasq-dns-7dffd5bb99-5tdmw" Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.190778 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/fe2b9f14-cbbd-47d8-8c43-116aecf08b1b-ovsdbserver-sb\") pod \"dnsmasq-dns-7dffd5bb99-5tdmw\" (UID: \"fe2b9f14-cbbd-47d8-8c43-116aecf08b1b\") " pod="openstack/dnsmasq-dns-7dffd5bb99-5tdmw" Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.190796 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7696e706-4bee-4acb-9c67-58a83178c948-config-data\") pod \"keystone-bootstrap-gzdq2\" (UID: \"7696e706-4bee-4acb-9c67-58a83178c948\") " pod="openstack/keystone-bootstrap-gzdq2" Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.190812 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/fe2b9f14-cbbd-47d8-8c43-116aecf08b1b-ovsdbserver-nb\") pod \"dnsmasq-dns-7dffd5bb99-5tdmw\" (UID: \"fe2b9f14-cbbd-47d8-8c43-116aecf08b1b\") " pod="openstack/dnsmasq-dns-7dffd5bb99-5tdmw" Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.190830 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rl9zp\" (UniqueName: \"kubernetes.io/projected/fe2b9f14-cbbd-47d8-8c43-116aecf08b1b-kube-api-access-rl9zp\") pod \"dnsmasq-dns-7dffd5bb99-5tdmw\" (UID: \"fe2b9f14-cbbd-47d8-8c43-116aecf08b1b\") " pod="openstack/dnsmasq-dns-7dffd5bb99-5tdmw" Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.190866 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7696e706-4bee-4acb-9c67-58a83178c948-scripts\") pod \"keystone-bootstrap-gzdq2\" (UID: \"7696e706-4bee-4acb-9c67-58a83178c948\") " pod="openstack/keystone-bootstrap-gzdq2" Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.190899 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/7696e706-4bee-4acb-9c67-58a83178c948-credential-keys\") pod \"keystone-bootstrap-gzdq2\" (UID: \"7696e706-4bee-4acb-9c67-58a83178c948\") " pod="openstack/keystone-bootstrap-gzdq2" Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.190940 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/fe2b9f14-cbbd-47d8-8c43-116aecf08b1b-dns-svc\") pod \"dnsmasq-dns-7dffd5bb99-5tdmw\" (UID: \"fe2b9f14-cbbd-47d8-8c43-116aecf08b1b\") " pod="openstack/dnsmasq-dns-7dffd5bb99-5tdmw" Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.190971 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7696e706-4bee-4acb-9c67-58a83178c948-combined-ca-bundle\") pod \"keystone-bootstrap-gzdq2\" (UID: \"7696e706-4bee-4acb-9c67-58a83178c948\") " pod="openstack/keystone-bootstrap-gzdq2" Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.228273 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7dffd5bb99-5tdmw"] Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.269105 4492 kubelet.go:2421] 
"SyncLoop ADD" source="api" pods=["openstack/heat-db-sync-7s6sk"] Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.270596 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-7s6sk" Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.285446 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-config-data" Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.285626 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-heat-dockercfg-b2k58" Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.292222 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/fe2b9f14-cbbd-47d8-8c43-116aecf08b1b-ovsdbserver-sb\") pod \"dnsmasq-dns-7dffd5bb99-5tdmw\" (UID: \"fe2b9f14-cbbd-47d8-8c43-116aecf08b1b\") " pod="openstack/dnsmasq-dns-7dffd5bb99-5tdmw" Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.292253 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7696e706-4bee-4acb-9c67-58a83178c948-config-data\") pod \"keystone-bootstrap-gzdq2\" (UID: \"7696e706-4bee-4acb-9c67-58a83178c948\") " pod="openstack/keystone-bootstrap-gzdq2" Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.292273 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/fe2b9f14-cbbd-47d8-8c43-116aecf08b1b-ovsdbserver-nb\") pod \"dnsmasq-dns-7dffd5bb99-5tdmw\" (UID: \"fe2b9f14-cbbd-47d8-8c43-116aecf08b1b\") " pod="openstack/dnsmasq-dns-7dffd5bb99-5tdmw" Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.292289 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rl9zp\" (UniqueName: \"kubernetes.io/projected/fe2b9f14-cbbd-47d8-8c43-116aecf08b1b-kube-api-access-rl9zp\") pod \"dnsmasq-dns-7dffd5bb99-5tdmw\" (UID: \"fe2b9f14-cbbd-47d8-8c43-116aecf08b1b\") " pod="openstack/dnsmasq-dns-7dffd5bb99-5tdmw" Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.292323 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7696e706-4bee-4acb-9c67-58a83178c948-scripts\") pod \"keystone-bootstrap-gzdq2\" (UID: \"7696e706-4bee-4acb-9c67-58a83178c948\") " pod="openstack/keystone-bootstrap-gzdq2" Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.292352 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/7696e706-4bee-4acb-9c67-58a83178c948-credential-keys\") pod \"keystone-bootstrap-gzdq2\" (UID: \"7696e706-4bee-4acb-9c67-58a83178c948\") " pod="openstack/keystone-bootstrap-gzdq2" Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.292374 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/fe2b9f14-cbbd-47d8-8c43-116aecf08b1b-dns-svc\") pod \"dnsmasq-dns-7dffd5bb99-5tdmw\" (UID: \"fe2b9f14-cbbd-47d8-8c43-116aecf08b1b\") " pod="openstack/dnsmasq-dns-7dffd5bb99-5tdmw" Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.292403 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7696e706-4bee-4acb-9c67-58a83178c948-combined-ca-bundle\") pod \"keystone-bootstrap-gzdq2\" (UID: 
\"7696e706-4bee-4acb-9c67-58a83178c948\") " pod="openstack/keystone-bootstrap-gzdq2" Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.292457 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/7696e706-4bee-4acb-9c67-58a83178c948-fernet-keys\") pod \"keystone-bootstrap-gzdq2\" (UID: \"7696e706-4bee-4acb-9c67-58a83178c948\") " pod="openstack/keystone-bootstrap-gzdq2" Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.292496 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b2rnf\" (UniqueName: \"kubernetes.io/projected/7696e706-4bee-4acb-9c67-58a83178c948-kube-api-access-b2rnf\") pod \"keystone-bootstrap-gzdq2\" (UID: \"7696e706-4bee-4acb-9c67-58a83178c948\") " pod="openstack/keystone-bootstrap-gzdq2" Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.292515 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fe2b9f14-cbbd-47d8-8c43-116aecf08b1b-config\") pod \"dnsmasq-dns-7dffd5bb99-5tdmw\" (UID: \"fe2b9f14-cbbd-47d8-8c43-116aecf08b1b\") " pod="openstack/dnsmasq-dns-7dffd5bb99-5tdmw" Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.294257 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/fe2b9f14-cbbd-47d8-8c43-116aecf08b1b-ovsdbserver-sb\") pod \"dnsmasq-dns-7dffd5bb99-5tdmw\" (UID: \"fe2b9f14-cbbd-47d8-8c43-116aecf08b1b\") " pod="openstack/dnsmasq-dns-7dffd5bb99-5tdmw" Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.295357 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/fe2b9f14-cbbd-47d8-8c43-116aecf08b1b-ovsdbserver-nb\") pod \"dnsmasq-dns-7dffd5bb99-5tdmw\" (UID: \"fe2b9f14-cbbd-47d8-8c43-116aecf08b1b\") " pod="openstack/dnsmasq-dns-7dffd5bb99-5tdmw" Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.296033 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/fe2b9f14-cbbd-47d8-8c43-116aecf08b1b-dns-svc\") pod \"dnsmasq-dns-7dffd5bb99-5tdmw\" (UID: \"fe2b9f14-cbbd-47d8-8c43-116aecf08b1b\") " pod="openstack/dnsmasq-dns-7dffd5bb99-5tdmw" Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.298759 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fe2b9f14-cbbd-47d8-8c43-116aecf08b1b-config\") pod \"dnsmasq-dns-7dffd5bb99-5tdmw\" (UID: \"fe2b9f14-cbbd-47d8-8c43-116aecf08b1b\") " pod="openstack/dnsmasq-dns-7dffd5bb99-5tdmw" Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.300987 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7696e706-4bee-4acb-9c67-58a83178c948-config-data\") pod \"keystone-bootstrap-gzdq2\" (UID: \"7696e706-4bee-4acb-9c67-58a83178c948\") " pod="openstack/keystone-bootstrap-gzdq2" Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.301257 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7696e706-4bee-4acb-9c67-58a83178c948-scripts\") pod \"keystone-bootstrap-gzdq2\" (UID: \"7696e706-4bee-4acb-9c67-58a83178c948\") " pod="openstack/keystone-bootstrap-gzdq2" Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.308542 4492 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/7696e706-4bee-4acb-9c67-58a83178c948-fernet-keys\") pod \"keystone-bootstrap-gzdq2\" (UID: \"7696e706-4bee-4acb-9c67-58a83178c948\") " pod="openstack/keystone-bootstrap-gzdq2" Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.317754 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7696e706-4bee-4acb-9c67-58a83178c948-combined-ca-bundle\") pod \"keystone-bootstrap-gzdq2\" (UID: \"7696e706-4bee-4acb-9c67-58a83178c948\") " pod="openstack/keystone-bootstrap-gzdq2" Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.330247 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b2rnf\" (UniqueName: \"kubernetes.io/projected/7696e706-4bee-4acb-9c67-58a83178c948-kube-api-access-b2rnf\") pod \"keystone-bootstrap-gzdq2\" (UID: \"7696e706-4bee-4acb-9c67-58a83178c948\") " pod="openstack/keystone-bootstrap-gzdq2" Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.330499 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rl9zp\" (UniqueName: \"kubernetes.io/projected/fe2b9f14-cbbd-47d8-8c43-116aecf08b1b-kube-api-access-rl9zp\") pod \"dnsmasq-dns-7dffd5bb99-5tdmw\" (UID: \"fe2b9f14-cbbd-47d8-8c43-116aecf08b1b\") " pod="openstack/dnsmasq-dns-7dffd5bb99-5tdmw" Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.333486 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/7696e706-4bee-4acb-9c67-58a83178c948-credential-keys\") pod \"keystone-bootstrap-gzdq2\" (UID: \"7696e706-4bee-4acb-9c67-58a83178c948\") " pod="openstack/keystone-bootstrap-gzdq2" Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.348234 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-sync-7s6sk"] Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.377629 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-5795cb96f7-nkq8g"] Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.379291 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-5795cb96f7-nkq8g" Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.382045 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7dffd5bb99-5tdmw" Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.386807 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7dffd5bb99-5tdmw"] Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.386895 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"horizon-horizon-dockercfg-mdmf8" Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.386973 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"horizon-config-data" Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.387087 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"horizon-scripts" Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.387201 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"horizon" Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.395359 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ad2234e1-842b-4bba-bd21-9fb781403667-config-data\") pod \"heat-db-sync-7s6sk\" (UID: \"ad2234e1-842b-4bba-bd21-9fb781403667\") " pod="openstack/heat-db-sync-7s6sk" Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.395520 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ad2234e1-842b-4bba-bd21-9fb781403667-combined-ca-bundle\") pod \"heat-db-sync-7s6sk\" (UID: \"ad2234e1-842b-4bba-bd21-9fb781403667\") " pod="openstack/heat-db-sync-7s6sk" Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.395561 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6hb5w\" (UniqueName: \"kubernetes.io/projected/ad2234e1-842b-4bba-bd21-9fb781403667-kube-api-access-6hb5w\") pod \"heat-db-sync-7s6sk\" (UID: \"ad2234e1-842b-4bba-bd21-9fb781403667\") " pod="openstack/heat-db-sync-7s6sk" Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.396034 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-gzdq2" Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.412135 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-5795cb96f7-nkq8g"] Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.414589 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-58658c84dc-4v45z"] Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.416033 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-58658c84dc-4v45z" Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.423605 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-swift-storage-0" Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.498023 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ad2234e1-842b-4bba-bd21-9fb781403667-config-data\") pod \"heat-db-sync-7s6sk\" (UID: \"ad2234e1-842b-4bba-bd21-9fb781403667\") " pod="openstack/heat-db-sync-7s6sk" Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.498083 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d034ecaa-378e-4b07-a17f-0ea8314d8025-dns-svc\") pod \"dnsmasq-dns-58658c84dc-4v45z\" (UID: \"d034ecaa-378e-4b07-a17f-0ea8314d8025\") " pod="openstack/dnsmasq-dns-58658c84dc-4v45z" Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.498112 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d034ecaa-378e-4b07-a17f-0ea8314d8025-ovsdbserver-sb\") pod \"dnsmasq-dns-58658c84dc-4v45z\" (UID: \"d034ecaa-378e-4b07-a17f-0ea8314d8025\") " pod="openstack/dnsmasq-dns-58658c84dc-4v45z" Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.498134 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6759f644-95cf-470e-8914-3583d8b0e11d-logs\") pod \"horizon-5795cb96f7-nkq8g\" (UID: \"6759f644-95cf-470e-8914-3583d8b0e11d\") " pod="openstack/horizon-5795cb96f7-nkq8g" Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.498212 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d034ecaa-378e-4b07-a17f-0ea8314d8025-config\") pod \"dnsmasq-dns-58658c84dc-4v45z\" (UID: \"d034ecaa-378e-4b07-a17f-0ea8314d8025\") " pod="openstack/dnsmasq-dns-58658c84dc-4v45z" Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.498249 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6759f644-95cf-470e-8914-3583d8b0e11d-scripts\") pod \"horizon-5795cb96f7-nkq8g\" (UID: \"6759f644-95cf-470e-8914-3583d8b0e11d\") " pod="openstack/horizon-5795cb96f7-nkq8g" Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.498267 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/6759f644-95cf-470e-8914-3583d8b0e11d-config-data\") pod \"horizon-5795cb96f7-nkq8g\" (UID: \"6759f644-95cf-470e-8914-3583d8b0e11d\") " pod="openstack/horizon-5795cb96f7-nkq8g" Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.498291 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b98vl\" (UniqueName: \"kubernetes.io/projected/6759f644-95cf-470e-8914-3583d8b0e11d-kube-api-access-b98vl\") pod \"horizon-5795cb96f7-nkq8g\" (UID: \"6759f644-95cf-470e-8914-3583d8b0e11d\") " pod="openstack/horizon-5795cb96f7-nkq8g" Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.498530 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" 
(UniqueName: \"kubernetes.io/secret/6759f644-95cf-470e-8914-3583d8b0e11d-horizon-secret-key\") pod \"horizon-5795cb96f7-nkq8g\" (UID: \"6759f644-95cf-470e-8914-3583d8b0e11d\") " pod="openstack/horizon-5795cb96f7-nkq8g" Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.498581 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d034ecaa-378e-4b07-a17f-0ea8314d8025-dns-swift-storage-0\") pod \"dnsmasq-dns-58658c84dc-4v45z\" (UID: \"d034ecaa-378e-4b07-a17f-0ea8314d8025\") " pod="openstack/dnsmasq-dns-58658c84dc-4v45z" Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.498630 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ad2234e1-842b-4bba-bd21-9fb781403667-combined-ca-bundle\") pod \"heat-db-sync-7s6sk\" (UID: \"ad2234e1-842b-4bba-bd21-9fb781403667\") " pod="openstack/heat-db-sync-7s6sk" Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.498652 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d034ecaa-378e-4b07-a17f-0ea8314d8025-ovsdbserver-nb\") pod \"dnsmasq-dns-58658c84dc-4v45z\" (UID: \"d034ecaa-378e-4b07-a17f-0ea8314d8025\") " pod="openstack/dnsmasq-dns-58658c84dc-4v45z" Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.498682 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6hb5w\" (UniqueName: \"kubernetes.io/projected/ad2234e1-842b-4bba-bd21-9fb781403667-kube-api-access-6hb5w\") pod \"heat-db-sync-7s6sk\" (UID: \"ad2234e1-842b-4bba-bd21-9fb781403667\") " pod="openstack/heat-db-sync-7s6sk" Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.498754 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xwnjh\" (UniqueName: \"kubernetes.io/projected/d034ecaa-378e-4b07-a17f-0ea8314d8025-kube-api-access-xwnjh\") pod \"dnsmasq-dns-58658c84dc-4v45z\" (UID: \"d034ecaa-378e-4b07-a17f-0ea8314d8025\") " pod="openstack/dnsmasq-dns-58658c84dc-4v45z" Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.503359 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-58658c84dc-4v45z"] Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.504703 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ad2234e1-842b-4bba-bd21-9fb781403667-combined-ca-bundle\") pod \"heat-db-sync-7s6sk\" (UID: \"ad2234e1-842b-4bba-bd21-9fb781403667\") " pod="openstack/heat-db-sync-7s6sk" Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.517199 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-sync-8w6bv"] Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.545427 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-8w6bv" Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.549555 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ad2234e1-842b-4bba-bd21-9fb781403667-config-data\") pod \"heat-db-sync-7s6sk\" (UID: \"ad2234e1-842b-4bba-bd21-9fb781403667\") " pod="openstack/heat-db-sync-7s6sk" Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.572160 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6hb5w\" (UniqueName: \"kubernetes.io/projected/ad2234e1-842b-4bba-bd21-9fb781403667-kube-api-access-6hb5w\") pod \"heat-db-sync-7s6sk\" (UID: \"ad2234e1-842b-4bba-bd21-9fb781403667\") " pod="openstack/heat-db-sync-7s6sk" Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.594377 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-wlk8n" Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.594578 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.594706 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.595574 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-7s6sk" Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.599527 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-sync-rl9z8"] Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.600664 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-rl9z8" Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.602614 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b98vl\" (UniqueName: \"kubernetes.io/projected/6759f644-95cf-470e-8914-3583d8b0e11d-kube-api-access-b98vl\") pod \"horizon-5795cb96f7-nkq8g\" (UID: \"6759f644-95cf-470e-8914-3583d8b0e11d\") " pod="openstack/horizon-5795cb96f7-nkq8g" Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.602734 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/6759f644-95cf-470e-8914-3583d8b0e11d-horizon-secret-key\") pod \"horizon-5795cb96f7-nkq8g\" (UID: \"6759f644-95cf-470e-8914-3583d8b0e11d\") " pod="openstack/horizon-5795cb96f7-nkq8g" Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.602804 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/a497bcf3-f8db-4b08-b5e3-33d050f9901a-db-sync-config-data\") pod \"cinder-db-sync-8w6bv\" (UID: \"a497bcf3-f8db-4b08-b5e3-33d050f9901a\") " pod="openstack/cinder-db-sync-8w6bv" Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.602864 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d034ecaa-378e-4b07-a17f-0ea8314d8025-dns-swift-storage-0\") pod \"dnsmasq-dns-58658c84dc-4v45z\" (UID: \"d034ecaa-378e-4b07-a17f-0ea8314d8025\") " pod="openstack/dnsmasq-dns-58658c84dc-4v45z" Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.602942 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" 
(UniqueName: \"kubernetes.io/secret/a497bcf3-f8db-4b08-b5e3-33d050f9901a-config-data\") pod \"cinder-db-sync-8w6bv\" (UID: \"a497bcf3-f8db-4b08-b5e3-33d050f9901a\") " pod="openstack/cinder-db-sync-8w6bv" Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.603004 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a497bcf3-f8db-4b08-b5e3-33d050f9901a-combined-ca-bundle\") pod \"cinder-db-sync-8w6bv\" (UID: \"a497bcf3-f8db-4b08-b5e3-33d050f9901a\") " pod="openstack/cinder-db-sync-8w6bv" Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.603396 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/a497bcf3-f8db-4b08-b5e3-33d050f9901a-etc-machine-id\") pod \"cinder-db-sync-8w6bv\" (UID: \"a497bcf3-f8db-4b08-b5e3-33d050f9901a\") " pod="openstack/cinder-db-sync-8w6bv" Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.603478 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d034ecaa-378e-4b07-a17f-0ea8314d8025-ovsdbserver-nb\") pod \"dnsmasq-dns-58658c84dc-4v45z\" (UID: \"d034ecaa-378e-4b07-a17f-0ea8314d8025\") " pod="openstack/dnsmasq-dns-58658c84dc-4v45z" Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.603539 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a497bcf3-f8db-4b08-b5e3-33d050f9901a-scripts\") pod \"cinder-db-sync-8w6bv\" (UID: \"a497bcf3-f8db-4b08-b5e3-33d050f9901a\") " pod="openstack/cinder-db-sync-8w6bv" Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.603618 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xwnjh\" (UniqueName: \"kubernetes.io/projected/d034ecaa-378e-4b07-a17f-0ea8314d8025-kube-api-access-xwnjh\") pod \"dnsmasq-dns-58658c84dc-4v45z\" (UID: \"d034ecaa-378e-4b07-a17f-0ea8314d8025\") " pod="openstack/dnsmasq-dns-58658c84dc-4v45z" Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.603673 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b5744\" (UniqueName: \"kubernetes.io/projected/a497bcf3-f8db-4b08-b5e3-33d050f9901a-kube-api-access-b5744\") pod \"cinder-db-sync-8w6bv\" (UID: \"a497bcf3-f8db-4b08-b5e3-33d050f9901a\") " pod="openstack/cinder-db-sync-8w6bv" Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.603763 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d034ecaa-378e-4b07-a17f-0ea8314d8025-dns-svc\") pod \"dnsmasq-dns-58658c84dc-4v45z\" (UID: \"d034ecaa-378e-4b07-a17f-0ea8314d8025\") " pod="openstack/dnsmasq-dns-58658c84dc-4v45z" Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.603817 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d034ecaa-378e-4b07-a17f-0ea8314d8025-ovsdbserver-sb\") pod \"dnsmasq-dns-58658c84dc-4v45z\" (UID: \"d034ecaa-378e-4b07-a17f-0ea8314d8025\") " pod="openstack/dnsmasq-dns-58658c84dc-4v45z" Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.603874 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: 
\"kubernetes.io/empty-dir/6759f644-95cf-470e-8914-3583d8b0e11d-logs\") pod \"horizon-5795cb96f7-nkq8g\" (UID: \"6759f644-95cf-470e-8914-3583d8b0e11d\") " pod="openstack/horizon-5795cb96f7-nkq8g" Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.603963 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d034ecaa-378e-4b07-a17f-0ea8314d8025-config\") pod \"dnsmasq-dns-58658c84dc-4v45z\" (UID: \"d034ecaa-378e-4b07-a17f-0ea8314d8025\") " pod="openstack/dnsmasq-dns-58658c84dc-4v45z" Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.604020 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/6759f644-95cf-470e-8914-3583d8b0e11d-config-data\") pod \"horizon-5795cb96f7-nkq8g\" (UID: \"6759f644-95cf-470e-8914-3583d8b0e11d\") " pod="openstack/horizon-5795cb96f7-nkq8g" Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.604079 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6759f644-95cf-470e-8914-3583d8b0e11d-scripts\") pod \"horizon-5795cb96f7-nkq8g\" (UID: \"6759f644-95cf-470e-8914-3583d8b0e11d\") " pod="openstack/horizon-5795cb96f7-nkq8g" Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.605533 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6759f644-95cf-470e-8914-3583d8b0e11d-scripts\") pod \"horizon-5795cb96f7-nkq8g\" (UID: \"6759f644-95cf-470e-8914-3583d8b0e11d\") " pod="openstack/horizon-5795cb96f7-nkq8g" Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.609286 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.609485 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.609842 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-xwhmb" Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.610522 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d034ecaa-378e-4b07-a17f-0ea8314d8025-dns-swift-storage-0\") pod \"dnsmasq-dns-58658c84dc-4v45z\" (UID: \"d034ecaa-378e-4b07-a17f-0ea8314d8025\") " pod="openstack/dnsmasq-dns-58658c84dc-4v45z" Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.611091 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d034ecaa-378e-4b07-a17f-0ea8314d8025-ovsdbserver-nb\") pod \"dnsmasq-dns-58658c84dc-4v45z\" (UID: \"d034ecaa-378e-4b07-a17f-0ea8314d8025\") " pod="openstack/dnsmasq-dns-58658c84dc-4v45z" Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.611143 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d034ecaa-378e-4b07-a17f-0ea8314d8025-ovsdbserver-sb\") pod \"dnsmasq-dns-58658c84dc-4v45z\" (UID: \"d034ecaa-378e-4b07-a17f-0ea8314d8025\") " pod="openstack/dnsmasq-dns-58658c84dc-4v45z" Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.611900 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: 
\"kubernetes.io/configmap/d034ecaa-378e-4b07-a17f-0ea8314d8025-dns-svc\") pod \"dnsmasq-dns-58658c84dc-4v45z\" (UID: \"d034ecaa-378e-4b07-a17f-0ea8314d8025\") " pod="openstack/dnsmasq-dns-58658c84dc-4v45z" Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.612006 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d034ecaa-378e-4b07-a17f-0ea8314d8025-config\") pod \"dnsmasq-dns-58658c84dc-4v45z\" (UID: \"d034ecaa-378e-4b07-a17f-0ea8314d8025\") " pod="openstack/dnsmasq-dns-58658c84dc-4v45z" Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.612153 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6759f644-95cf-470e-8914-3583d8b0e11d-logs\") pod \"horizon-5795cb96f7-nkq8g\" (UID: \"6759f644-95cf-470e-8914-3583d8b0e11d\") " pod="openstack/horizon-5795cb96f7-nkq8g" Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.629655 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-8w6bv"] Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.631416 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/6759f644-95cf-470e-8914-3583d8b0e11d-config-data\") pod \"horizon-5795cb96f7-nkq8g\" (UID: \"6759f644-95cf-470e-8914-3583d8b0e11d\") " pod="openstack/horizon-5795cb96f7-nkq8g" Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.643261 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-rl9z8"] Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.644815 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/6759f644-95cf-470e-8914-3583d8b0e11d-horizon-secret-key\") pod \"horizon-5795cb96f7-nkq8g\" (UID: \"6759f644-95cf-470e-8914-3583d8b0e11d\") " pod="openstack/horizon-5795cb96f7-nkq8g" Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.649204 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-sync-9249p"] Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.650913 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-9249p"
Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.670548 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-kgvv2"
Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.670976 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data"
Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.682740 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b98vl\" (UniqueName: \"kubernetes.io/projected/6759f644-95cf-470e-8914-3583d8b0e11d-kube-api-access-b98vl\") pod \"horizon-5795cb96f7-nkq8g\" (UID: \"6759f644-95cf-470e-8914-3583d8b0e11d\") " pod="openstack/horizon-5795cb96f7-nkq8g"
Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.701969 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xwnjh\" (UniqueName: \"kubernetes.io/projected/d034ecaa-378e-4b07-a17f-0ea8314d8025-kube-api-access-xwnjh\") pod \"dnsmasq-dns-58658c84dc-4v45z\" (UID: \"d034ecaa-378e-4b07-a17f-0ea8314d8025\") " pod="openstack/dnsmasq-dns-58658c84dc-4v45z"
Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.706301 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/0afbb611-57c8-4d5a-a258-cc184185d75c-config\") pod \"neutron-db-sync-rl9z8\" (UID: \"0afbb611-57c8-4d5a-a258-cc184185d75c\") " pod="openstack/neutron-db-sync-rl9z8"
Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.707018 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0afbb611-57c8-4d5a-a258-cc184185d75c-combined-ca-bundle\") pod \"neutron-db-sync-rl9z8\" (UID: \"0afbb611-57c8-4d5a-a258-cc184185d75c\") " pod="openstack/neutron-db-sync-rl9z8"
Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.707118 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/a497bcf3-f8db-4b08-b5e3-33d050f9901a-db-sync-config-data\") pod \"cinder-db-sync-8w6bv\" (UID: \"a497bcf3-f8db-4b08-b5e3-33d050f9901a\") " pod="openstack/cinder-db-sync-8w6bv"
Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.707166 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lpkvl\" (UniqueName: \"kubernetes.io/projected/0afbb611-57c8-4d5a-a258-cc184185d75c-kube-api-access-lpkvl\") pod \"neutron-db-sync-rl9z8\" (UID: \"0afbb611-57c8-4d5a-a258-cc184185d75c\") " pod="openstack/neutron-db-sync-rl9z8"
Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.707214 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a497bcf3-f8db-4b08-b5e3-33d050f9901a-config-data\") pod \"cinder-db-sync-8w6bv\" (UID: \"a497bcf3-f8db-4b08-b5e3-33d050f9901a\") " pod="openstack/cinder-db-sync-8w6bv"
Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.707238 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a497bcf3-f8db-4b08-b5e3-33d050f9901a-combined-ca-bundle\") pod \"cinder-db-sync-8w6bv\" (UID: \"a497bcf3-f8db-4b08-b5e3-33d050f9901a\") " pod="openstack/cinder-db-sync-8w6bv"
Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.707260 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/a497bcf3-f8db-4b08-b5e3-33d050f9901a-etc-machine-id\") pod \"cinder-db-sync-8w6bv\" (UID: \"a497bcf3-f8db-4b08-b5e3-33d050f9901a\") " pod="openstack/cinder-db-sync-8w6bv"
Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.707304 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a497bcf3-f8db-4b08-b5e3-33d050f9901a-scripts\") pod \"cinder-db-sync-8w6bv\" (UID: \"a497bcf3-f8db-4b08-b5e3-33d050f9901a\") " pod="openstack/cinder-db-sync-8w6bv"
Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.707371 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b5744\" (UniqueName: \"kubernetes.io/projected/a497bcf3-f8db-4b08-b5e3-33d050f9901a-kube-api-access-b5744\") pod \"cinder-db-sync-8w6bv\" (UID: \"a497bcf3-f8db-4b08-b5e3-33d050f9901a\") " pod="openstack/cinder-db-sync-8w6bv"
Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.719252 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-9249p"]
Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.719647 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/a497bcf3-f8db-4b08-b5e3-33d050f9901a-etc-machine-id\") pod \"cinder-db-sync-8w6bv\" (UID: \"a497bcf3-f8db-4b08-b5e3-33d050f9901a\") " pod="openstack/cinder-db-sync-8w6bv"
Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.723323 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a497bcf3-f8db-4b08-b5e3-33d050f9901a-config-data\") pod \"cinder-db-sync-8w6bv\" (UID: \"a497bcf3-f8db-4b08-b5e3-33d050f9901a\") " pod="openstack/cinder-db-sync-8w6bv"
Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.732713 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a497bcf3-f8db-4b08-b5e3-33d050f9901a-scripts\") pod \"cinder-db-sync-8w6bv\" (UID: \"a497bcf3-f8db-4b08-b5e3-33d050f9901a\") " pod="openstack/cinder-db-sync-8w6bv"
Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.733001 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-5795cb96f7-nkq8g"
Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.734597 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/a497bcf3-f8db-4b08-b5e3-33d050f9901a-db-sync-config-data\") pod \"cinder-db-sync-8w6bv\" (UID: \"a497bcf3-f8db-4b08-b5e3-33d050f9901a\") " pod="openstack/cinder-db-sync-8w6bv"
Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.748232 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a497bcf3-f8db-4b08-b5e3-33d050f9901a-combined-ca-bundle\") pod \"cinder-db-sync-8w6bv\" (UID: \"a497bcf3-f8db-4b08-b5e3-33d050f9901a\") " pod="openstack/cinder-db-sync-8w6bv"
Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.749277 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b5744\" (UniqueName: \"kubernetes.io/projected/a497bcf3-f8db-4b08-b5e3-33d050f9901a-kube-api-access-b5744\") pod \"cinder-db-sync-8w6bv\" (UID: \"a497bcf3-f8db-4b08-b5e3-33d050f9901a\") " pod="openstack/cinder-db-sync-8w6bv"
Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.772233 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-6688b9cf7f-qsr5d"]
Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.773723 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-6688b9cf7f-qsr5d"
Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.811515 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-6688b9cf7f-qsr5d"]
Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.827191 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/e34f6949-eab2-4b97-9ba1-54ed3e59da5c-db-sync-config-data\") pod \"barbican-db-sync-9249p\" (UID: \"e34f6949-eab2-4b97-9ba1-54ed3e59da5c\") " pod="openstack/barbican-db-sync-9249p"
Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.827229 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0afbb611-57c8-4d5a-a258-cc184185d75c-combined-ca-bundle\") pod \"neutron-db-sync-rl9z8\" (UID: \"0afbb611-57c8-4d5a-a258-cc184185d75c\") " pod="openstack/neutron-db-sync-rl9z8"
Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.827401 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lpkvl\" (UniqueName: \"kubernetes.io/projected/0afbb611-57c8-4d5a-a258-cc184185d75c-kube-api-access-lpkvl\") pod \"neutron-db-sync-rl9z8\" (UID: \"0afbb611-57c8-4d5a-a258-cc184185d75c\") " pod="openstack/neutron-db-sync-rl9z8"
Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.827436 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bsjsf\" (UniqueName: \"kubernetes.io/projected/e34f6949-eab2-4b97-9ba1-54ed3e59da5c-kube-api-access-bsjsf\") pod \"barbican-db-sync-9249p\" (UID: \"e34f6949-eab2-4b97-9ba1-54ed3e59da5c\") " pod="openstack/barbican-db-sync-9249p"
Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.827575 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e34f6949-eab2-4b97-9ba1-54ed3e59da5c-combined-ca-bundle\") pod \"barbican-db-sync-9249p\" (UID: \"e34f6949-eab2-4b97-9ba1-54ed3e59da5c\") " pod="openstack/barbican-db-sync-9249p"
Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.827624 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/0afbb611-57c8-4d5a-a258-cc184185d75c-config\") pod \"neutron-db-sync-rl9z8\" (UID: \"0afbb611-57c8-4d5a-a258-cc184185d75c\") " pod="openstack/neutron-db-sync-rl9z8"
Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.834999 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0afbb611-57c8-4d5a-a258-cc184185d75c-combined-ca-bundle\") pod \"neutron-db-sync-rl9z8\" (UID: \"0afbb611-57c8-4d5a-a258-cc184185d75c\") " pod="openstack/neutron-db-sync-rl9z8"
Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.853464 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/0afbb611-57c8-4d5a-a258-cc184185d75c-config\") pod \"neutron-db-sync-rl9z8\" (UID: \"0afbb611-57c8-4d5a-a258-cc184185d75c\") " pod="openstack/neutron-db-sync-rl9z8"
Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.883229 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-sync-79hj2"]
Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.884430 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-79hj2"
Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.885631 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-58658c84dc-4v45z"
Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.891987 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-8w6bv"
Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.903716 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-c7hqh"
Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.906420 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts"
Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.906601 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data"
Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.912504 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lpkvl\" (UniqueName: \"kubernetes.io/projected/0afbb611-57c8-4d5a-a258-cc184185d75c-kube-api-access-lpkvl\") pod \"neutron-db-sync-rl9z8\" (UID: \"0afbb611-57c8-4d5a-a258-cc184185d75c\") " pod="openstack/neutron-db-sync-rl9z8"
Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.923061 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-79hj2"]
Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.936592 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-58658c84dc-4v45z"]
Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.938133 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e34f6949-eab2-4b97-9ba1-54ed3e59da5c-combined-ca-bundle\") pod \"barbican-db-sync-9249p\" (UID: \"e34f6949-eab2-4b97-9ba1-54ed3e59da5c\") " pod="openstack/barbican-db-sync-9249p"
Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.938158 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/689a4cb6-8c65-4db6-b3d2-b825ca6ddd7e-config-data\") pod \"horizon-6688b9cf7f-qsr5d\" (UID: \"689a4cb6-8c65-4db6-b3d2-b825ca6ddd7e\") " pod="openstack/horizon-6688b9cf7f-qsr5d"
Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.938228 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x787c\" (UniqueName: \"kubernetes.io/projected/689a4cb6-8c65-4db6-b3d2-b825ca6ddd7e-kube-api-access-x787c\") pod \"horizon-6688b9cf7f-qsr5d\" (UID: \"689a4cb6-8c65-4db6-b3d2-b825ca6ddd7e\") " pod="openstack/horizon-6688b9cf7f-qsr5d"
Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.938267 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/689a4cb6-8c65-4db6-b3d2-b825ca6ddd7e-logs\") pod \"horizon-6688b9cf7f-qsr5d\" (UID: \"689a4cb6-8c65-4db6-b3d2-b825ca6ddd7e\") " pod="openstack/horizon-6688b9cf7f-qsr5d"
Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.938290 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/e34f6949-eab2-4b97-9ba1-54ed3e59da5c-db-sync-config-data\") pod \"barbican-db-sync-9249p\" (UID: \"e34f6949-eab2-4b97-9ba1-54ed3e59da5c\") " pod="openstack/barbican-db-sync-9249p"
Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.938312 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/689a4cb6-8c65-4db6-b3d2-b825ca6ddd7e-horizon-secret-key\") pod \"horizon-6688b9cf7f-qsr5d\" (UID: \"689a4cb6-8c65-4db6-b3d2-b825ca6ddd7e\") " pod="openstack/horizon-6688b9cf7f-qsr5d"
Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.938340 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/689a4cb6-8c65-4db6-b3d2-b825ca6ddd7e-scripts\") pod \"horizon-6688b9cf7f-qsr5d\" (UID: \"689a4cb6-8c65-4db6-b3d2-b825ca6ddd7e\") " pod="openstack/horizon-6688b9cf7f-qsr5d"
Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.938392 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bsjsf\" (UniqueName: \"kubernetes.io/projected/e34f6949-eab2-4b97-9ba1-54ed3e59da5c-kube-api-access-bsjsf\") pod \"barbican-db-sync-9249p\" (UID: \"e34f6949-eab2-4b97-9ba1-54ed3e59da5c\") " pod="openstack/barbican-db-sync-9249p"
Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.943045 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-rl9z8"
Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.944800 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/e34f6949-eab2-4b97-9ba1-54ed3e59da5c-db-sync-config-data\") pod \"barbican-db-sync-9249p\" (UID: \"e34f6949-eab2-4b97-9ba1-54ed3e59da5c\") " pod="openstack/barbican-db-sync-9249p"
Nov 26 07:05:13 crc kubenswrapper[4492]: I1126 07:05:13.955222 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e34f6949-eab2-4b97-9ba1-54ed3e59da5c-combined-ca-bundle\") pod \"barbican-db-sync-9249p\" (UID: \"e34f6949-eab2-4b97-9ba1-54ed3e59da5c\") " pod="openstack/barbican-db-sync-9249p"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.002530 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bsjsf\" (UniqueName: \"kubernetes.io/projected/e34f6949-eab2-4b97-9ba1-54ed3e59da5c-kube-api-access-bsjsf\") pod \"barbican-db-sync-9249p\" (UID: \"e34f6949-eab2-4b97-9ba1-54ed3e59da5c\") " pod="openstack/barbican-db-sync-9249p"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.048858 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-9249p"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.049880 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x787c\" (UniqueName: \"kubernetes.io/projected/689a4cb6-8c65-4db6-b3d2-b825ca6ddd7e-kube-api-access-x787c\") pod \"horizon-6688b9cf7f-qsr5d\" (UID: \"689a4cb6-8c65-4db6-b3d2-b825ca6ddd7e\") " pod="openstack/horizon-6688b9cf7f-qsr5d"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.049982 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0093dcb6-c7e5-4b5a-94a3-55fc7465109a-config-data\") pod \"placement-db-sync-79hj2\" (UID: \"0093dcb6-c7e5-4b5a-94a3-55fc7465109a\") " pod="openstack/placement-db-sync-79hj2"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.050052 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/689a4cb6-8c65-4db6-b3d2-b825ca6ddd7e-logs\") pod \"horizon-6688b9cf7f-qsr5d\" (UID: \"689a4cb6-8c65-4db6-b3d2-b825ca6ddd7e\") " pod="openstack/horizon-6688b9cf7f-qsr5d"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.050130 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/689a4cb6-8c65-4db6-b3d2-b825ca6ddd7e-horizon-secret-key\") pod \"horizon-6688b9cf7f-qsr5d\" (UID: \"689a4cb6-8c65-4db6-b3d2-b825ca6ddd7e\") " pod="openstack/horizon-6688b9cf7f-qsr5d"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.050158 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/689a4cb6-8c65-4db6-b3d2-b825ca6ddd7e-scripts\") pod \"horizon-6688b9cf7f-qsr5d\" (UID: \"689a4cb6-8c65-4db6-b3d2-b825ca6ddd7e\") " pod="openstack/horizon-6688b9cf7f-qsr5d"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.050239 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0093dcb6-c7e5-4b5a-94a3-55fc7465109a-logs\") pod \"placement-db-sync-79hj2\" (UID: \"0093dcb6-c7e5-4b5a-94a3-55fc7465109a\") " pod="openstack/placement-db-sync-79hj2"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.050340 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/689a4cb6-8c65-4db6-b3d2-b825ca6ddd7e-config-data\") pod \"horizon-6688b9cf7f-qsr5d\" (UID: \"689a4cb6-8c65-4db6-b3d2-b825ca6ddd7e\") " pod="openstack/horizon-6688b9cf7f-qsr5d"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.050366 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0093dcb6-c7e5-4b5a-94a3-55fc7465109a-combined-ca-bundle\") pod \"placement-db-sync-79hj2\" (UID: \"0093dcb6-c7e5-4b5a-94a3-55fc7465109a\") " pod="openstack/placement-db-sync-79hj2"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.050482 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0093dcb6-c7e5-4b5a-94a3-55fc7465109a-scripts\") pod \"placement-db-sync-79hj2\" (UID: \"0093dcb6-c7e5-4b5a-94a3-55fc7465109a\") " pod="openstack/placement-db-sync-79hj2"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.050508 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4bt6p\" (UniqueName: \"kubernetes.io/projected/0093dcb6-c7e5-4b5a-94a3-55fc7465109a-kube-api-access-4bt6p\") pod \"placement-db-sync-79hj2\" (UID: \"0093dcb6-c7e5-4b5a-94a3-55fc7465109a\") " pod="openstack/placement-db-sync-79hj2"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.056369 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-d4b7d55d5-brf4j"]
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.057275 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/689a4cb6-8c65-4db6-b3d2-b825ca6ddd7e-logs\") pod \"horizon-6688b9cf7f-qsr5d\" (UID: \"689a4cb6-8c65-4db6-b3d2-b825ca6ddd7e\") " pod="openstack/horizon-6688b9cf7f-qsr5d"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.062429 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/689a4cb6-8c65-4db6-b3d2-b825ca6ddd7e-scripts\") pod \"horizon-6688b9cf7f-qsr5d\" (UID: \"689a4cb6-8c65-4db6-b3d2-b825ca6ddd7e\") " pod="openstack/horizon-6688b9cf7f-qsr5d"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.063609 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/689a4cb6-8c65-4db6-b3d2-b825ca6ddd7e-config-data\") pod \"horizon-6688b9cf7f-qsr5d\" (UID: \"689a4cb6-8c65-4db6-b3d2-b825ca6ddd7e\") " pod="openstack/horizon-6688b9cf7f-qsr5d"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.066855 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"]
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.067572 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-d4b7d55d5-brf4j"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.075252 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/689a4cb6-8c65-4db6-b3d2-b825ca6ddd7e-horizon-secret-key\") pod \"horizon-6688b9cf7f-qsr5d\" (UID: \"689a4cb6-8c65-4db6-b3d2-b825ca6ddd7e\") " pod="openstack/horizon-6688b9cf7f-qsr5d"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.080379 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.080643 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x787c\" (UniqueName: \"kubernetes.io/projected/689a4cb6-8c65-4db6-b3d2-b825ca6ddd7e-kube-api-access-x787c\") pod \"horizon-6688b9cf7f-qsr5d\" (UID: \"689a4cb6-8c65-4db6-b3d2-b825ca6ddd7e\") " pod="openstack/horizon-6688b9cf7f-qsr5d"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.087316 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.088060 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.114096 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-6688b9cf7f-qsr5d"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.150830 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-d4b7d55d5-brf4j"]
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.153190 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b74aeb41-307b-4cca-a876-985ea2601650-dns-svc\") pod \"dnsmasq-dns-d4b7d55d5-brf4j\" (UID: \"b74aeb41-307b-4cca-a876-985ea2601650\") " pod="openstack/dnsmasq-dns-d4b7d55d5-brf4j"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.153242 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0093dcb6-c7e5-4b5a-94a3-55fc7465109a-config-data\") pod \"placement-db-sync-79hj2\" (UID: \"0093dcb6-c7e5-4b5a-94a3-55fc7465109a\") " pod="openstack/placement-db-sync-79hj2"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.153267 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc-log-httpd\") pod \"ceilometer-0\" (UID: \"e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc\") " pod="openstack/ceilometer-0"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.153325 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b74aeb41-307b-4cca-a876-985ea2601650-dns-swift-storage-0\") pod \"dnsmasq-dns-d4b7d55d5-brf4j\" (UID: \"b74aeb41-307b-4cca-a876-985ea2601650\") " pod="openstack/dnsmasq-dns-d4b7d55d5-brf4j"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.153354 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b74aeb41-307b-4cca-a876-985ea2601650-ovsdbserver-nb\") pod \"dnsmasq-dns-d4b7d55d5-brf4j\" (UID: \"b74aeb41-307b-4cca-a876-985ea2601650\") " pod="openstack/dnsmasq-dns-d4b7d55d5-brf4j"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.153384 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p6gbf\" (UniqueName: \"kubernetes.io/projected/b74aeb41-307b-4cca-a876-985ea2601650-kube-api-access-p6gbf\") pod \"dnsmasq-dns-d4b7d55d5-brf4j\" (UID: \"b74aeb41-307b-4cca-a876-985ea2601650\") " pod="openstack/dnsmasq-dns-d4b7d55d5-brf4j"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.153419 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc-config-data\") pod \"ceilometer-0\" (UID: \"e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc\") " pod="openstack/ceilometer-0"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.153444 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc\") " pod="openstack/ceilometer-0"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.153473 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc-scripts\") pod \"ceilometer-0\" (UID: \"e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc\") " pod="openstack/ceilometer-0"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.153497 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b74aeb41-307b-4cca-a876-985ea2601650-config\") pod \"dnsmasq-dns-d4b7d55d5-brf4j\" (UID: \"b74aeb41-307b-4cca-a876-985ea2601650\") " pod="openstack/dnsmasq-dns-d4b7d55d5-brf4j"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.153530 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0093dcb6-c7e5-4b5a-94a3-55fc7465109a-logs\") pod \"placement-db-sync-79hj2\" (UID: \"0093dcb6-c7e5-4b5a-94a3-55fc7465109a\") " pod="openstack/placement-db-sync-79hj2"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.153572 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc-run-httpd\") pod \"ceilometer-0\" (UID: \"e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc\") " pod="openstack/ceilometer-0"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.153594 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xbm4n\" (UniqueName: \"kubernetes.io/projected/e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc-kube-api-access-xbm4n\") pod \"ceilometer-0\" (UID: \"e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc\") " pod="openstack/ceilometer-0"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.153625 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b74aeb41-307b-4cca-a876-985ea2601650-ovsdbserver-sb\") pod \"dnsmasq-dns-d4b7d55d5-brf4j\" (UID: \"b74aeb41-307b-4cca-a876-985ea2601650\") " pod="openstack/dnsmasq-dns-d4b7d55d5-brf4j"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.153676 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc\") " pod="openstack/ceilometer-0"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.153698 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0093dcb6-c7e5-4b5a-94a3-55fc7465109a-combined-ca-bundle\") pod \"placement-db-sync-79hj2\" (UID: \"0093dcb6-c7e5-4b5a-94a3-55fc7465109a\") " pod="openstack/placement-db-sync-79hj2"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.153724 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0093dcb6-c7e5-4b5a-94a3-55fc7465109a-scripts\") pod \"placement-db-sync-79hj2\" (UID: \"0093dcb6-c7e5-4b5a-94a3-55fc7465109a\") " pod="openstack/placement-db-sync-79hj2"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.153741 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4bt6p\" (UniqueName: \"kubernetes.io/projected/0093dcb6-c7e5-4b5a-94a3-55fc7465109a-kube-api-access-4bt6p\") pod \"placement-db-sync-79hj2\" (UID: \"0093dcb6-c7e5-4b5a-94a3-55fc7465109a\") " pod="openstack/placement-db-sync-79hj2"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.154858 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0093dcb6-c7e5-4b5a-94a3-55fc7465109a-logs\") pod \"placement-db-sync-79hj2\" (UID: \"0093dcb6-c7e5-4b5a-94a3-55fc7465109a\") " pod="openstack/placement-db-sync-79hj2"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.183735 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4bt6p\" (UniqueName: \"kubernetes.io/projected/0093dcb6-c7e5-4b5a-94a3-55fc7465109a-kube-api-access-4bt6p\") pod \"placement-db-sync-79hj2\" (UID: \"0093dcb6-c7e5-4b5a-94a3-55fc7465109a\") " pod="openstack/placement-db-sync-79hj2"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.184948 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0093dcb6-c7e5-4b5a-94a3-55fc7465109a-combined-ca-bundle\") pod \"placement-db-sync-79hj2\" (UID: \"0093dcb6-c7e5-4b5a-94a3-55fc7465109a\") " pod="openstack/placement-db-sync-79hj2"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.185470 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0093dcb6-c7e5-4b5a-94a3-55fc7465109a-config-data\") pod \"placement-db-sync-79hj2\" (UID: \"0093dcb6-c7e5-4b5a-94a3-55fc7465109a\") " pod="openstack/placement-db-sync-79hj2"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.188612 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0093dcb6-c7e5-4b5a-94a3-55fc7465109a-scripts\") pod \"placement-db-sync-79hj2\" (UID: \"0093dcb6-c7e5-4b5a-94a3-55fc7465109a\") " pod="openstack/placement-db-sync-79hj2"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.198983 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.221242 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"]
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.223037 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.231707 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-79hj2"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.231727 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.232048 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.232731 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-c7w8z"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.246840 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"]
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.258631 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b74aeb41-307b-4cca-a876-985ea2601650-dns-swift-storage-0\") pod \"dnsmasq-dns-d4b7d55d5-brf4j\" (UID: \"b74aeb41-307b-4cca-a876-985ea2601650\") " pod="openstack/dnsmasq-dns-d4b7d55d5-brf4j"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.258692 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b74aeb41-307b-4cca-a876-985ea2601650-ovsdbserver-nb\") pod \"dnsmasq-dns-d4b7d55d5-brf4j\" (UID: \"b74aeb41-307b-4cca-a876-985ea2601650\") " pod="openstack/dnsmasq-dns-d4b7d55d5-brf4j"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.258723 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p6gbf\" (UniqueName: \"kubernetes.io/projected/b74aeb41-307b-4cca-a876-985ea2601650-kube-api-access-p6gbf\") pod \"dnsmasq-dns-d4b7d55d5-brf4j\" (UID: \"b74aeb41-307b-4cca-a876-985ea2601650\") " pod="openstack/dnsmasq-dns-d4b7d55d5-brf4j"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.258760 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc-config-data\") pod \"ceilometer-0\" (UID: \"e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc\") " pod="openstack/ceilometer-0"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.258820 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc\") " pod="openstack/ceilometer-0"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.258844 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc-scripts\") pod \"ceilometer-0\" (UID: \"e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc\") " pod="openstack/ceilometer-0"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.258881 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b74aeb41-307b-4cca-a876-985ea2601650-config\") pod \"dnsmasq-dns-d4b7d55d5-brf4j\" (UID: \"b74aeb41-307b-4cca-a876-985ea2601650\") " pod="openstack/dnsmasq-dns-d4b7d55d5-brf4j"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.258904 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a8896338-00d7-4330-ba14-3a930d07b9c0-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"a8896338-00d7-4330-ba14-3a930d07b9c0\") " pod="openstack/glance-default-external-api-0"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.258943 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a8896338-00d7-4330-ba14-3a930d07b9c0-config-data\") pod \"glance-default-external-api-0\" (UID: \"a8896338-00d7-4330-ba14-3a930d07b9c0\") " pod="openstack/glance-default-external-api-0"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.258992 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc-run-httpd\") pod \"ceilometer-0\" (UID: \"e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc\") " pod="openstack/ceilometer-0"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.259017 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xbm4n\" (UniqueName: \"kubernetes.io/projected/e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc-kube-api-access-xbm4n\") pod \"ceilometer-0\" (UID: \"e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc\") " pod="openstack/ceilometer-0"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.259044 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-external-api-0\" (UID: \"a8896338-00d7-4330-ba14-3a930d07b9c0\") " pod="openstack/glance-default-external-api-0"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.259071 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b74aeb41-307b-4cca-a876-985ea2601650-ovsdbserver-sb\") pod \"dnsmasq-dns-d4b7d55d5-brf4j\" (UID: \"b74aeb41-307b-4cca-a876-985ea2601650\") " pod="openstack/dnsmasq-dns-d4b7d55d5-brf4j"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.259097 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m8bdq\" (UniqueName: \"kubernetes.io/projected/a8896338-00d7-4330-ba14-3a930d07b9c0-kube-api-access-m8bdq\") pod \"glance-default-external-api-0\" (UID: \"a8896338-00d7-4330-ba14-3a930d07b9c0\") " pod="openstack/glance-default-external-api-0"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.259133 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc\") " pod="openstack/ceilometer-0"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.259164 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a8896338-00d7-4330-ba14-3a930d07b9c0-logs\") pod \"glance-default-external-api-0\" (UID: \"a8896338-00d7-4330-ba14-3a930d07b9c0\") " pod="openstack/glance-default-external-api-0"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.259215 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b74aeb41-307b-4cca-a876-985ea2601650-dns-svc\") pod \"dnsmasq-dns-d4b7d55d5-brf4j\" (UID: \"b74aeb41-307b-4cca-a876-985ea2601650\") " pod="openstack/dnsmasq-dns-d4b7d55d5-brf4j"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.259240 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a8896338-00d7-4330-ba14-3a930d07b9c0-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"a8896338-00d7-4330-ba14-3a930d07b9c0\") " pod="openstack/glance-default-external-api-0"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.259261 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc-log-httpd\") pod \"ceilometer-0\" (UID: \"e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc\") " pod="openstack/ceilometer-0"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.259300 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a8896338-00d7-4330-ba14-3a930d07b9c0-scripts\") pod \"glance-default-external-api-0\" (UID: \"a8896338-00d7-4330-ba14-3a930d07b9c0\") " pod="openstack/glance-default-external-api-0"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.261043 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc-run-httpd\") pod \"ceilometer-0\" (UID: \"e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc\") " pod="openstack/ceilometer-0"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.261160 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b74aeb41-307b-4cca-a876-985ea2601650-dns-swift-storage-0\") pod \"dnsmasq-dns-d4b7d55d5-brf4j\" (UID: \"b74aeb41-307b-4cca-a876-985ea2601650\") " pod="openstack/dnsmasq-dns-d4b7d55d5-brf4j"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.261607 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b74aeb41-307b-4cca-a876-985ea2601650-ovsdbserver-nb\") pod \"dnsmasq-dns-d4b7d55d5-brf4j\" (UID: \"b74aeb41-307b-4cca-a876-985ea2601650\") " pod="openstack/dnsmasq-dns-d4b7d55d5-brf4j"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.270860 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b74aeb41-307b-4cca-a876-985ea2601650-ovsdbserver-sb\") pod \"dnsmasq-dns-d4b7d55d5-brf4j\" (UID: \"b74aeb41-307b-4cca-a876-985ea2601650\") " pod="openstack/dnsmasq-dns-d4b7d55d5-brf4j"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.271788 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc-config-data\") pod \"ceilometer-0\" (UID: \"e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc\") " pod="openstack/ceilometer-0"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.272355 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"]
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.272416 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b74aeb41-307b-4cca-a876-985ea2601650-config\") pod \"dnsmasq-dns-d4b7d55d5-brf4j\" (UID: \"b74aeb41-307b-4cca-a876-985ea2601650\") " pod="openstack/dnsmasq-dns-d4b7d55d5-brf4j"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.273200 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b74aeb41-307b-4cca-a876-985ea2601650-dns-svc\") pod \"dnsmasq-dns-d4b7d55d5-brf4j\" (UID: \"b74aeb41-307b-4cca-a876-985ea2601650\") " pod="openstack/dnsmasq-dns-d4b7d55d5-brf4j"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.273422 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc-log-httpd\") pod \"ceilometer-0\" (UID: \"e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc\") " pod="openstack/ceilometer-0"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.273915 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.282725 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc\") " pod="openstack/ceilometer-0"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.283459 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc\") " pod="openstack/ceilometer-0"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.287076 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"]
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.290540 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc-scripts\") pod \"ceilometer-0\" (UID: \"e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc\") " pod="openstack/ceilometer-0"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.316231 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.328109 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p6gbf\" (UniqueName: \"kubernetes.io/projected/b74aeb41-307b-4cca-a876-985ea2601650-kube-api-access-p6gbf\") pod \"dnsmasq-dns-d4b7d55d5-brf4j\" (UID: \"b74aeb41-307b-4cca-a876-985ea2601650\") " pod="openstack/dnsmasq-dns-d4b7d55d5-brf4j"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.360203 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a8896338-00d7-4330-ba14-3a930d07b9c0-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"a8896338-00d7-4330-ba14-3a930d07b9c0\") " pod="openstack/glance-default-external-api-0"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.360241 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a8896338-00d7-4330-ba14-3a930d07b9c0-config-data\") pod \"glance-default-external-api-0\" (UID: \"a8896338-00d7-4330-ba14-3a930d07b9c0\") " pod="openstack/glance-default-external-api-0"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.360281 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-external-api-0\" (UID: \"a8896338-00d7-4330-ba14-3a930d07b9c0\") " pod="openstack/glance-default-external-api-0"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.360307 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m8bdq\" (UniqueName: \"kubernetes.io/projected/a8896338-00d7-4330-ba14-3a930d07b9c0-kube-api-access-m8bdq\") pod \"glance-default-external-api-0\" (UID: \"a8896338-00d7-4330-ba14-3a930d07b9c0\") " pod="openstack/glance-default-external-api-0"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.360336 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a8896338-00d7-4330-ba14-3a930d07b9c0-logs\") pod \"glance-default-external-api-0\" (UID: \"a8896338-00d7-4330-ba14-3a930d07b9c0\") " pod="openstack/glance-default-external-api-0"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.360357 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a8896338-00d7-4330-ba14-3a930d07b9c0-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"a8896338-00d7-4330-ba14-3a930d07b9c0\") " pod="openstack/glance-default-external-api-0"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.360382 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a8896338-00d7-4330-ba14-3a930d07b9c0-scripts\") pod \"glance-default-external-api-0\" (UID: \"a8896338-00d7-4330-ba14-3a930d07b9c0\") " pod="openstack/glance-default-external-api-0"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.361085 4492 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-external-api-0\" (UID: \"a8896338-00d7-4330-ba14-3a930d07b9c0\") device mount path \"/mnt/openstack/pv02\"" pod="openstack/glance-default-external-api-0"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.362358 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a8896338-00d7-4330-ba14-3a930d07b9c0-logs\") pod \"glance-default-external-api-0\" (UID: \"a8896338-00d7-4330-ba14-3a930d07b9c0\") " pod="openstack/glance-default-external-api-0"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.364351 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a8896338-00d7-4330-ba14-3a930d07b9c0-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"a8896338-00d7-4330-ba14-3a930d07b9c0\") " pod="openstack/glance-default-external-api-0"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.370767 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a8896338-00d7-4330-ba14-3a930d07b9c0-scripts\") pod \"glance-default-external-api-0\" (UID: \"a8896338-00d7-4330-ba14-3a930d07b9c0\") " pod="openstack/glance-default-external-api-0"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.389667 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xbm4n\" (UniqueName: \"kubernetes.io/projected/e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc-kube-api-access-xbm4n\") pod \"ceilometer-0\" (UID: \"e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc\") " pod="openstack/ceilometer-0"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.392091 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a8896338-00d7-4330-ba14-3a930d07b9c0-config-data\") pod \"glance-default-external-api-0\" (UID: \"a8896338-00d7-4330-ba14-3a930d07b9c0\") " pod="openstack/glance-default-external-api-0"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.407364 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a8896338-00d7-4330-ba14-3a930d07b9c0-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"a8896338-00d7-4330-ba14-3a930d07b9c0\") " pod="openstack/glance-default-external-api-0"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.415578 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-d4b7d55d5-brf4j"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.447018 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.449016 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-external-api-0\" (UID: \"a8896338-00d7-4330-ba14-3a930d07b9c0\") " pod="openstack/glance-default-external-api-0"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.457675 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m8bdq\" (UniqueName: \"kubernetes.io/projected/a8896338-00d7-4330-ba14-3a930d07b9c0-kube-api-access-m8bdq\") pod \"glance-default-external-api-0\" (UID: \"a8896338-00d7-4330-ba14-3a930d07b9c0\") " pod="openstack/glance-default-external-api-0"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.463640 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0dfd6d10-c083-4e37-8ce9-7578e8ea8fd1-scripts\") pod \"glance-default-internal-api-0\" (UID: \"0dfd6d10-c083-4e37-8ce9-7578e8ea8fd1\") " pod="openstack/glance-default-internal-api-0"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.464038 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0dfd6d10-c083-4e37-8ce9-7578e8ea8fd1-logs\") pod \"glance-default-internal-api-0\" (UID: \"0dfd6d10-c083-4e37-8ce9-7578e8ea8fd1\") " pod="openstack/glance-default-internal-api-0"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.464143 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jsl64\" (UniqueName: \"kubernetes.io/projected/0dfd6d10-c083-4e37-8ce9-7578e8ea8fd1-kube-api-access-jsl64\") pod \"glance-default-internal-api-0\" (UID: \"0dfd6d10-c083-4e37-8ce9-7578e8ea8fd1\") " pod="openstack/glance-default-internal-api-0"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.464280 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-internal-api-0\" (UID: \"0dfd6d10-c083-4e37-8ce9-7578e8ea8fd1\") " pod="openstack/glance-default-internal-api-0"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.464381 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/0dfd6d10-c083-4e37-8ce9-7578e8ea8fd1-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"0dfd6d10-c083-4e37-8ce9-7578e8ea8fd1\") " pod="openstack/glance-default-internal-api-0"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.464473 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0dfd6d10-c083-4e37-8ce9-7578e8ea8fd1-config-data\") pod \"glance-default-internal-api-0\" (UID: \"0dfd6d10-c083-4e37-8ce9-7578e8ea8fd1\") " pod="openstack/glance-default-internal-api-0"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.464604 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0dfd6d10-c083-4e37-8ce9-7578e8ea8fd1-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"0dfd6d10-c083-4e37-8ce9-7578e8ea8fd1\") " pod="openstack/glance-default-internal-api-0"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.477684 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-gzdq2"]
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.545608 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-sync-7s6sk"]
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.548035 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.566927 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0dfd6d10-c083-4e37-8ce9-7578e8ea8fd1-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"0dfd6d10-c083-4e37-8ce9-7578e8ea8fd1\") " pod="openstack/glance-default-internal-api-0"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.567000 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0dfd6d10-c083-4e37-8ce9-7578e8ea8fd1-scripts\") pod \"glance-default-internal-api-0\" (UID: \"0dfd6d10-c083-4e37-8ce9-7578e8ea8fd1\") " pod="openstack/glance-default-internal-api-0"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.567088 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0dfd6d10-c083-4e37-8ce9-7578e8ea8fd1-logs\") pod \"glance-default-internal-api-0\" (UID: \"0dfd6d10-c083-4e37-8ce9-7578e8ea8fd1\") " pod="openstack/glance-default-internal-api-0"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.567107 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jsl64\" (UniqueName: \"kubernetes.io/projected/0dfd6d10-c083-4e37-8ce9-7578e8ea8fd1-kube-api-access-jsl64\") pod \"glance-default-internal-api-0\" (UID: \"0dfd6d10-c083-4e37-8ce9-7578e8ea8fd1\") " pod="openstack/glance-default-internal-api-0"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.567154 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-internal-api-0\" (UID: \"0dfd6d10-c083-4e37-8ce9-7578e8ea8fd1\") " pod="openstack/glance-default-internal-api-0"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.567185 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/0dfd6d10-c083-4e37-8ce9-7578e8ea8fd1-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"0dfd6d10-c083-4e37-8ce9-7578e8ea8fd1\") " pod="openstack/glance-default-internal-api-0"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.567214 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0dfd6d10-c083-4e37-8ce9-7578e8ea8fd1-config-data\") pod \"glance-default-internal-api-0\" (UID: \"0dfd6d10-c083-4e37-8ce9-7578e8ea8fd1\") " pod="openstack/glance-default-internal-api-0"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.572443 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0dfd6d10-c083-4e37-8ce9-7578e8ea8fd1-logs\") pod \"glance-default-internal-api-0\" (UID: \"0dfd6d10-c083-4e37-8ce9-7578e8ea8fd1\") " pod="openstack/glance-default-internal-api-0"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.573613 4492 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-internal-api-0\" (UID: \"0dfd6d10-c083-4e37-8ce9-7578e8ea8fd1\") device mount path \"/mnt/openstack/pv05\"" pod="openstack/glance-default-internal-api-0"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.573745 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/0dfd6d10-c083-4e37-8ce9-7578e8ea8fd1-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"0dfd6d10-c083-4e37-8ce9-7578e8ea8fd1\") " pod="openstack/glance-default-internal-api-0"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.588886 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0dfd6d10-c083-4e37-8ce9-7578e8ea8fd1-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"0dfd6d10-c083-4e37-8ce9-7578e8ea8fd1\") " pod="openstack/glance-default-internal-api-0"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.599423 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0dfd6d10-c083-4e37-8ce9-7578e8ea8fd1-config-data\") pod \"glance-default-internal-api-0\" (UID: \"0dfd6d10-c083-4e37-8ce9-7578e8ea8fd1\") " pod="openstack/glance-default-internal-api-0"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.613636 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0dfd6d10-c083-4e37-8ce9-7578e8ea8fd1-scripts\") pod \"glance-default-internal-api-0\" (UID: \"0dfd6d10-c083-4e37-8ce9-7578e8ea8fd1\") " pod="openstack/glance-default-internal-api-0"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.617086 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jsl64\" (UniqueName: \"kubernetes.io/projected/0dfd6d10-c083-4e37-8ce9-7578e8ea8fd1-kube-api-access-jsl64\") pod \"glance-default-internal-api-0\" (UID: \"0dfd6d10-c083-4e37-8ce9-7578e8ea8fd1\") " pod="openstack/glance-default-internal-api-0"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.630106 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-internal-api-0\" (UID: \"0dfd6d10-c083-4e37-8ce9-7578e8ea8fd1\") " pod="openstack/glance-default-internal-api-0"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.707659 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7dffd5bb99-5tdmw"]
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.747101 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-5795cb96f7-nkq8g"]
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.845592 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.868317 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-gzdq2" event={"ID":"7696e706-4bee-4acb-9c67-58a83178c948","Type":"ContainerStarted","Data":"688e1ee5135a74741f4ad3c8139cc1c161a858d58e4c1bbb4a48916b444dc333"}
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.868365 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-gzdq2" event={"ID":"7696e706-4bee-4acb-9c67-58a83178c948","Type":"ContainerStarted","Data":"271a2a895637c378464464f10c8035a4d4e20a11d7e0dac98f94f8bea1ef7b7c"}
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.892504 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7dffd5bb99-5tdmw" event={"ID":"fe2b9f14-cbbd-47d8-8c43-116aecf08b1b","Type":"ContainerStarted","Data":"031dd2a844d77abb2faf5a4b2133d53e6e6a990bae36ae9206a5430fe4b5857d"}
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.898221 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-7b9fb5b88f-5mpj7" podUID="2f5bbf95-5ea6-4d2f-becb-7d45e5a07613" containerName="dnsmasq-dns" containerID="cri-o://b1d6ba808b58c654020edf9ee389faca3ae9ecdf18cf7ff3e818ee81d9761ed8" gracePeriod=10
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.898241 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-7s6sk" event={"ID":"ad2234e1-842b-4bba-bd21-9fb781403667","Type":"ContainerStarted","Data":"9d4c526a656e05501c5bc26158393c7caa6649bcc4ba40bf9d4ea6f5ef35edf4"}
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.899936 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-gzdq2" podStartSLOduration=1.89990776 podStartE2EDuration="1.89990776s" podCreationTimestamp="2025-11-26 07:05:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:05:14.883479693 +0000 UTC m=+1010.767367992" watchObservedRunningTime="2025-11-26 07:05:14.89990776 +0000 UTC m=+1010.783796058"
Nov 26 07:05:14 crc kubenswrapper[4492]: I1126 07:05:14.936668 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-58658c84dc-4v45z"]
Nov 26 07:05:14 crc kubenswrapper[4492]: W1126 07:05:14.991733 4492 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd034ecaa_378e_4b07_a17f_0ea8314d8025.slice/crio-abb77ecfefd47fffb966ace47da6bfd880be67bd347160d36d1912ad585a5bdc WatchSource:0}: Error finding container abb77ecfefd47fffb966ace47da6bfd880be67bd347160d36d1912ad585a5bdc: Status 404 returned error can't find the container with id abb77ecfefd47fffb966ace47da6bfd880be67bd347160d36d1912ad585a5bdc
Nov 26 07:05:15 crc kubenswrapper[4492]: I1126 07:05:15.389904 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-9249p"]
Nov 26 07:05:15 crc kubenswrapper[4492]: I1126 07:05:15.469987 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-8w6bv"]
Nov 26 07:05:15 crc kubenswrapper[4492]: I1126 07:05:15.509993 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-79hj2"]
Nov 26 07:05:15 crc kubenswrapper[4492]: I1126 07:05:15.555789 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-6688b9cf7f-qsr5d"]
Nov 26 07:05:15 crc kubenswrapper[4492]: I1126 07:05:15.612862 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-rl9z8"]
Nov 26 07:05:15 crc kubenswrapper[4492]: I1126 07:05:15.634783 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Nov 26 07:05:15 crc kubenswrapper[4492]: I1126 07:05:15.780881 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-d4b7d55d5-brf4j"]
Nov 26 07:05:15 crc kubenswrapper[4492]: I1126 07:05:15.878076 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"]
Nov 26 07:05:15 crc kubenswrapper[4492]: I1126 07:05:15.908188 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7b9fb5b88f-5mpj7"
Nov 26 07:05:15 crc kubenswrapper[4492]: I1126 07:05:15.916770 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc","Type":"ContainerStarted","Data":"e6e216cf30710e9bc2b45282a9035bf0b72e3a7a662e2cf9b4e9522b299f5726"}
Nov 26 07:05:15 crc kubenswrapper[4492]: I1126 07:05:15.917631 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-8w6bv" event={"ID":"a497bcf3-f8db-4b08-b5e3-33d050f9901a","Type":"ContainerStarted","Data":"ec11777edaeb307bb33ebe64ba4cf403076ebd787c82dea426689828c7cf0589"}
Nov 26 07:05:15 crc kubenswrapper[4492]: I1126 07:05:15.918626 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-d4b7d55d5-brf4j" event={"ID":"b74aeb41-307b-4cca-a876-985ea2601650","Type":"ContainerStarted","Data":"b760819037d70e8a40944e08cf06b9bfdf0d7287234f9a936de39abc1b8996d1"}
Nov 26 07:05:15 crc kubenswrapper[4492]: I1126 07:05:15.924155 4492 generic.go:334] "Generic (PLEG): container finished" podID="d034ecaa-378e-4b07-a17f-0ea8314d8025" containerID="2e29c7fa57c36274a048a134da5b60bed63eafe5b8c0841750b80884aa5ef546" exitCode=0
Nov 26 07:05:15 crc kubenswrapper[4492]: I1126 07:05:15.924249 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-58658c84dc-4v45z" event={"ID":"d034ecaa-378e-4b07-a17f-0ea8314d8025","Type":"ContainerDied","Data":"2e29c7fa57c36274a048a134da5b60bed63eafe5b8c0841750b80884aa5ef546"}
Nov 26 07:05:15 crc kubenswrapper[4492]: I1126 07:05:15.924277 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-58658c84dc-4v45z" event={"ID":"d034ecaa-378e-4b07-a17f-0ea8314d8025","Type":"ContainerStarted","Data":"abb77ecfefd47fffb966ace47da6bfd880be67bd347160d36d1912ad585a5bdc"}
Nov 26 07:05:15 crc kubenswrapper[4492]: I1126 07:05:15.933844 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-79hj2" event={"ID":"0093dcb6-c7e5-4b5a-94a3-55fc7465109a","Type":"ContainerStarted","Data":"0d2115bb750477e4e896596a267d9b7048f45aca4a72cf2f70c95eeec466b2d1"}
Nov 26 07:05:15 crc kubenswrapper[4492]: I1126 07:05:15.959154 4492 generic.go:334] "Generic (PLEG): container finished" podID="2f5bbf95-5ea6-4d2f-becb-7d45e5a07613" containerID="b1d6ba808b58c654020edf9ee389faca3ae9ecdf18cf7ff3e818ee81d9761ed8" exitCode=0
Nov 26 07:05:15 crc kubenswrapper[4492]: I1126 07:05:15.959248 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7b9fb5b88f-5mpj7" event={"ID":"2f5bbf95-5ea6-4d2f-becb-7d45e5a07613","Type":"ContainerDied","Data":"b1d6ba808b58c654020edf9ee389faca3ae9ecdf18cf7ff3e818ee81d9761ed8"}
Nov 26 07:05:15 crc kubenswrapper[4492]: I1126 07:05:15.959282 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7b9fb5b88f-5mpj7" event={"ID":"2f5bbf95-5ea6-4d2f-becb-7d45e5a07613","Type":"ContainerDied","Data":"e88edd4a7dc2328ebcc4a2c26f8752ca251462588dd0e47de5c7d81f37b48c08"}
Nov 26 07:05:15 crc kubenswrapper[4492]: I1126 07:05:15.959300 4492 scope.go:117] "RemoveContainer" containerID="b1d6ba808b58c654020edf9ee389faca3ae9ecdf18cf7ff3e818ee81d9761ed8"
Nov 26 07:05:15 crc kubenswrapper[4492]: I1126 07:05:15.959445 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7b9fb5b88f-5mpj7" Nov 26 07:05:15 crc kubenswrapper[4492]: I1126 07:05:15.990489 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6688b9cf7f-qsr5d" event={"ID":"689a4cb6-8c65-4db6-b3d2-b825ca6ddd7e","Type":"ContainerStarted","Data":"446202fa41cc55dab0a5a49c4fbdd025274941ac8d22ff4c581e59596786030f"} Nov 26 07:05:15 crc kubenswrapper[4492]: I1126 07:05:15.993508 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-9249p" event={"ID":"e34f6949-eab2-4b97-9ba1-54ed3e59da5c","Type":"ContainerStarted","Data":"f4341e56969edce48aaf4d606b45d52a7bdd10cee1b288d590e3ce4343963540"} Nov 26 07:05:16 crc kubenswrapper[4492]: I1126 07:05:16.000443 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-rl9z8" event={"ID":"0afbb611-57c8-4d5a-a258-cc184185d75c","Type":"ContainerStarted","Data":"a47769566775605c1729f6e47025939de11d410fa967fa5364914970d6d89eea"} Nov 26 07:05:16 crc kubenswrapper[4492]: I1126 07:05:16.000479 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-rl9z8" event={"ID":"0afbb611-57c8-4d5a-a258-cc184185d75c","Type":"ContainerStarted","Data":"65c45d6f16c5fbac2898f5c85fac1545c7f4c7e63ad733bc134ed926e74d4d8a"} Nov 26 07:05:16 crc kubenswrapper[4492]: I1126 07:05:16.017118 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-sync-rl9z8" podStartSLOduration=3.017101355 podStartE2EDuration="3.017101355s" podCreationTimestamp="2025-11-26 07:05:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:05:16.014464376 +0000 UTC m=+1011.898352674" watchObservedRunningTime="2025-11-26 07:05:16.017101355 +0000 UTC m=+1011.900989653" Nov 26 07:05:16 crc kubenswrapper[4492]: I1126 07:05:16.029982 4492 generic.go:334] "Generic (PLEG): container finished" podID="fe2b9f14-cbbd-47d8-8c43-116aecf08b1b" containerID="fca536b274dd26171eb9935944515bb9fd09e3ca4a49e432b81634805a5f57f0" exitCode=0 Nov 26 07:05:16 crc kubenswrapper[4492]: I1126 07:05:16.030047 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7dffd5bb99-5tdmw" event={"ID":"fe2b9f14-cbbd-47d8-8c43-116aecf08b1b","Type":"ContainerDied","Data":"fca536b274dd26171eb9935944515bb9fd09e3ca4a49e432b81634805a5f57f0"} Nov 26 07:05:16 crc kubenswrapper[4492]: I1126 07:05:16.038838 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5795cb96f7-nkq8g" event={"ID":"6759f644-95cf-470e-8914-3583d8b0e11d","Type":"ContainerStarted","Data":"374b88987c576feed8070e17b31c07a65dbe509e5ddecec00f8cec2cb0e55f11"} Nov 26 07:05:16 crc kubenswrapper[4492]: I1126 07:05:16.040134 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2f5bbf95-5ea6-4d2f-becb-7d45e5a07613-ovsdbserver-sb\") pod \"2f5bbf95-5ea6-4d2f-becb-7d45e5a07613\" (UID: \"2f5bbf95-5ea6-4d2f-becb-7d45e5a07613\") " Nov 26 07:05:16 crc kubenswrapper[4492]: I1126 07:05:16.040249 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2f5bbf95-5ea6-4d2f-becb-7d45e5a07613-config\") pod \"2f5bbf95-5ea6-4d2f-becb-7d45e5a07613\" (UID: \"2f5bbf95-5ea6-4d2f-becb-7d45e5a07613\") " Nov 26 07:05:16 crc kubenswrapper[4492]: I1126 07:05:16.040285 4492 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2f5bbf95-5ea6-4d2f-becb-7d45e5a07613-dns-svc\") pod \"2f5bbf95-5ea6-4d2f-becb-7d45e5a07613\" (UID: \"2f5bbf95-5ea6-4d2f-becb-7d45e5a07613\") " Nov 26 07:05:16 crc kubenswrapper[4492]: I1126 07:05:16.040597 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q46bg\" (UniqueName: \"kubernetes.io/projected/2f5bbf95-5ea6-4d2f-becb-7d45e5a07613-kube-api-access-q46bg\") pod \"2f5bbf95-5ea6-4d2f-becb-7d45e5a07613\" (UID: \"2f5bbf95-5ea6-4d2f-becb-7d45e5a07613\") " Nov 26 07:05:16 crc kubenswrapper[4492]: I1126 07:05:16.041527 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2f5bbf95-5ea6-4d2f-becb-7d45e5a07613-ovsdbserver-nb\") pod \"2f5bbf95-5ea6-4d2f-becb-7d45e5a07613\" (UID: \"2f5bbf95-5ea6-4d2f-becb-7d45e5a07613\") " Nov 26 07:05:16 crc kubenswrapper[4492]: I1126 07:05:16.055397 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2f5bbf95-5ea6-4d2f-becb-7d45e5a07613-kube-api-access-q46bg" (OuterVolumeSpecName: "kube-api-access-q46bg") pod "2f5bbf95-5ea6-4d2f-becb-7d45e5a07613" (UID: "2f5bbf95-5ea6-4d2f-becb-7d45e5a07613"). InnerVolumeSpecName "kube-api-access-q46bg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:05:16 crc kubenswrapper[4492]: I1126 07:05:16.094832 4492 scope.go:117] "RemoveContainer" containerID="ffb7b8adffc27d54c06d5e60fcd5abc5fd41cb9180f49fb20d2b63de60416bfb" Nov 26 07:05:16 crc kubenswrapper[4492]: I1126 07:05:16.101501 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2f5bbf95-5ea6-4d2f-becb-7d45e5a07613-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "2f5bbf95-5ea6-4d2f-becb-7d45e5a07613" (UID: "2f5bbf95-5ea6-4d2f-becb-7d45e5a07613"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:05:16 crc kubenswrapper[4492]: I1126 07:05:16.141166 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2f5bbf95-5ea6-4d2f-becb-7d45e5a07613-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "2f5bbf95-5ea6-4d2f-becb-7d45e5a07613" (UID: "2f5bbf95-5ea6-4d2f-becb-7d45e5a07613"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:05:16 crc kubenswrapper[4492]: I1126 07:05:16.144014 4492 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2f5bbf95-5ea6-4d2f-becb-7d45e5a07613-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 07:05:16 crc kubenswrapper[4492]: I1126 07:05:16.144041 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q46bg\" (UniqueName: \"kubernetes.io/projected/2f5bbf95-5ea6-4d2f-becb-7d45e5a07613-kube-api-access-q46bg\") on node \"crc\" DevicePath \"\"" Nov 26 07:05:16 crc kubenswrapper[4492]: I1126 07:05:16.144052 4492 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2f5bbf95-5ea6-4d2f-becb-7d45e5a07613-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 26 07:05:16 crc kubenswrapper[4492]: I1126 07:05:16.146107 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2f5bbf95-5ea6-4d2f-becb-7d45e5a07613-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "2f5bbf95-5ea6-4d2f-becb-7d45e5a07613" (UID: "2f5bbf95-5ea6-4d2f-becb-7d45e5a07613"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:05:16 crc kubenswrapper[4492]: I1126 07:05:16.155051 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2f5bbf95-5ea6-4d2f-becb-7d45e5a07613-config" (OuterVolumeSpecName: "config") pod "2f5bbf95-5ea6-4d2f-becb-7d45e5a07613" (UID: "2f5bbf95-5ea6-4d2f-becb-7d45e5a07613"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:05:16 crc kubenswrapper[4492]: I1126 07:05:16.189447 4492 scope.go:117] "RemoveContainer" containerID="b1d6ba808b58c654020edf9ee389faca3ae9ecdf18cf7ff3e818ee81d9761ed8" Nov 26 07:05:16 crc kubenswrapper[4492]: E1126 07:05:16.193455 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b1d6ba808b58c654020edf9ee389faca3ae9ecdf18cf7ff3e818ee81d9761ed8\": container with ID starting with b1d6ba808b58c654020edf9ee389faca3ae9ecdf18cf7ff3e818ee81d9761ed8 not found: ID does not exist" containerID="b1d6ba808b58c654020edf9ee389faca3ae9ecdf18cf7ff3e818ee81d9761ed8" Nov 26 07:05:16 crc kubenswrapper[4492]: I1126 07:05:16.193490 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b1d6ba808b58c654020edf9ee389faca3ae9ecdf18cf7ff3e818ee81d9761ed8"} err="failed to get container status \"b1d6ba808b58c654020edf9ee389faca3ae9ecdf18cf7ff3e818ee81d9761ed8\": rpc error: code = NotFound desc = could not find container \"b1d6ba808b58c654020edf9ee389faca3ae9ecdf18cf7ff3e818ee81d9761ed8\": container with ID starting with b1d6ba808b58c654020edf9ee389faca3ae9ecdf18cf7ff3e818ee81d9761ed8 not found: ID does not exist" Nov 26 07:05:16 crc kubenswrapper[4492]: I1126 07:05:16.193511 4492 scope.go:117] "RemoveContainer" containerID="ffb7b8adffc27d54c06d5e60fcd5abc5fd41cb9180f49fb20d2b63de60416bfb" Nov 26 07:05:16 crc kubenswrapper[4492]: E1126 07:05:16.195025 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ffb7b8adffc27d54c06d5e60fcd5abc5fd41cb9180f49fb20d2b63de60416bfb\": container with ID starting with ffb7b8adffc27d54c06d5e60fcd5abc5fd41cb9180f49fb20d2b63de60416bfb not found: ID does not exist" 
containerID="ffb7b8adffc27d54c06d5e60fcd5abc5fd41cb9180f49fb20d2b63de60416bfb" Nov 26 07:05:16 crc kubenswrapper[4492]: I1126 07:05:16.195216 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ffb7b8adffc27d54c06d5e60fcd5abc5fd41cb9180f49fb20d2b63de60416bfb"} err="failed to get container status \"ffb7b8adffc27d54c06d5e60fcd5abc5fd41cb9180f49fb20d2b63de60416bfb\": rpc error: code = NotFound desc = could not find container \"ffb7b8adffc27d54c06d5e60fcd5abc5fd41cb9180f49fb20d2b63de60416bfb\": container with ID starting with ffb7b8adffc27d54c06d5e60fcd5abc5fd41cb9180f49fb20d2b63de60416bfb not found: ID does not exist" Nov 26 07:05:16 crc kubenswrapper[4492]: I1126 07:05:16.247959 4492 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2f5bbf95-5ea6-4d2f-becb-7d45e5a07613-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 26 07:05:16 crc kubenswrapper[4492]: I1126 07:05:16.247984 4492 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2f5bbf95-5ea6-4d2f-becb-7d45e5a07613-config\") on node \"crc\" DevicePath \"\"" Nov 26 07:05:16 crc kubenswrapper[4492]: I1126 07:05:16.335306 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7b9fb5b88f-5mpj7"] Nov 26 07:05:16 crc kubenswrapper[4492]: I1126 07:05:16.376968 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7b9fb5b88f-5mpj7"] Nov 26 07:05:16 crc kubenswrapper[4492]: I1126 07:05:16.450445 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2f5bbf95-5ea6-4d2f-becb-7d45e5a07613" path="/var/lib/kubelet/pods/2f5bbf95-5ea6-4d2f-becb-7d45e5a07613/volumes" Nov 26 07:05:16 crc kubenswrapper[4492]: I1126 07:05:16.505526 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-58658c84dc-4v45z" Nov 26 07:05:16 crc kubenswrapper[4492]: I1126 07:05:16.509580 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7dffd5bb99-5tdmw" Nov 26 07:05:16 crc kubenswrapper[4492]: I1126 07:05:16.569716 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/fe2b9f14-cbbd-47d8-8c43-116aecf08b1b-dns-svc\") pod \"fe2b9f14-cbbd-47d8-8c43-116aecf08b1b\" (UID: \"fe2b9f14-cbbd-47d8-8c43-116aecf08b1b\") " Nov 26 07:05:16 crc kubenswrapper[4492]: I1126 07:05:16.569786 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d034ecaa-378e-4b07-a17f-0ea8314d8025-config\") pod \"d034ecaa-378e-4b07-a17f-0ea8314d8025\" (UID: \"d034ecaa-378e-4b07-a17f-0ea8314d8025\") " Nov 26 07:05:16 crc kubenswrapper[4492]: I1126 07:05:16.569828 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xwnjh\" (UniqueName: \"kubernetes.io/projected/d034ecaa-378e-4b07-a17f-0ea8314d8025-kube-api-access-xwnjh\") pod \"d034ecaa-378e-4b07-a17f-0ea8314d8025\" (UID: \"d034ecaa-378e-4b07-a17f-0ea8314d8025\") " Nov 26 07:05:16 crc kubenswrapper[4492]: I1126 07:05:16.569904 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d034ecaa-378e-4b07-a17f-0ea8314d8025-dns-svc\") pod \"d034ecaa-378e-4b07-a17f-0ea8314d8025\" (UID: \"d034ecaa-378e-4b07-a17f-0ea8314d8025\") " Nov 26 07:05:16 crc kubenswrapper[4492]: I1126 07:05:16.569964 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d034ecaa-378e-4b07-a17f-0ea8314d8025-dns-swift-storage-0\") pod \"d034ecaa-378e-4b07-a17f-0ea8314d8025\" (UID: \"d034ecaa-378e-4b07-a17f-0ea8314d8025\") " Nov 26 07:05:16 crc kubenswrapper[4492]: I1126 07:05:16.570041 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d034ecaa-378e-4b07-a17f-0ea8314d8025-ovsdbserver-nb\") pod \"d034ecaa-378e-4b07-a17f-0ea8314d8025\" (UID: \"d034ecaa-378e-4b07-a17f-0ea8314d8025\") " Nov 26 07:05:16 crc kubenswrapper[4492]: I1126 07:05:16.570099 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d034ecaa-378e-4b07-a17f-0ea8314d8025-ovsdbserver-sb\") pod \"d034ecaa-378e-4b07-a17f-0ea8314d8025\" (UID: \"d034ecaa-378e-4b07-a17f-0ea8314d8025\") " Nov 26 07:05:16 crc kubenswrapper[4492]: I1126 07:05:16.570143 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fe2b9f14-cbbd-47d8-8c43-116aecf08b1b-config\") pod \"fe2b9f14-cbbd-47d8-8c43-116aecf08b1b\" (UID: \"fe2b9f14-cbbd-47d8-8c43-116aecf08b1b\") " Nov 26 07:05:16 crc kubenswrapper[4492]: I1126 07:05:16.570200 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rl9zp\" (UniqueName: \"kubernetes.io/projected/fe2b9f14-cbbd-47d8-8c43-116aecf08b1b-kube-api-access-rl9zp\") pod \"fe2b9f14-cbbd-47d8-8c43-116aecf08b1b\" (UID: \"fe2b9f14-cbbd-47d8-8c43-116aecf08b1b\") " Nov 26 07:05:16 crc kubenswrapper[4492]: I1126 07:05:16.570234 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/fe2b9f14-cbbd-47d8-8c43-116aecf08b1b-ovsdbserver-nb\") pod \"fe2b9f14-cbbd-47d8-8c43-116aecf08b1b\" (UID: 
\"fe2b9f14-cbbd-47d8-8c43-116aecf08b1b\") " Nov 26 07:05:16 crc kubenswrapper[4492]: I1126 07:05:16.570278 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/fe2b9f14-cbbd-47d8-8c43-116aecf08b1b-ovsdbserver-sb\") pod \"fe2b9f14-cbbd-47d8-8c43-116aecf08b1b\" (UID: \"fe2b9f14-cbbd-47d8-8c43-116aecf08b1b\") " Nov 26 07:05:16 crc kubenswrapper[4492]: I1126 07:05:16.581386 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fe2b9f14-cbbd-47d8-8c43-116aecf08b1b-kube-api-access-rl9zp" (OuterVolumeSpecName: "kube-api-access-rl9zp") pod "fe2b9f14-cbbd-47d8-8c43-116aecf08b1b" (UID: "fe2b9f14-cbbd-47d8-8c43-116aecf08b1b"). InnerVolumeSpecName "kube-api-access-rl9zp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:05:16 crc kubenswrapper[4492]: I1126 07:05:16.590184 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d034ecaa-378e-4b07-a17f-0ea8314d8025-kube-api-access-xwnjh" (OuterVolumeSpecName: "kube-api-access-xwnjh") pod "d034ecaa-378e-4b07-a17f-0ea8314d8025" (UID: "d034ecaa-378e-4b07-a17f-0ea8314d8025"). InnerVolumeSpecName "kube-api-access-xwnjh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:05:16 crc kubenswrapper[4492]: I1126 07:05:16.616548 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fe2b9f14-cbbd-47d8-8c43-116aecf08b1b-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "fe2b9f14-cbbd-47d8-8c43-116aecf08b1b" (UID: "fe2b9f14-cbbd-47d8-8c43-116aecf08b1b"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:05:16 crc kubenswrapper[4492]: I1126 07:05:16.617449 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d034ecaa-378e-4b07-a17f-0ea8314d8025-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "d034ecaa-378e-4b07-a17f-0ea8314d8025" (UID: "d034ecaa-378e-4b07-a17f-0ea8314d8025"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:05:16 crc kubenswrapper[4492]: I1126 07:05:16.627065 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fe2b9f14-cbbd-47d8-8c43-116aecf08b1b-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "fe2b9f14-cbbd-47d8-8c43-116aecf08b1b" (UID: "fe2b9f14-cbbd-47d8-8c43-116aecf08b1b"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:05:16 crc kubenswrapper[4492]: I1126 07:05:16.638122 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d034ecaa-378e-4b07-a17f-0ea8314d8025-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "d034ecaa-378e-4b07-a17f-0ea8314d8025" (UID: "d034ecaa-378e-4b07-a17f-0ea8314d8025"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:05:16 crc kubenswrapper[4492]: I1126 07:05:16.638653 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d034ecaa-378e-4b07-a17f-0ea8314d8025-config" (OuterVolumeSpecName: "config") pod "d034ecaa-378e-4b07-a17f-0ea8314d8025" (UID: "d034ecaa-378e-4b07-a17f-0ea8314d8025"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:05:16 crc kubenswrapper[4492]: I1126 07:05:16.658980 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d034ecaa-378e-4b07-a17f-0ea8314d8025-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "d034ecaa-378e-4b07-a17f-0ea8314d8025" (UID: "d034ecaa-378e-4b07-a17f-0ea8314d8025"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:05:16 crc kubenswrapper[4492]: I1126 07:05:16.669513 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fe2b9f14-cbbd-47d8-8c43-116aecf08b1b-config" (OuterVolumeSpecName: "config") pod "fe2b9f14-cbbd-47d8-8c43-116aecf08b1b" (UID: "fe2b9f14-cbbd-47d8-8c43-116aecf08b1b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:05:16 crc kubenswrapper[4492]: I1126 07:05:16.675785 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fe2b9f14-cbbd-47d8-8c43-116aecf08b1b-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "fe2b9f14-cbbd-47d8-8c43-116aecf08b1b" (UID: "fe2b9f14-cbbd-47d8-8c43-116aecf08b1b"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:05:16 crc kubenswrapper[4492]: I1126 07:05:16.676157 4492 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/fe2b9f14-cbbd-47d8-8c43-116aecf08b1b-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 07:05:16 crc kubenswrapper[4492]: I1126 07:05:16.676204 4492 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d034ecaa-378e-4b07-a17f-0ea8314d8025-config\") on node \"crc\" DevicePath \"\"" Nov 26 07:05:16 crc kubenswrapper[4492]: I1126 07:05:16.676215 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xwnjh\" (UniqueName: \"kubernetes.io/projected/d034ecaa-378e-4b07-a17f-0ea8314d8025-kube-api-access-xwnjh\") on node \"crc\" DevicePath \"\"" Nov 26 07:05:16 crc kubenswrapper[4492]: I1126 07:05:16.676228 4492 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d034ecaa-378e-4b07-a17f-0ea8314d8025-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 26 07:05:16 crc kubenswrapper[4492]: I1126 07:05:16.676237 4492 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d034ecaa-378e-4b07-a17f-0ea8314d8025-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 26 07:05:16 crc kubenswrapper[4492]: I1126 07:05:16.676245 4492 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d034ecaa-378e-4b07-a17f-0ea8314d8025-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 26 07:05:16 crc kubenswrapper[4492]: I1126 07:05:16.676253 4492 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fe2b9f14-cbbd-47d8-8c43-116aecf08b1b-config\") on node \"crc\" DevicePath \"\"" Nov 26 07:05:16 crc kubenswrapper[4492]: I1126 07:05:16.676262 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rl9zp\" (UniqueName: \"kubernetes.io/projected/fe2b9f14-cbbd-47d8-8c43-116aecf08b1b-kube-api-access-rl9zp\") on node \"crc\" DevicePath \"\"" Nov 26 07:05:16 crc kubenswrapper[4492]: 
I1126 07:05:16.676272 4492 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/fe2b9f14-cbbd-47d8-8c43-116aecf08b1b-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 26 07:05:16 crc kubenswrapper[4492]: I1126 07:05:16.676281 4492 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/fe2b9f14-cbbd-47d8-8c43-116aecf08b1b-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 26 07:05:16 crc kubenswrapper[4492]: I1126 07:05:16.684424 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d034ecaa-378e-4b07-a17f-0ea8314d8025-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "d034ecaa-378e-4b07-a17f-0ea8314d8025" (UID: "d034ecaa-378e-4b07-a17f-0ea8314d8025"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:05:16 crc kubenswrapper[4492]: I1126 07:05:16.771973 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 07:05:16 crc kubenswrapper[4492]: I1126 07:05:16.778870 4492 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d034ecaa-378e-4b07-a17f-0ea8314d8025-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 07:05:17 crc kubenswrapper[4492]: I1126 07:05:17.060307 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7dffd5bb99-5tdmw" Nov 26 07:05:17 crc kubenswrapper[4492]: I1126 07:05:17.060320 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7dffd5bb99-5tdmw" event={"ID":"fe2b9f14-cbbd-47d8-8c43-116aecf08b1b","Type":"ContainerDied","Data":"031dd2a844d77abb2faf5a4b2133d53e6e6a990bae36ae9206a5430fe4b5857d"} Nov 26 07:05:17 crc kubenswrapper[4492]: I1126 07:05:17.060425 4492 scope.go:117] "RemoveContainer" containerID="fca536b274dd26171eb9935944515bb9fd09e3ca4a49e432b81634805a5f57f0" Nov 26 07:05:17 crc kubenswrapper[4492]: I1126 07:05:17.103524 4492 generic.go:334] "Generic (PLEG): container finished" podID="b74aeb41-307b-4cca-a876-985ea2601650" containerID="dc9e0c2936235b678729713d959a42dedc964504eff3b369f54d38e4385d29ad" exitCode=0 Nov 26 07:05:17 crc kubenswrapper[4492]: I1126 07:05:17.103603 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-d4b7d55d5-brf4j" event={"ID":"b74aeb41-307b-4cca-a876-985ea2601650","Type":"ContainerDied","Data":"dc9e0c2936235b678729713d959a42dedc964504eff3b369f54d38e4385d29ad"} Nov 26 07:05:17 crc kubenswrapper[4492]: I1126 07:05:17.110088 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"0dfd6d10-c083-4e37-8ce9-7578e8ea8fd1","Type":"ContainerStarted","Data":"5bd4327fc7b5ee5ed23538e4a3c65718aa71901419efa9891ccb9c000d4ea86c"} Nov 26 07:05:17 crc kubenswrapper[4492]: I1126 07:05:17.130673 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7dffd5bb99-5tdmw"] Nov 26 07:05:17 crc kubenswrapper[4492]: I1126 07:05:17.144052 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-58658c84dc-4v45z" event={"ID":"d034ecaa-378e-4b07-a17f-0ea8314d8025","Type":"ContainerDied","Data":"abb77ecfefd47fffb966ace47da6bfd880be67bd347160d36d1912ad585a5bdc"} Nov 26 07:05:17 crc kubenswrapper[4492]: I1126 07:05:17.144163 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-58658c84dc-4v45z" Nov 26 07:05:17 crc kubenswrapper[4492]: I1126 07:05:17.145068 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7dffd5bb99-5tdmw"] Nov 26 07:05:17 crc kubenswrapper[4492]: I1126 07:05:17.236279 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"a8896338-00d7-4330-ba14-3a930d07b9c0","Type":"ContainerStarted","Data":"fd0e848f94a82cec187cdc5d8f5c42760e1c593187c075947a0104d2c76a98d9"} Nov 26 07:05:17 crc kubenswrapper[4492]: I1126 07:05:17.236500 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"a8896338-00d7-4330-ba14-3a930d07b9c0","Type":"ContainerStarted","Data":"5b653d6d7c51975475a9d6644992d0bb7aa94c1fe4a8da14d50b65cb1839b5eb"} Nov 26 07:05:17 crc kubenswrapper[4492]: I1126 07:05:17.244066 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-5795cb96f7-nkq8g"] Nov 26 07:05:17 crc kubenswrapper[4492]: I1126 07:05:17.336297 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-58658c84dc-4v45z"] Nov 26 07:05:17 crc kubenswrapper[4492]: I1126 07:05:17.374345 4492 scope.go:117] "RemoveContainer" containerID="2e29c7fa57c36274a048a134da5b60bed63eafe5b8c0841750b80884aa5ef546" Nov 26 07:05:17 crc kubenswrapper[4492]: I1126 07:05:17.393321 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-58658c84dc-4v45z"] Nov 26 07:05:17 crc kubenswrapper[4492]: I1126 07:05:17.419010 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-644687db57-zx74g"] Nov 26 07:05:17 crc kubenswrapper[4492]: E1126 07:05:17.419513 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2f5bbf95-5ea6-4d2f-becb-7d45e5a07613" containerName="dnsmasq-dns" Nov 26 07:05:17 crc kubenswrapper[4492]: I1126 07:05:17.419530 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="2f5bbf95-5ea6-4d2f-becb-7d45e5a07613" containerName="dnsmasq-dns" Nov 26 07:05:17 crc kubenswrapper[4492]: E1126 07:05:17.419552 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2f5bbf95-5ea6-4d2f-becb-7d45e5a07613" containerName="init" Nov 26 07:05:17 crc kubenswrapper[4492]: I1126 07:05:17.419558 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="2f5bbf95-5ea6-4d2f-becb-7d45e5a07613" containerName="init" Nov 26 07:05:17 crc kubenswrapper[4492]: E1126 07:05:17.419577 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d034ecaa-378e-4b07-a17f-0ea8314d8025" containerName="init" Nov 26 07:05:17 crc kubenswrapper[4492]: I1126 07:05:17.419584 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="d034ecaa-378e-4b07-a17f-0ea8314d8025" containerName="init" Nov 26 07:05:17 crc kubenswrapper[4492]: E1126 07:05:17.419608 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fe2b9f14-cbbd-47d8-8c43-116aecf08b1b" containerName="init" Nov 26 07:05:17 crc kubenswrapper[4492]: I1126 07:05:17.419616 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="fe2b9f14-cbbd-47d8-8c43-116aecf08b1b" containerName="init" Nov 26 07:05:17 crc kubenswrapper[4492]: I1126 07:05:17.419790 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="fe2b9f14-cbbd-47d8-8c43-116aecf08b1b" containerName="init" Nov 26 07:05:17 crc kubenswrapper[4492]: I1126 07:05:17.419809 4492 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="2f5bbf95-5ea6-4d2f-becb-7d45e5a07613" containerName="dnsmasq-dns" Nov 26 07:05:17 crc kubenswrapper[4492]: I1126 07:05:17.419822 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="d034ecaa-378e-4b07-a17f-0ea8314d8025" containerName="init" Nov 26 07:05:17 crc kubenswrapper[4492]: I1126 07:05:17.420947 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-644687db57-zx74g" Nov 26 07:05:17 crc kubenswrapper[4492]: I1126 07:05:17.446475 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 07:05:17 crc kubenswrapper[4492]: I1126 07:05:17.455679 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 26 07:05:17 crc kubenswrapper[4492]: I1126 07:05:17.480231 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-644687db57-zx74g"] Nov 26 07:05:17 crc kubenswrapper[4492]: I1126 07:05:17.497892 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c4316787-8af1-40ef-995a-0f8aabd1bf11-logs\") pod \"horizon-644687db57-zx74g\" (UID: \"c4316787-8af1-40ef-995a-0f8aabd1bf11\") " pod="openstack/horizon-644687db57-zx74g" Nov 26 07:05:17 crc kubenswrapper[4492]: I1126 07:05:17.498314 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c4316787-8af1-40ef-995a-0f8aabd1bf11-scripts\") pod \"horizon-644687db57-zx74g\" (UID: \"c4316787-8af1-40ef-995a-0f8aabd1bf11\") " pod="openstack/horizon-644687db57-zx74g" Nov 26 07:05:17 crc kubenswrapper[4492]: I1126 07:05:17.498405 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/c4316787-8af1-40ef-995a-0f8aabd1bf11-horizon-secret-key\") pod \"horizon-644687db57-zx74g\" (UID: \"c4316787-8af1-40ef-995a-0f8aabd1bf11\") " pod="openstack/horizon-644687db57-zx74g" Nov 26 07:05:17 crc kubenswrapper[4492]: I1126 07:05:17.499240 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c4316787-8af1-40ef-995a-0f8aabd1bf11-config-data\") pod \"horizon-644687db57-zx74g\" (UID: \"c4316787-8af1-40ef-995a-0f8aabd1bf11\") " pod="openstack/horizon-644687db57-zx74g" Nov 26 07:05:17 crc kubenswrapper[4492]: I1126 07:05:17.499396 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s2vbx\" (UniqueName: \"kubernetes.io/projected/c4316787-8af1-40ef-995a-0f8aabd1bf11-kube-api-access-s2vbx\") pod \"horizon-644687db57-zx74g\" (UID: \"c4316787-8af1-40ef-995a-0f8aabd1bf11\") " pod="openstack/horizon-644687db57-zx74g" Nov 26 07:05:17 crc kubenswrapper[4492]: I1126 07:05:17.537226 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 07:05:17 crc kubenswrapper[4492]: I1126 07:05:17.604167 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2vbx\" (UniqueName: \"kubernetes.io/projected/c4316787-8af1-40ef-995a-0f8aabd1bf11-kube-api-access-s2vbx\") pod \"horizon-644687db57-zx74g\" (UID: \"c4316787-8af1-40ef-995a-0f8aabd1bf11\") " pod="openstack/horizon-644687db57-zx74g" Nov 26 07:05:17 crc kubenswrapper[4492]: I1126 07:05:17.605597 4492 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c4316787-8af1-40ef-995a-0f8aabd1bf11-logs\") pod \"horizon-644687db57-zx74g\" (UID: \"c4316787-8af1-40ef-995a-0f8aabd1bf11\") " pod="openstack/horizon-644687db57-zx74g" Nov 26 07:05:17 crc kubenswrapper[4492]: I1126 07:05:17.605664 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c4316787-8af1-40ef-995a-0f8aabd1bf11-scripts\") pod \"horizon-644687db57-zx74g\" (UID: \"c4316787-8af1-40ef-995a-0f8aabd1bf11\") " pod="openstack/horizon-644687db57-zx74g" Nov 26 07:05:17 crc kubenswrapper[4492]: I1126 07:05:17.606730 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c4316787-8af1-40ef-995a-0f8aabd1bf11-logs\") pod \"horizon-644687db57-zx74g\" (UID: \"c4316787-8af1-40ef-995a-0f8aabd1bf11\") " pod="openstack/horizon-644687db57-zx74g" Nov 26 07:05:17 crc kubenswrapper[4492]: I1126 07:05:17.607644 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c4316787-8af1-40ef-995a-0f8aabd1bf11-scripts\") pod \"horizon-644687db57-zx74g\" (UID: \"c4316787-8af1-40ef-995a-0f8aabd1bf11\") " pod="openstack/horizon-644687db57-zx74g" Nov 26 07:05:17 crc kubenswrapper[4492]: I1126 07:05:17.610550 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/c4316787-8af1-40ef-995a-0f8aabd1bf11-horizon-secret-key\") pod \"horizon-644687db57-zx74g\" (UID: \"c4316787-8af1-40ef-995a-0f8aabd1bf11\") " pod="openstack/horizon-644687db57-zx74g" Nov 26 07:05:17 crc kubenswrapper[4492]: I1126 07:05:17.610635 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c4316787-8af1-40ef-995a-0f8aabd1bf11-config-data\") pod \"horizon-644687db57-zx74g\" (UID: \"c4316787-8af1-40ef-995a-0f8aabd1bf11\") " pod="openstack/horizon-644687db57-zx74g" Nov 26 07:05:17 crc kubenswrapper[4492]: I1126 07:05:17.611593 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c4316787-8af1-40ef-995a-0f8aabd1bf11-config-data\") pod \"horizon-644687db57-zx74g\" (UID: \"c4316787-8af1-40ef-995a-0f8aabd1bf11\") " pod="openstack/horizon-644687db57-zx74g" Nov 26 07:05:17 crc kubenswrapper[4492]: I1126 07:05:17.649554 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/c4316787-8af1-40ef-995a-0f8aabd1bf11-horizon-secret-key\") pod \"horizon-644687db57-zx74g\" (UID: \"c4316787-8af1-40ef-995a-0f8aabd1bf11\") " pod="openstack/horizon-644687db57-zx74g" Nov 26 07:05:17 crc kubenswrapper[4492]: I1126 07:05:17.664581 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2vbx\" (UniqueName: \"kubernetes.io/projected/c4316787-8af1-40ef-995a-0f8aabd1bf11-kube-api-access-s2vbx\") pod \"horizon-644687db57-zx74g\" (UID: \"c4316787-8af1-40ef-995a-0f8aabd1bf11\") " pod="openstack/horizon-644687db57-zx74g" Nov 26 07:05:17 crc kubenswrapper[4492]: I1126 07:05:17.773882 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-644687db57-zx74g" Nov 26 07:05:18 crc kubenswrapper[4492]: I1126 07:05:18.306081 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"0dfd6d10-c083-4e37-8ce9-7578e8ea8fd1","Type":"ContainerStarted","Data":"6ed660ae76bb03fcdb60ad10cab39cce896c0b44698ded3973c28fc7db5ffffe"} Nov 26 07:05:18 crc kubenswrapper[4492]: I1126 07:05:18.306348 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-644687db57-zx74g"] Nov 26 07:05:18 crc kubenswrapper[4492]: I1126 07:05:18.321605 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-d4b7d55d5-brf4j" event={"ID":"b74aeb41-307b-4cca-a876-985ea2601650","Type":"ContainerStarted","Data":"aa78c3c3c0b9f516e77a60b0ed36169938e0144e9280774dd0d992da1a133746"} Nov 26 07:05:18 crc kubenswrapper[4492]: I1126 07:05:18.322083 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-d4b7d55d5-brf4j" Nov 26 07:05:18 crc kubenswrapper[4492]: I1126 07:05:18.346689 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-d4b7d55d5-brf4j" podStartSLOduration=5.3466707620000005 podStartE2EDuration="5.346670762s" podCreationTimestamp="2025-11-26 07:05:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:05:18.340589098 +0000 UTC m=+1014.224477395" watchObservedRunningTime="2025-11-26 07:05:18.346670762 +0000 UTC m=+1014.230559050" Nov 26 07:05:18 crc kubenswrapper[4492]: I1126 07:05:18.449514 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d034ecaa-378e-4b07-a17f-0ea8314d8025" path="/var/lib/kubelet/pods/d034ecaa-378e-4b07-a17f-0ea8314d8025/volumes" Nov 26 07:05:18 crc kubenswrapper[4492]: I1126 07:05:18.450181 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fe2b9f14-cbbd-47d8-8c43-116aecf08b1b" path="/var/lib/kubelet/pods/fe2b9f14-cbbd-47d8-8c43-116aecf08b1b/volumes" Nov 26 07:05:19 crc kubenswrapper[4492]: I1126 07:05:19.333959 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"a8896338-00d7-4330-ba14-3a930d07b9c0","Type":"ContainerStarted","Data":"c9315f15ff3bb349f8aab0ce10caa288ee97333ef8bafa9d06f548f0f4b87ded"} Nov 26 07:05:19 crc kubenswrapper[4492]: I1126 07:05:19.334266 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="a8896338-00d7-4330-ba14-3a930d07b9c0" containerName="glance-log" containerID="cri-o://fd0e848f94a82cec187cdc5d8f5c42760e1c593187c075947a0104d2c76a98d9" gracePeriod=30 Nov 26 07:05:19 crc kubenswrapper[4492]: I1126 07:05:19.334329 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="a8896338-00d7-4330-ba14-3a930d07b9c0" containerName="glance-httpd" containerID="cri-o://c9315f15ff3bb349f8aab0ce10caa288ee97333ef8bafa9d06f548f0f4b87ded" gracePeriod=30 Nov 26 07:05:19 crc kubenswrapper[4492]: I1126 07:05:19.337493 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-644687db57-zx74g" event={"ID":"c4316787-8af1-40ef-995a-0f8aabd1bf11","Type":"ContainerStarted","Data":"33504fda659323afc030cf03ee19e44ce5ba97ff096c93e24ce830ad8553392e"} Nov 26 07:05:19 crc kubenswrapper[4492]: I1126 07:05:19.353474 4492 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=6.353460406 podStartE2EDuration="6.353460406s" podCreationTimestamp="2025-11-26 07:05:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:05:19.350028162 +0000 UTC m=+1015.233916460" watchObservedRunningTime="2025-11-26 07:05:19.353460406 +0000 UTC m=+1015.237348703" Nov 26 07:05:20 crc kubenswrapper[4492]: I1126 07:05:20.002324 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 26 07:05:20 crc kubenswrapper[4492]: I1126 07:05:20.076667 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a8896338-00d7-4330-ba14-3a930d07b9c0-config-data\") pod \"a8896338-00d7-4330-ba14-3a930d07b9c0\" (UID: \"a8896338-00d7-4330-ba14-3a930d07b9c0\") " Nov 26 07:05:20 crc kubenswrapper[4492]: I1126 07:05:20.076783 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a8896338-00d7-4330-ba14-3a930d07b9c0-httpd-run\") pod \"a8896338-00d7-4330-ba14-3a930d07b9c0\" (UID: \"a8896338-00d7-4330-ba14-3a930d07b9c0\") " Nov 26 07:05:20 crc kubenswrapper[4492]: I1126 07:05:20.076951 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"a8896338-00d7-4330-ba14-3a930d07b9c0\" (UID: \"a8896338-00d7-4330-ba14-3a930d07b9c0\") " Nov 26 07:05:20 crc kubenswrapper[4492]: I1126 07:05:20.077026 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a8896338-00d7-4330-ba14-3a930d07b9c0-combined-ca-bundle\") pod \"a8896338-00d7-4330-ba14-3a930d07b9c0\" (UID: \"a8896338-00d7-4330-ba14-3a930d07b9c0\") " Nov 26 07:05:20 crc kubenswrapper[4492]: I1126 07:05:20.077144 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a8896338-00d7-4330-ba14-3a930d07b9c0-logs\") pod \"a8896338-00d7-4330-ba14-3a930d07b9c0\" (UID: \"a8896338-00d7-4330-ba14-3a930d07b9c0\") " Nov 26 07:05:20 crc kubenswrapper[4492]: I1126 07:05:20.077253 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m8bdq\" (UniqueName: \"kubernetes.io/projected/a8896338-00d7-4330-ba14-3a930d07b9c0-kube-api-access-m8bdq\") pod \"a8896338-00d7-4330-ba14-3a930d07b9c0\" (UID: \"a8896338-00d7-4330-ba14-3a930d07b9c0\") " Nov 26 07:05:20 crc kubenswrapper[4492]: I1126 07:05:20.077289 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a8896338-00d7-4330-ba14-3a930d07b9c0-scripts\") pod \"a8896338-00d7-4330-ba14-3a930d07b9c0\" (UID: \"a8896338-00d7-4330-ba14-3a930d07b9c0\") " Nov 26 07:05:20 crc kubenswrapper[4492]: I1126 07:05:20.077526 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a8896338-00d7-4330-ba14-3a930d07b9c0-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "a8896338-00d7-4330-ba14-3a930d07b9c0" (UID: "a8896338-00d7-4330-ba14-3a930d07b9c0"). InnerVolumeSpecName "httpd-run". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:05:20 crc kubenswrapper[4492]: I1126 07:05:20.077985 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a8896338-00d7-4330-ba14-3a930d07b9c0-logs" (OuterVolumeSpecName: "logs") pod "a8896338-00d7-4330-ba14-3a930d07b9c0" (UID: "a8896338-00d7-4330-ba14-3a930d07b9c0"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:05:20 crc kubenswrapper[4492]: I1126 07:05:20.078322 4492 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a8896338-00d7-4330-ba14-3a930d07b9c0-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 26 07:05:20 crc kubenswrapper[4492]: I1126 07:05:20.078349 4492 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a8896338-00d7-4330-ba14-3a930d07b9c0-logs\") on node \"crc\" DevicePath \"\"" Nov 26 07:05:20 crc kubenswrapper[4492]: I1126 07:05:20.088747 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a8896338-00d7-4330-ba14-3a930d07b9c0-scripts" (OuterVolumeSpecName: "scripts") pod "a8896338-00d7-4330-ba14-3a930d07b9c0" (UID: "a8896338-00d7-4330-ba14-3a930d07b9c0"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:05:20 crc kubenswrapper[4492]: I1126 07:05:20.088957 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage02-crc" (OuterVolumeSpecName: "glance") pod "a8896338-00d7-4330-ba14-3a930d07b9c0" (UID: "a8896338-00d7-4330-ba14-3a930d07b9c0"). InnerVolumeSpecName "local-storage02-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 26 07:05:20 crc kubenswrapper[4492]: I1126 07:05:20.089670 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a8896338-00d7-4330-ba14-3a930d07b9c0-kube-api-access-m8bdq" (OuterVolumeSpecName: "kube-api-access-m8bdq") pod "a8896338-00d7-4330-ba14-3a930d07b9c0" (UID: "a8896338-00d7-4330-ba14-3a930d07b9c0"). InnerVolumeSpecName "kube-api-access-m8bdq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:05:20 crc kubenswrapper[4492]: I1126 07:05:20.141050 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a8896338-00d7-4330-ba14-3a930d07b9c0-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a8896338-00d7-4330-ba14-3a930d07b9c0" (UID: "a8896338-00d7-4330-ba14-3a930d07b9c0"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:05:20 crc kubenswrapper[4492]: I1126 07:05:20.190336 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m8bdq\" (UniqueName: \"kubernetes.io/projected/a8896338-00d7-4330-ba14-3a930d07b9c0-kube-api-access-m8bdq\") on node \"crc\" DevicePath \"\"" Nov 26 07:05:20 crc kubenswrapper[4492]: I1126 07:05:20.190375 4492 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a8896338-00d7-4330-ba14-3a930d07b9c0-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 07:05:20 crc kubenswrapper[4492]: I1126 07:05:20.190409 4492 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" " Nov 26 07:05:20 crc kubenswrapper[4492]: I1126 07:05:20.190421 4492 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a8896338-00d7-4330-ba14-3a930d07b9c0-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:05:20 crc kubenswrapper[4492]: I1126 07:05:20.216005 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a8896338-00d7-4330-ba14-3a930d07b9c0-config-data" (OuterVolumeSpecName: "config-data") pod "a8896338-00d7-4330-ba14-3a930d07b9c0" (UID: "a8896338-00d7-4330-ba14-3a930d07b9c0"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:05:20 crc kubenswrapper[4492]: I1126 07:05:20.219014 4492 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage02-crc" (UniqueName: "kubernetes.io/local-volume/local-storage02-crc") on node "crc" Nov 26 07:05:20 crc kubenswrapper[4492]: I1126 07:05:20.297140 4492 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a8896338-00d7-4330-ba14-3a930d07b9c0-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 07:05:20 crc kubenswrapper[4492]: I1126 07:05:20.297186 4492 reconciler_common.go:293] "Volume detached for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" DevicePath \"\"" Nov 26 07:05:20 crc kubenswrapper[4492]: I1126 07:05:20.375867 4492 generic.go:334] "Generic (PLEG): container finished" podID="a8896338-00d7-4330-ba14-3a930d07b9c0" containerID="c9315f15ff3bb349f8aab0ce10caa288ee97333ef8bafa9d06f548f0f4b87ded" exitCode=0 Nov 26 07:05:20 crc kubenswrapper[4492]: I1126 07:05:20.375904 4492 generic.go:334] "Generic (PLEG): container finished" podID="a8896338-00d7-4330-ba14-3a930d07b9c0" containerID="fd0e848f94a82cec187cdc5d8f5c42760e1c593187c075947a0104d2c76a98d9" exitCode=143 Nov 26 07:05:20 crc kubenswrapper[4492]: I1126 07:05:20.375971 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"a8896338-00d7-4330-ba14-3a930d07b9c0","Type":"ContainerDied","Data":"c9315f15ff3bb349f8aab0ce10caa288ee97333ef8bafa9d06f548f0f4b87ded"} Nov 26 07:05:20 crc kubenswrapper[4492]: I1126 07:05:20.376005 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"a8896338-00d7-4330-ba14-3a930d07b9c0","Type":"ContainerDied","Data":"fd0e848f94a82cec187cdc5d8f5c42760e1c593187c075947a0104d2c76a98d9"} Nov 26 07:05:20 crc kubenswrapper[4492]: I1126 07:05:20.376018 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/glance-default-external-api-0" event={"ID":"a8896338-00d7-4330-ba14-3a930d07b9c0","Type":"ContainerDied","Data":"5b653d6d7c51975475a9d6644992d0bb7aa94c1fe4a8da14d50b65cb1839b5eb"} Nov 26 07:05:20 crc kubenswrapper[4492]: I1126 07:05:20.376036 4492 scope.go:117] "RemoveContainer" containerID="c9315f15ff3bb349f8aab0ce10caa288ee97333ef8bafa9d06f548f0f4b87ded" Nov 26 07:05:20 crc kubenswrapper[4492]: I1126 07:05:20.376073 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 26 07:05:20 crc kubenswrapper[4492]: I1126 07:05:20.378186 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"0dfd6d10-c083-4e37-8ce9-7578e8ea8fd1","Type":"ContainerStarted","Data":"9b5b734d7f1b37d0305a38136786f94e4cf5eed08063acaab146c41177295df3"} Nov 26 07:05:20 crc kubenswrapper[4492]: I1126 07:05:20.378290 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="0dfd6d10-c083-4e37-8ce9-7578e8ea8fd1" containerName="glance-log" containerID="cri-o://6ed660ae76bb03fcdb60ad10cab39cce896c0b44698ded3973c28fc7db5ffffe" gracePeriod=30 Nov 26 07:05:20 crc kubenswrapper[4492]: I1126 07:05:20.378375 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="0dfd6d10-c083-4e37-8ce9-7578e8ea8fd1" containerName="glance-httpd" containerID="cri-o://9b5b734d7f1b37d0305a38136786f94e4cf5eed08063acaab146c41177295df3" gracePeriod=30 Nov 26 07:05:20 crc kubenswrapper[4492]: I1126 07:05:20.408728 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=6.408709312 podStartE2EDuration="6.408709312s" podCreationTimestamp="2025-11-26 07:05:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:05:20.397927209 +0000 UTC m=+1016.281815508" watchObservedRunningTime="2025-11-26 07:05:20.408709312 +0000 UTC m=+1016.292597600" Nov 26 07:05:20 crc kubenswrapper[4492]: I1126 07:05:20.514897 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 07:05:20 crc kubenswrapper[4492]: I1126 07:05:20.535238 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 07:05:20 crc kubenswrapper[4492]: I1126 07:05:20.553632 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 07:05:20 crc kubenswrapper[4492]: E1126 07:05:20.554359 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a8896338-00d7-4330-ba14-3a930d07b9c0" containerName="glance-httpd" Nov 26 07:05:20 crc kubenswrapper[4492]: I1126 07:05:20.554375 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="a8896338-00d7-4330-ba14-3a930d07b9c0" containerName="glance-httpd" Nov 26 07:05:20 crc kubenswrapper[4492]: E1126 07:05:20.554394 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a8896338-00d7-4330-ba14-3a930d07b9c0" containerName="glance-log" Nov 26 07:05:20 crc kubenswrapper[4492]: I1126 07:05:20.554400 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="a8896338-00d7-4330-ba14-3a930d07b9c0" containerName="glance-log" Nov 26 07:05:20 crc kubenswrapper[4492]: I1126 07:05:20.554612 4492 memory_manager.go:354] "RemoveStaleState 
removing state" podUID="a8896338-00d7-4330-ba14-3a930d07b9c0" containerName="glance-log" Nov 26 07:05:20 crc kubenswrapper[4492]: I1126 07:05:20.554628 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="a8896338-00d7-4330-ba14-3a930d07b9c0" containerName="glance-httpd" Nov 26 07:05:20 crc kubenswrapper[4492]: I1126 07:05:20.557687 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 26 07:05:20 crc kubenswrapper[4492]: I1126 07:05:20.560797 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 07:05:20 crc kubenswrapper[4492]: I1126 07:05:20.567600 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 26 07:05:20 crc kubenswrapper[4492]: I1126 07:05:20.596269 4492 scope.go:117] "RemoveContainer" containerID="fd0e848f94a82cec187cdc5d8f5c42760e1c593187c075947a0104d2c76a98d9" Nov 26 07:05:20 crc kubenswrapper[4492]: I1126 07:05:20.687360 4492 scope.go:117] "RemoveContainer" containerID="c9315f15ff3bb349f8aab0ce10caa288ee97333ef8bafa9d06f548f0f4b87ded" Nov 26 07:05:20 crc kubenswrapper[4492]: E1126 07:05:20.688340 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c9315f15ff3bb349f8aab0ce10caa288ee97333ef8bafa9d06f548f0f4b87ded\": container with ID starting with c9315f15ff3bb349f8aab0ce10caa288ee97333ef8bafa9d06f548f0f4b87ded not found: ID does not exist" containerID="c9315f15ff3bb349f8aab0ce10caa288ee97333ef8bafa9d06f548f0f4b87ded" Nov 26 07:05:20 crc kubenswrapper[4492]: I1126 07:05:20.688412 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c9315f15ff3bb349f8aab0ce10caa288ee97333ef8bafa9d06f548f0f4b87ded"} err="failed to get container status \"c9315f15ff3bb349f8aab0ce10caa288ee97333ef8bafa9d06f548f0f4b87ded\": rpc error: code = NotFound desc = could not find container \"c9315f15ff3bb349f8aab0ce10caa288ee97333ef8bafa9d06f548f0f4b87ded\": container with ID starting with c9315f15ff3bb349f8aab0ce10caa288ee97333ef8bafa9d06f548f0f4b87ded not found: ID does not exist" Nov 26 07:05:20 crc kubenswrapper[4492]: I1126 07:05:20.688452 4492 scope.go:117] "RemoveContainer" containerID="fd0e848f94a82cec187cdc5d8f5c42760e1c593187c075947a0104d2c76a98d9" Nov 26 07:05:20 crc kubenswrapper[4492]: E1126 07:05:20.696185 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fd0e848f94a82cec187cdc5d8f5c42760e1c593187c075947a0104d2c76a98d9\": container with ID starting with fd0e848f94a82cec187cdc5d8f5c42760e1c593187c075947a0104d2c76a98d9 not found: ID does not exist" containerID="fd0e848f94a82cec187cdc5d8f5c42760e1c593187c075947a0104d2c76a98d9" Nov 26 07:05:20 crc kubenswrapper[4492]: I1126 07:05:20.696225 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fd0e848f94a82cec187cdc5d8f5c42760e1c593187c075947a0104d2c76a98d9"} err="failed to get container status \"fd0e848f94a82cec187cdc5d8f5c42760e1c593187c075947a0104d2c76a98d9\": rpc error: code = NotFound desc = could not find container \"fd0e848f94a82cec187cdc5d8f5c42760e1c593187c075947a0104d2c76a98d9\": container with ID starting with fd0e848f94a82cec187cdc5d8f5c42760e1c593187c075947a0104d2c76a98d9 not found: ID does not exist" Nov 26 07:05:20 crc kubenswrapper[4492]: I1126 07:05:20.696255 4492 scope.go:117] 
"RemoveContainer" containerID="c9315f15ff3bb349f8aab0ce10caa288ee97333ef8bafa9d06f548f0f4b87ded" Nov 26 07:05:20 crc kubenswrapper[4492]: I1126 07:05:20.698842 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c9315f15ff3bb349f8aab0ce10caa288ee97333ef8bafa9d06f548f0f4b87ded"} err="failed to get container status \"c9315f15ff3bb349f8aab0ce10caa288ee97333ef8bafa9d06f548f0f4b87ded\": rpc error: code = NotFound desc = could not find container \"c9315f15ff3bb349f8aab0ce10caa288ee97333ef8bafa9d06f548f0f4b87ded\": container with ID starting with c9315f15ff3bb349f8aab0ce10caa288ee97333ef8bafa9d06f548f0f4b87ded not found: ID does not exist" Nov 26 07:05:20 crc kubenswrapper[4492]: I1126 07:05:20.698862 4492 scope.go:117] "RemoveContainer" containerID="fd0e848f94a82cec187cdc5d8f5c42760e1c593187c075947a0104d2c76a98d9" Nov 26 07:05:20 crc kubenswrapper[4492]: I1126 07:05:20.699805 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fd0e848f94a82cec187cdc5d8f5c42760e1c593187c075947a0104d2c76a98d9"} err="failed to get container status \"fd0e848f94a82cec187cdc5d8f5c42760e1c593187c075947a0104d2c76a98d9\": rpc error: code = NotFound desc = could not find container \"fd0e848f94a82cec187cdc5d8f5c42760e1c593187c075947a0104d2c76a98d9\": container with ID starting with fd0e848f94a82cec187cdc5d8f5c42760e1c593187c075947a0104d2c76a98d9 not found: ID does not exist" Nov 26 07:05:20 crc kubenswrapper[4492]: I1126 07:05:20.723503 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2zdls\" (UniqueName: \"kubernetes.io/projected/bd7a8c3f-6281-4b33-a56a-2a93844e584d-kube-api-access-2zdls\") pod \"glance-default-external-api-0\" (UID: \"bd7a8c3f-6281-4b33-a56a-2a93844e584d\") " pod="openstack/glance-default-external-api-0" Nov 26 07:05:20 crc kubenswrapper[4492]: I1126 07:05:20.723535 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bd7a8c3f-6281-4b33-a56a-2a93844e584d-logs\") pod \"glance-default-external-api-0\" (UID: \"bd7a8c3f-6281-4b33-a56a-2a93844e584d\") " pod="openstack/glance-default-external-api-0" Nov 26 07:05:20 crc kubenswrapper[4492]: I1126 07:05:20.723571 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd7a8c3f-6281-4b33-a56a-2a93844e584d-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"bd7a8c3f-6281-4b33-a56a-2a93844e584d\") " pod="openstack/glance-default-external-api-0" Nov 26 07:05:20 crc kubenswrapper[4492]: I1126 07:05:20.723616 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-external-api-0\" (UID: \"bd7a8c3f-6281-4b33-a56a-2a93844e584d\") " pod="openstack/glance-default-external-api-0" Nov 26 07:05:20 crc kubenswrapper[4492]: I1126 07:05:20.723631 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bd7a8c3f-6281-4b33-a56a-2a93844e584d-scripts\") pod \"glance-default-external-api-0\" (UID: \"bd7a8c3f-6281-4b33-a56a-2a93844e584d\") " pod="openstack/glance-default-external-api-0" Nov 26 07:05:20 crc kubenswrapper[4492]: I1126 07:05:20.723665 4492 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bd7a8c3f-6281-4b33-a56a-2a93844e584d-config-data\") pod \"glance-default-external-api-0\" (UID: \"bd7a8c3f-6281-4b33-a56a-2a93844e584d\") " pod="openstack/glance-default-external-api-0" Nov 26 07:05:20 crc kubenswrapper[4492]: I1126 07:05:20.723693 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/bd7a8c3f-6281-4b33-a56a-2a93844e584d-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"bd7a8c3f-6281-4b33-a56a-2a93844e584d\") " pod="openstack/glance-default-external-api-0" Nov 26 07:05:20 crc kubenswrapper[4492]: I1126 07:05:20.825617 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-external-api-0\" (UID: \"bd7a8c3f-6281-4b33-a56a-2a93844e584d\") " pod="openstack/glance-default-external-api-0" Nov 26 07:05:20 crc kubenswrapper[4492]: I1126 07:05:20.825666 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bd7a8c3f-6281-4b33-a56a-2a93844e584d-scripts\") pod \"glance-default-external-api-0\" (UID: \"bd7a8c3f-6281-4b33-a56a-2a93844e584d\") " pod="openstack/glance-default-external-api-0" Nov 26 07:05:20 crc kubenswrapper[4492]: I1126 07:05:20.825733 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bd7a8c3f-6281-4b33-a56a-2a93844e584d-config-data\") pod \"glance-default-external-api-0\" (UID: \"bd7a8c3f-6281-4b33-a56a-2a93844e584d\") " pod="openstack/glance-default-external-api-0" Nov 26 07:05:20 crc kubenswrapper[4492]: I1126 07:05:20.825785 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/bd7a8c3f-6281-4b33-a56a-2a93844e584d-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"bd7a8c3f-6281-4b33-a56a-2a93844e584d\") " pod="openstack/glance-default-external-api-0" Nov 26 07:05:20 crc kubenswrapper[4492]: I1126 07:05:20.825907 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2zdls\" (UniqueName: \"kubernetes.io/projected/bd7a8c3f-6281-4b33-a56a-2a93844e584d-kube-api-access-2zdls\") pod \"glance-default-external-api-0\" (UID: \"bd7a8c3f-6281-4b33-a56a-2a93844e584d\") " pod="openstack/glance-default-external-api-0" Nov 26 07:05:20 crc kubenswrapper[4492]: I1126 07:05:20.825933 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bd7a8c3f-6281-4b33-a56a-2a93844e584d-logs\") pod \"glance-default-external-api-0\" (UID: \"bd7a8c3f-6281-4b33-a56a-2a93844e584d\") " pod="openstack/glance-default-external-api-0" Nov 26 07:05:20 crc kubenswrapper[4492]: I1126 07:05:20.825979 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd7a8c3f-6281-4b33-a56a-2a93844e584d-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"bd7a8c3f-6281-4b33-a56a-2a93844e584d\") " pod="openstack/glance-default-external-api-0" Nov 26 07:05:20 crc kubenswrapper[4492]: I1126 07:05:20.826634 4492 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume 
\"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-external-api-0\" (UID: \"bd7a8c3f-6281-4b33-a56a-2a93844e584d\") device mount path \"/mnt/openstack/pv02\"" pod="openstack/glance-default-external-api-0" Nov 26 07:05:20 crc kubenswrapper[4492]: I1126 07:05:20.827024 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/bd7a8c3f-6281-4b33-a56a-2a93844e584d-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"bd7a8c3f-6281-4b33-a56a-2a93844e584d\") " pod="openstack/glance-default-external-api-0" Nov 26 07:05:20 crc kubenswrapper[4492]: I1126 07:05:20.827264 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bd7a8c3f-6281-4b33-a56a-2a93844e584d-logs\") pod \"glance-default-external-api-0\" (UID: \"bd7a8c3f-6281-4b33-a56a-2a93844e584d\") " pod="openstack/glance-default-external-api-0" Nov 26 07:05:20 crc kubenswrapper[4492]: I1126 07:05:20.835340 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd7a8c3f-6281-4b33-a56a-2a93844e584d-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"bd7a8c3f-6281-4b33-a56a-2a93844e584d\") " pod="openstack/glance-default-external-api-0" Nov 26 07:05:20 crc kubenswrapper[4492]: I1126 07:05:20.837387 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bd7a8c3f-6281-4b33-a56a-2a93844e584d-config-data\") pod \"glance-default-external-api-0\" (UID: \"bd7a8c3f-6281-4b33-a56a-2a93844e584d\") " pod="openstack/glance-default-external-api-0" Nov 26 07:05:20 crc kubenswrapper[4492]: I1126 07:05:20.841215 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bd7a8c3f-6281-4b33-a56a-2a93844e584d-scripts\") pod \"glance-default-external-api-0\" (UID: \"bd7a8c3f-6281-4b33-a56a-2a93844e584d\") " pod="openstack/glance-default-external-api-0" Nov 26 07:05:20 crc kubenswrapper[4492]: I1126 07:05:20.872824 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2zdls\" (UniqueName: \"kubernetes.io/projected/bd7a8c3f-6281-4b33-a56a-2a93844e584d-kube-api-access-2zdls\") pod \"glance-default-external-api-0\" (UID: \"bd7a8c3f-6281-4b33-a56a-2a93844e584d\") " pod="openstack/glance-default-external-api-0" Nov 26 07:05:20 crc kubenswrapper[4492]: I1126 07:05:20.873129 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-external-api-0\" (UID: \"bd7a8c3f-6281-4b33-a56a-2a93844e584d\") " pod="openstack/glance-default-external-api-0" Nov 26 07:05:20 crc kubenswrapper[4492]: I1126 07:05:20.885986 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 26 07:05:21 crc kubenswrapper[4492]: I1126 07:05:21.410405 4492 generic.go:334] "Generic (PLEG): container finished" podID="0dfd6d10-c083-4e37-8ce9-7578e8ea8fd1" containerID="9b5b734d7f1b37d0305a38136786f94e4cf5eed08063acaab146c41177295df3" exitCode=0 Nov 26 07:05:21 crc kubenswrapper[4492]: I1126 07:05:21.410434 4492 generic.go:334] "Generic (PLEG): container finished" podID="0dfd6d10-c083-4e37-8ce9-7578e8ea8fd1" containerID="6ed660ae76bb03fcdb60ad10cab39cce896c0b44698ded3973c28fc7db5ffffe" exitCode=143 Nov 26 07:05:21 crc kubenswrapper[4492]: I1126 07:05:21.410494 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"0dfd6d10-c083-4e37-8ce9-7578e8ea8fd1","Type":"ContainerDied","Data":"9b5b734d7f1b37d0305a38136786f94e4cf5eed08063acaab146c41177295df3"} Nov 26 07:05:21 crc kubenswrapper[4492]: I1126 07:05:21.410522 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"0dfd6d10-c083-4e37-8ce9-7578e8ea8fd1","Type":"ContainerDied","Data":"6ed660ae76bb03fcdb60ad10cab39cce896c0b44698ded3973c28fc7db5ffffe"} Nov 26 07:05:21 crc kubenswrapper[4492]: I1126 07:05:21.419414 4492 generic.go:334] "Generic (PLEG): container finished" podID="7696e706-4bee-4acb-9c67-58a83178c948" containerID="688e1ee5135a74741f4ad3c8139cc1c161a858d58e4c1bbb4a48916b444dc333" exitCode=0 Nov 26 07:05:21 crc kubenswrapper[4492]: I1126 07:05:21.419438 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-gzdq2" event={"ID":"7696e706-4bee-4acb-9c67-58a83178c948","Type":"ContainerDied","Data":"688e1ee5135a74741f4ad3c8139cc1c161a858d58e4c1bbb4a48916b444dc333"} Nov 26 07:05:21 crc kubenswrapper[4492]: I1126 07:05:21.475815 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 07:05:21 crc kubenswrapper[4492]: I1126 07:05:21.581850 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 07:05:21 crc kubenswrapper[4492]: W1126 07:05:21.596945 4492 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbd7a8c3f_6281_4b33_a56a_2a93844e584d.slice/crio-bd7858a4065c31ad310468d7e37fc6d985b4c57ef605ecf45399f6b158e6a53d WatchSource:0}: Error finding container bd7858a4065c31ad310468d7e37fc6d985b4c57ef605ecf45399f6b158e6a53d: Status 404 returned error can't find the container with id bd7858a4065c31ad310468d7e37fc6d985b4c57ef605ecf45399f6b158e6a53d Nov 26 07:05:21 crc kubenswrapper[4492]: I1126 07:05:21.862913 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 26 07:05:21 crc kubenswrapper[4492]: I1126 07:05:21.952353 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0dfd6d10-c083-4e37-8ce9-7578e8ea8fd1-logs\") pod \"0dfd6d10-c083-4e37-8ce9-7578e8ea8fd1\" (UID: \"0dfd6d10-c083-4e37-8ce9-7578e8ea8fd1\") " Nov 26 07:05:21 crc kubenswrapper[4492]: I1126 07:05:21.952393 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0dfd6d10-c083-4e37-8ce9-7578e8ea8fd1-config-data\") pod \"0dfd6d10-c083-4e37-8ce9-7578e8ea8fd1\" (UID: \"0dfd6d10-c083-4e37-8ce9-7578e8ea8fd1\") " Nov 26 07:05:21 crc kubenswrapper[4492]: I1126 07:05:21.952514 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0dfd6d10-c083-4e37-8ce9-7578e8ea8fd1-combined-ca-bundle\") pod \"0dfd6d10-c083-4e37-8ce9-7578e8ea8fd1\" (UID: \"0dfd6d10-c083-4e37-8ce9-7578e8ea8fd1\") " Nov 26 07:05:21 crc kubenswrapper[4492]: I1126 07:05:21.952605 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0dfd6d10-c083-4e37-8ce9-7578e8ea8fd1-scripts\") pod \"0dfd6d10-c083-4e37-8ce9-7578e8ea8fd1\" (UID: \"0dfd6d10-c083-4e37-8ce9-7578e8ea8fd1\") " Nov 26 07:05:21 crc kubenswrapper[4492]: I1126 07:05:21.953696 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/0dfd6d10-c083-4e37-8ce9-7578e8ea8fd1-httpd-run\") pod \"0dfd6d10-c083-4e37-8ce9-7578e8ea8fd1\" (UID: \"0dfd6d10-c083-4e37-8ce9-7578e8ea8fd1\") " Nov 26 07:05:21 crc kubenswrapper[4492]: I1126 07:05:21.953772 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jsl64\" (UniqueName: \"kubernetes.io/projected/0dfd6d10-c083-4e37-8ce9-7578e8ea8fd1-kube-api-access-jsl64\") pod \"0dfd6d10-c083-4e37-8ce9-7578e8ea8fd1\" (UID: \"0dfd6d10-c083-4e37-8ce9-7578e8ea8fd1\") " Nov 26 07:05:21 crc kubenswrapper[4492]: I1126 07:05:21.953817 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"0dfd6d10-c083-4e37-8ce9-7578e8ea8fd1\" (UID: \"0dfd6d10-c083-4e37-8ce9-7578e8ea8fd1\") " Nov 26 07:05:21 crc kubenswrapper[4492]: I1126 07:05:21.953864 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0dfd6d10-c083-4e37-8ce9-7578e8ea8fd1-logs" (OuterVolumeSpecName: "logs") pod "0dfd6d10-c083-4e37-8ce9-7578e8ea8fd1" (UID: "0dfd6d10-c083-4e37-8ce9-7578e8ea8fd1"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:05:21 crc kubenswrapper[4492]: I1126 07:05:21.954037 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0dfd6d10-c083-4e37-8ce9-7578e8ea8fd1-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "0dfd6d10-c083-4e37-8ce9-7578e8ea8fd1" (UID: "0dfd6d10-c083-4e37-8ce9-7578e8ea8fd1"). InnerVolumeSpecName "httpd-run". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:05:21 crc kubenswrapper[4492]: I1126 07:05:21.954430 4492 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/0dfd6d10-c083-4e37-8ce9-7578e8ea8fd1-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 26 07:05:21 crc kubenswrapper[4492]: I1126 07:05:21.954449 4492 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0dfd6d10-c083-4e37-8ce9-7578e8ea8fd1-logs\") on node \"crc\" DevicePath \"\"" Nov 26 07:05:21 crc kubenswrapper[4492]: I1126 07:05:21.960726 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0dfd6d10-c083-4e37-8ce9-7578e8ea8fd1-kube-api-access-jsl64" (OuterVolumeSpecName: "kube-api-access-jsl64") pod "0dfd6d10-c083-4e37-8ce9-7578e8ea8fd1" (UID: "0dfd6d10-c083-4e37-8ce9-7578e8ea8fd1"). InnerVolumeSpecName "kube-api-access-jsl64". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:05:21 crc kubenswrapper[4492]: I1126 07:05:21.962199 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage05-crc" (OuterVolumeSpecName: "glance") pod "0dfd6d10-c083-4e37-8ce9-7578e8ea8fd1" (UID: "0dfd6d10-c083-4e37-8ce9-7578e8ea8fd1"). InnerVolumeSpecName "local-storage05-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 26 07:05:21 crc kubenswrapper[4492]: I1126 07:05:21.968503 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0dfd6d10-c083-4e37-8ce9-7578e8ea8fd1-scripts" (OuterVolumeSpecName: "scripts") pod "0dfd6d10-c083-4e37-8ce9-7578e8ea8fd1" (UID: "0dfd6d10-c083-4e37-8ce9-7578e8ea8fd1"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:05:22 crc kubenswrapper[4492]: I1126 07:05:22.002614 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0dfd6d10-c083-4e37-8ce9-7578e8ea8fd1-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0dfd6d10-c083-4e37-8ce9-7578e8ea8fd1" (UID: "0dfd6d10-c083-4e37-8ce9-7578e8ea8fd1"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:05:22 crc kubenswrapper[4492]: I1126 07:05:22.015147 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0dfd6d10-c083-4e37-8ce9-7578e8ea8fd1-config-data" (OuterVolumeSpecName: "config-data") pod "0dfd6d10-c083-4e37-8ce9-7578e8ea8fd1" (UID: "0dfd6d10-c083-4e37-8ce9-7578e8ea8fd1"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:05:22 crc kubenswrapper[4492]: I1126 07:05:22.060916 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jsl64\" (UniqueName: \"kubernetes.io/projected/0dfd6d10-c083-4e37-8ce9-7578e8ea8fd1-kube-api-access-jsl64\") on node \"crc\" DevicePath \"\"" Nov 26 07:05:22 crc kubenswrapper[4492]: I1126 07:05:22.061027 4492 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") on node \"crc\" " Nov 26 07:05:22 crc kubenswrapper[4492]: I1126 07:05:22.061050 4492 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0dfd6d10-c083-4e37-8ce9-7578e8ea8fd1-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 07:05:22 crc kubenswrapper[4492]: I1126 07:05:22.061080 4492 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0dfd6d10-c083-4e37-8ce9-7578e8ea8fd1-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:05:22 crc kubenswrapper[4492]: I1126 07:05:22.061095 4492 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0dfd6d10-c083-4e37-8ce9-7578e8ea8fd1-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 07:05:22 crc kubenswrapper[4492]: I1126 07:05:22.085514 4492 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage05-crc" (UniqueName: "kubernetes.io/local-volume/local-storage05-crc") on node "crc" Nov 26 07:05:22 crc kubenswrapper[4492]: I1126 07:05:22.163125 4492 reconciler_common.go:293] "Volume detached for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") on node \"crc\" DevicePath \"\"" Nov 26 07:05:22 crc kubenswrapper[4492]: I1126 07:05:22.461119 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a8896338-00d7-4330-ba14-3a930d07b9c0" path="/var/lib/kubelet/pods/a8896338-00d7-4330-ba14-3a930d07b9c0/volumes" Nov 26 07:05:22 crc kubenswrapper[4492]: I1126 07:05:22.462267 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"bd7a8c3f-6281-4b33-a56a-2a93844e584d","Type":"ContainerStarted","Data":"bd7858a4065c31ad310468d7e37fc6d985b4c57ef605ecf45399f6b158e6a53d"} Nov 26 07:05:22 crc kubenswrapper[4492]: I1126 07:05:22.466411 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"0dfd6d10-c083-4e37-8ce9-7578e8ea8fd1","Type":"ContainerDied","Data":"5bd4327fc7b5ee5ed23538e4a3c65718aa71901419efa9891ccb9c000d4ea86c"} Nov 26 07:05:22 crc kubenswrapper[4492]: I1126 07:05:22.466486 4492 scope.go:117] "RemoveContainer" containerID="9b5b734d7f1b37d0305a38136786f94e4cf5eed08063acaab146c41177295df3" Nov 26 07:05:22 crc kubenswrapper[4492]: I1126 07:05:22.466496 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 26 07:05:22 crc kubenswrapper[4492]: I1126 07:05:22.538043 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 07:05:22 crc kubenswrapper[4492]: I1126 07:05:22.552359 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 07:05:22 crc kubenswrapper[4492]: I1126 07:05:22.576550 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 07:05:22 crc kubenswrapper[4492]: E1126 07:05:22.579156 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0dfd6d10-c083-4e37-8ce9-7578e8ea8fd1" containerName="glance-log" Nov 26 07:05:22 crc kubenswrapper[4492]: I1126 07:05:22.579201 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="0dfd6d10-c083-4e37-8ce9-7578e8ea8fd1" containerName="glance-log" Nov 26 07:05:22 crc kubenswrapper[4492]: E1126 07:05:22.579229 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0dfd6d10-c083-4e37-8ce9-7578e8ea8fd1" containerName="glance-httpd" Nov 26 07:05:22 crc kubenswrapper[4492]: I1126 07:05:22.579236 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="0dfd6d10-c083-4e37-8ce9-7578e8ea8fd1" containerName="glance-httpd" Nov 26 07:05:22 crc kubenswrapper[4492]: I1126 07:05:22.579700 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="0dfd6d10-c083-4e37-8ce9-7578e8ea8fd1" containerName="glance-httpd" Nov 26 07:05:22 crc kubenswrapper[4492]: I1126 07:05:22.579714 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="0dfd6d10-c083-4e37-8ce9-7578e8ea8fd1" containerName="glance-log" Nov 26 07:05:22 crc kubenswrapper[4492]: I1126 07:05:22.594322 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 26 07:05:22 crc kubenswrapper[4492]: I1126 07:05:22.597367 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 26 07:05:22 crc kubenswrapper[4492]: I1126 07:05:22.598386 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Nov 26 07:05:22 crc kubenswrapper[4492]: I1126 07:05:22.668535 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 07:05:22 crc kubenswrapper[4492]: I1126 07:05:22.682680 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0033e995-8279-4229-8ea7-7339427960a8-config-data\") pod \"glance-default-internal-api-0\" (UID: \"0033e995-8279-4229-8ea7-7339427960a8\") " pod="openstack/glance-default-internal-api-0" Nov 26 07:05:22 crc kubenswrapper[4492]: I1126 07:05:22.682977 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0033e995-8279-4229-8ea7-7339427960a8-scripts\") pod \"glance-default-internal-api-0\" (UID: \"0033e995-8279-4229-8ea7-7339427960a8\") " pod="openstack/glance-default-internal-api-0" Nov 26 07:05:22 crc kubenswrapper[4492]: I1126 07:05:22.683120 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-internal-api-0\" (UID: \"0033e995-8279-4229-8ea7-7339427960a8\") " pod="openstack/glance-default-internal-api-0" Nov 26 07:05:22 crc kubenswrapper[4492]: I1126 07:05:22.684075 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0033e995-8279-4229-8ea7-7339427960a8-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"0033e995-8279-4229-8ea7-7339427960a8\") " pod="openstack/glance-default-internal-api-0" Nov 26 07:05:22 crc kubenswrapper[4492]: I1126 07:05:22.684475 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0033e995-8279-4229-8ea7-7339427960a8-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"0033e995-8279-4229-8ea7-7339427960a8\") " pod="openstack/glance-default-internal-api-0" Nov 26 07:05:22 crc kubenswrapper[4492]: I1126 07:05:22.685108 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/0033e995-8279-4229-8ea7-7339427960a8-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"0033e995-8279-4229-8ea7-7339427960a8\") " pod="openstack/glance-default-internal-api-0" Nov 26 07:05:22 crc kubenswrapper[4492]: I1126 07:05:22.685196 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h5tnz\" (UniqueName: \"kubernetes.io/projected/0033e995-8279-4229-8ea7-7339427960a8-kube-api-access-h5tnz\") pod \"glance-default-internal-api-0\" (UID: \"0033e995-8279-4229-8ea7-7339427960a8\") " pod="openstack/glance-default-internal-api-0" Nov 26 07:05:22 crc kubenswrapper[4492]: I1126 07:05:22.685480 4492 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0033e995-8279-4229-8ea7-7339427960a8-logs\") pod \"glance-default-internal-api-0\" (UID: \"0033e995-8279-4229-8ea7-7339427960a8\") " pod="openstack/glance-default-internal-api-0" Nov 26 07:05:22 crc kubenswrapper[4492]: I1126 07:05:22.790661 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0033e995-8279-4229-8ea7-7339427960a8-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"0033e995-8279-4229-8ea7-7339427960a8\") " pod="openstack/glance-default-internal-api-0" Nov 26 07:05:22 crc kubenswrapper[4492]: I1126 07:05:22.790723 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0033e995-8279-4229-8ea7-7339427960a8-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"0033e995-8279-4229-8ea7-7339427960a8\") " pod="openstack/glance-default-internal-api-0" Nov 26 07:05:22 crc kubenswrapper[4492]: I1126 07:05:22.790751 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/0033e995-8279-4229-8ea7-7339427960a8-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"0033e995-8279-4229-8ea7-7339427960a8\") " pod="openstack/glance-default-internal-api-0" Nov 26 07:05:22 crc kubenswrapper[4492]: I1126 07:05:22.790769 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h5tnz\" (UniqueName: \"kubernetes.io/projected/0033e995-8279-4229-8ea7-7339427960a8-kube-api-access-h5tnz\") pod \"glance-default-internal-api-0\" (UID: \"0033e995-8279-4229-8ea7-7339427960a8\") " pod="openstack/glance-default-internal-api-0" Nov 26 07:05:22 crc kubenswrapper[4492]: I1126 07:05:22.790807 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0033e995-8279-4229-8ea7-7339427960a8-logs\") pod \"glance-default-internal-api-0\" (UID: \"0033e995-8279-4229-8ea7-7339427960a8\") " pod="openstack/glance-default-internal-api-0" Nov 26 07:05:22 crc kubenswrapper[4492]: I1126 07:05:22.790857 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0033e995-8279-4229-8ea7-7339427960a8-config-data\") pod \"glance-default-internal-api-0\" (UID: \"0033e995-8279-4229-8ea7-7339427960a8\") " pod="openstack/glance-default-internal-api-0" Nov 26 07:05:22 crc kubenswrapper[4492]: I1126 07:05:22.790916 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0033e995-8279-4229-8ea7-7339427960a8-scripts\") pod \"glance-default-internal-api-0\" (UID: \"0033e995-8279-4229-8ea7-7339427960a8\") " pod="openstack/glance-default-internal-api-0" Nov 26 07:05:22 crc kubenswrapper[4492]: I1126 07:05:22.791727 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/0033e995-8279-4229-8ea7-7339427960a8-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"0033e995-8279-4229-8ea7-7339427960a8\") " pod="openstack/glance-default-internal-api-0" Nov 26 07:05:22 crc kubenswrapper[4492]: I1126 07:05:22.793733 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage05-crc\" (UniqueName: 
\"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-internal-api-0\" (UID: \"0033e995-8279-4229-8ea7-7339427960a8\") " pod="openstack/glance-default-internal-api-0" Nov 26 07:05:22 crc kubenswrapper[4492]: I1126 07:05:22.797400 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0033e995-8279-4229-8ea7-7339427960a8-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"0033e995-8279-4229-8ea7-7339427960a8\") " pod="openstack/glance-default-internal-api-0" Nov 26 07:05:22 crc kubenswrapper[4492]: I1126 07:05:22.798619 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0033e995-8279-4229-8ea7-7339427960a8-config-data\") pod \"glance-default-internal-api-0\" (UID: \"0033e995-8279-4229-8ea7-7339427960a8\") " pod="openstack/glance-default-internal-api-0" Nov 26 07:05:22 crc kubenswrapper[4492]: I1126 07:05:22.798624 4492 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-internal-api-0\" (UID: \"0033e995-8279-4229-8ea7-7339427960a8\") device mount path \"/mnt/openstack/pv05\"" pod="openstack/glance-default-internal-api-0" Nov 26 07:05:22 crc kubenswrapper[4492]: I1126 07:05:22.798905 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0033e995-8279-4229-8ea7-7339427960a8-logs\") pod \"glance-default-internal-api-0\" (UID: \"0033e995-8279-4229-8ea7-7339427960a8\") " pod="openstack/glance-default-internal-api-0" Nov 26 07:05:22 crc kubenswrapper[4492]: I1126 07:05:22.798720 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0033e995-8279-4229-8ea7-7339427960a8-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"0033e995-8279-4229-8ea7-7339427960a8\") " pod="openstack/glance-default-internal-api-0" Nov 26 07:05:22 crc kubenswrapper[4492]: I1126 07:05:22.801961 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0033e995-8279-4229-8ea7-7339427960a8-scripts\") pod \"glance-default-internal-api-0\" (UID: \"0033e995-8279-4229-8ea7-7339427960a8\") " pod="openstack/glance-default-internal-api-0" Nov 26 07:05:22 crc kubenswrapper[4492]: I1126 07:05:22.808606 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h5tnz\" (UniqueName: \"kubernetes.io/projected/0033e995-8279-4229-8ea7-7339427960a8-kube-api-access-h5tnz\") pod \"glance-default-internal-api-0\" (UID: \"0033e995-8279-4229-8ea7-7339427960a8\") " pod="openstack/glance-default-internal-api-0" Nov 26 07:05:22 crc kubenswrapper[4492]: I1126 07:05:22.825388 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-internal-api-0\" (UID: \"0033e995-8279-4229-8ea7-7339427960a8\") " pod="openstack/glance-default-internal-api-0" Nov 26 07:05:22 crc kubenswrapper[4492]: I1126 07:05:22.930465 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 26 07:05:23 crc kubenswrapper[4492]: I1126 07:05:23.490162 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"bd7a8c3f-6281-4b33-a56a-2a93844e584d","Type":"ContainerStarted","Data":"5cfe2cdbc5fbe39dfa408de2f3d42ecc8fcd3a1ea462a7493c76e15b87cec719"} Nov 26 07:05:23 crc kubenswrapper[4492]: I1126 07:05:23.959010 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-6688b9cf7f-qsr5d"] Nov 26 07:05:24 crc kubenswrapper[4492]: I1126 07:05:24.028138 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-7bb6557f96-rgc7g"] Nov 26 07:05:24 crc kubenswrapper[4492]: I1126 07:05:24.033891 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-7bb6557f96-rgc7g" Nov 26 07:05:24 crc kubenswrapper[4492]: I1126 07:05:24.041064 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-horizon-svc" Nov 26 07:05:24 crc kubenswrapper[4492]: I1126 07:05:24.048887 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-7bb6557f96-rgc7g"] Nov 26 07:05:24 crc kubenswrapper[4492]: I1126 07:05:24.135664 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-644687db57-zx74g"] Nov 26 07:05:24 crc kubenswrapper[4492]: I1126 07:05:24.145360 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-6b48756c9b-4pxln"] Nov 26 07:05:24 crc kubenswrapper[4492]: I1126 07:05:24.148309 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-6b48756c9b-4pxln" Nov 26 07:05:24 crc kubenswrapper[4492]: I1126 07:05:24.150739 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-6b48756c9b-4pxln"] Nov 26 07:05:24 crc kubenswrapper[4492]: I1126 07:05:24.164472 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a15ec528-9195-4dfe-95b7-e30a44f74b44-logs\") pod \"horizon-7bb6557f96-rgc7g\" (UID: \"a15ec528-9195-4dfe-95b7-e30a44f74b44\") " pod="openstack/horizon-7bb6557f96-rgc7g" Nov 26 07:05:24 crc kubenswrapper[4492]: I1126 07:05:24.164525 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/a15ec528-9195-4dfe-95b7-e30a44f74b44-config-data\") pod \"horizon-7bb6557f96-rgc7g\" (UID: \"a15ec528-9195-4dfe-95b7-e30a44f74b44\") " pod="openstack/horizon-7bb6557f96-rgc7g" Nov 26 07:05:24 crc kubenswrapper[4492]: I1126 07:05:24.164587 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a15ec528-9195-4dfe-95b7-e30a44f74b44-combined-ca-bundle\") pod \"horizon-7bb6557f96-rgc7g\" (UID: \"a15ec528-9195-4dfe-95b7-e30a44f74b44\") " pod="openstack/horizon-7bb6557f96-rgc7g" Nov 26 07:05:24 crc kubenswrapper[4492]: I1126 07:05:24.164646 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/a15ec528-9195-4dfe-95b7-e30a44f74b44-horizon-secret-key\") pod \"horizon-7bb6557f96-rgc7g\" (UID: \"a15ec528-9195-4dfe-95b7-e30a44f74b44\") " pod="openstack/horizon-7bb6557f96-rgc7g" Nov 26 07:05:24 crc kubenswrapper[4492]: I1126 07:05:24.164671 4492 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z25fk\" (UniqueName: \"kubernetes.io/projected/a15ec528-9195-4dfe-95b7-e30a44f74b44-kube-api-access-z25fk\") pod \"horizon-7bb6557f96-rgc7g\" (UID: \"a15ec528-9195-4dfe-95b7-e30a44f74b44\") " pod="openstack/horizon-7bb6557f96-rgc7g" Nov 26 07:05:24 crc kubenswrapper[4492]: I1126 07:05:24.164690 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/a15ec528-9195-4dfe-95b7-e30a44f74b44-horizon-tls-certs\") pod \"horizon-7bb6557f96-rgc7g\" (UID: \"a15ec528-9195-4dfe-95b7-e30a44f74b44\") " pod="openstack/horizon-7bb6557f96-rgc7g" Nov 26 07:05:24 crc kubenswrapper[4492]: I1126 07:05:24.164710 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a15ec528-9195-4dfe-95b7-e30a44f74b44-scripts\") pod \"horizon-7bb6557f96-rgc7g\" (UID: \"a15ec528-9195-4dfe-95b7-e30a44f74b44\") " pod="openstack/horizon-7bb6557f96-rgc7g" Nov 26 07:05:24 crc kubenswrapper[4492]: I1126 07:05:24.208842 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 07:05:24 crc kubenswrapper[4492]: I1126 07:05:24.268196 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/a15ec528-9195-4dfe-95b7-e30a44f74b44-horizon-secret-key\") pod \"horizon-7bb6557f96-rgc7g\" (UID: \"a15ec528-9195-4dfe-95b7-e30a44f74b44\") " pod="openstack/horizon-7bb6557f96-rgc7g" Nov 26 07:05:24 crc kubenswrapper[4492]: I1126 07:05:24.268262 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z25fk\" (UniqueName: \"kubernetes.io/projected/a15ec528-9195-4dfe-95b7-e30a44f74b44-kube-api-access-z25fk\") pod \"horizon-7bb6557f96-rgc7g\" (UID: \"a15ec528-9195-4dfe-95b7-e30a44f74b44\") " pod="openstack/horizon-7bb6557f96-rgc7g" Nov 26 07:05:24 crc kubenswrapper[4492]: I1126 07:05:24.268296 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/a15ec528-9195-4dfe-95b7-e30a44f74b44-horizon-tls-certs\") pod \"horizon-7bb6557f96-rgc7g\" (UID: \"a15ec528-9195-4dfe-95b7-e30a44f74b44\") " pod="openstack/horizon-7bb6557f96-rgc7g" Nov 26 07:05:24 crc kubenswrapper[4492]: I1126 07:05:24.268328 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a15ec528-9195-4dfe-95b7-e30a44f74b44-scripts\") pod \"horizon-7bb6557f96-rgc7g\" (UID: \"a15ec528-9195-4dfe-95b7-e30a44f74b44\") " pod="openstack/horizon-7bb6557f96-rgc7g" Nov 26 07:05:24 crc kubenswrapper[4492]: I1126 07:05:24.268365 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/66834ee2-a38b-4d8d-9195-c4af38dc8a9b-horizon-secret-key\") pod \"horizon-6b48756c9b-4pxln\" (UID: \"66834ee2-a38b-4d8d-9195-c4af38dc8a9b\") " pod="openstack/horizon-6b48756c9b-4pxln" Nov 26 07:05:24 crc kubenswrapper[4492]: I1126 07:05:24.268397 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/66834ee2-a38b-4d8d-9195-c4af38dc8a9b-horizon-tls-certs\") pod \"horizon-6b48756c9b-4pxln\" (UID: 
\"66834ee2-a38b-4d8d-9195-c4af38dc8a9b\") " pod="openstack/horizon-6b48756c9b-4pxln" Nov 26 07:05:24 crc kubenswrapper[4492]: I1126 07:05:24.268436 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ndvvk\" (UniqueName: \"kubernetes.io/projected/66834ee2-a38b-4d8d-9195-c4af38dc8a9b-kube-api-access-ndvvk\") pod \"horizon-6b48756c9b-4pxln\" (UID: \"66834ee2-a38b-4d8d-9195-c4af38dc8a9b\") " pod="openstack/horizon-6b48756c9b-4pxln" Nov 26 07:05:24 crc kubenswrapper[4492]: I1126 07:05:24.268501 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/66834ee2-a38b-4d8d-9195-c4af38dc8a9b-logs\") pod \"horizon-6b48756c9b-4pxln\" (UID: \"66834ee2-a38b-4d8d-9195-c4af38dc8a9b\") " pod="openstack/horizon-6b48756c9b-4pxln" Nov 26 07:05:24 crc kubenswrapper[4492]: I1126 07:05:24.268760 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a15ec528-9195-4dfe-95b7-e30a44f74b44-logs\") pod \"horizon-7bb6557f96-rgc7g\" (UID: \"a15ec528-9195-4dfe-95b7-e30a44f74b44\") " pod="openstack/horizon-7bb6557f96-rgc7g" Nov 26 07:05:24 crc kubenswrapper[4492]: I1126 07:05:24.268801 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/66834ee2-a38b-4d8d-9195-c4af38dc8a9b-scripts\") pod \"horizon-6b48756c9b-4pxln\" (UID: \"66834ee2-a38b-4d8d-9195-c4af38dc8a9b\") " pod="openstack/horizon-6b48756c9b-4pxln" Nov 26 07:05:24 crc kubenswrapper[4492]: I1126 07:05:24.268842 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/a15ec528-9195-4dfe-95b7-e30a44f74b44-config-data\") pod \"horizon-7bb6557f96-rgc7g\" (UID: \"a15ec528-9195-4dfe-95b7-e30a44f74b44\") " pod="openstack/horizon-7bb6557f96-rgc7g" Nov 26 07:05:24 crc kubenswrapper[4492]: I1126 07:05:24.269128 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a15ec528-9195-4dfe-95b7-e30a44f74b44-logs\") pod \"horizon-7bb6557f96-rgc7g\" (UID: \"a15ec528-9195-4dfe-95b7-e30a44f74b44\") " pod="openstack/horizon-7bb6557f96-rgc7g" Nov 26 07:05:24 crc kubenswrapper[4492]: I1126 07:05:24.269465 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a15ec528-9195-4dfe-95b7-e30a44f74b44-scripts\") pod \"horizon-7bb6557f96-rgc7g\" (UID: \"a15ec528-9195-4dfe-95b7-e30a44f74b44\") " pod="openstack/horizon-7bb6557f96-rgc7g" Nov 26 07:05:24 crc kubenswrapper[4492]: I1126 07:05:24.270069 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/a15ec528-9195-4dfe-95b7-e30a44f74b44-config-data\") pod \"horizon-7bb6557f96-rgc7g\" (UID: \"a15ec528-9195-4dfe-95b7-e30a44f74b44\") " pod="openstack/horizon-7bb6557f96-rgc7g" Nov 26 07:05:24 crc kubenswrapper[4492]: I1126 07:05:24.268957 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/66834ee2-a38b-4d8d-9195-c4af38dc8a9b-combined-ca-bundle\") pod \"horizon-6b48756c9b-4pxln\" (UID: \"66834ee2-a38b-4d8d-9195-c4af38dc8a9b\") " pod="openstack/horizon-6b48756c9b-4pxln" Nov 26 07:05:24 crc kubenswrapper[4492]: I1126 07:05:24.271489 4492 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a15ec528-9195-4dfe-95b7-e30a44f74b44-combined-ca-bundle\") pod \"horizon-7bb6557f96-rgc7g\" (UID: \"a15ec528-9195-4dfe-95b7-e30a44f74b44\") " pod="openstack/horizon-7bb6557f96-rgc7g" Nov 26 07:05:24 crc kubenswrapper[4492]: I1126 07:05:24.271564 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/66834ee2-a38b-4d8d-9195-c4af38dc8a9b-config-data\") pod \"horizon-6b48756c9b-4pxln\" (UID: \"66834ee2-a38b-4d8d-9195-c4af38dc8a9b\") " pod="openstack/horizon-6b48756c9b-4pxln" Nov 26 07:05:24 crc kubenswrapper[4492]: I1126 07:05:24.276770 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/a15ec528-9195-4dfe-95b7-e30a44f74b44-horizon-tls-certs\") pod \"horizon-7bb6557f96-rgc7g\" (UID: \"a15ec528-9195-4dfe-95b7-e30a44f74b44\") " pod="openstack/horizon-7bb6557f96-rgc7g" Nov 26 07:05:24 crc kubenswrapper[4492]: I1126 07:05:24.283758 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a15ec528-9195-4dfe-95b7-e30a44f74b44-combined-ca-bundle\") pod \"horizon-7bb6557f96-rgc7g\" (UID: \"a15ec528-9195-4dfe-95b7-e30a44f74b44\") " pod="openstack/horizon-7bb6557f96-rgc7g" Nov 26 07:05:24 crc kubenswrapper[4492]: I1126 07:05:24.289931 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z25fk\" (UniqueName: \"kubernetes.io/projected/a15ec528-9195-4dfe-95b7-e30a44f74b44-kube-api-access-z25fk\") pod \"horizon-7bb6557f96-rgc7g\" (UID: \"a15ec528-9195-4dfe-95b7-e30a44f74b44\") " pod="openstack/horizon-7bb6557f96-rgc7g" Nov 26 07:05:24 crc kubenswrapper[4492]: I1126 07:05:24.300250 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/a15ec528-9195-4dfe-95b7-e30a44f74b44-horizon-secret-key\") pod \"horizon-7bb6557f96-rgc7g\" (UID: \"a15ec528-9195-4dfe-95b7-e30a44f74b44\") " pod="openstack/horizon-7bb6557f96-rgc7g" Nov 26 07:05:24 crc kubenswrapper[4492]: I1126 07:05:24.367345 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-7bb6557f96-rgc7g" Nov 26 07:05:24 crc kubenswrapper[4492]: I1126 07:05:24.375133 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/66834ee2-a38b-4d8d-9195-c4af38dc8a9b-horizon-secret-key\") pod \"horizon-6b48756c9b-4pxln\" (UID: \"66834ee2-a38b-4d8d-9195-c4af38dc8a9b\") " pod="openstack/horizon-6b48756c9b-4pxln" Nov 26 07:05:24 crc kubenswrapper[4492]: I1126 07:05:24.375194 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/66834ee2-a38b-4d8d-9195-c4af38dc8a9b-horizon-tls-certs\") pod \"horizon-6b48756c9b-4pxln\" (UID: \"66834ee2-a38b-4d8d-9195-c4af38dc8a9b\") " pod="openstack/horizon-6b48756c9b-4pxln" Nov 26 07:05:24 crc kubenswrapper[4492]: I1126 07:05:24.375226 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ndvvk\" (UniqueName: \"kubernetes.io/projected/66834ee2-a38b-4d8d-9195-c4af38dc8a9b-kube-api-access-ndvvk\") pod \"horizon-6b48756c9b-4pxln\" (UID: \"66834ee2-a38b-4d8d-9195-c4af38dc8a9b\") " pod="openstack/horizon-6b48756c9b-4pxln" Nov 26 07:05:24 crc kubenswrapper[4492]: I1126 07:05:24.375317 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/66834ee2-a38b-4d8d-9195-c4af38dc8a9b-logs\") pod \"horizon-6b48756c9b-4pxln\" (UID: \"66834ee2-a38b-4d8d-9195-c4af38dc8a9b\") " pod="openstack/horizon-6b48756c9b-4pxln" Nov 26 07:05:24 crc kubenswrapper[4492]: I1126 07:05:24.375399 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/66834ee2-a38b-4d8d-9195-c4af38dc8a9b-scripts\") pod \"horizon-6b48756c9b-4pxln\" (UID: \"66834ee2-a38b-4d8d-9195-c4af38dc8a9b\") " pod="openstack/horizon-6b48756c9b-4pxln" Nov 26 07:05:24 crc kubenswrapper[4492]: I1126 07:05:24.375466 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/66834ee2-a38b-4d8d-9195-c4af38dc8a9b-combined-ca-bundle\") pod \"horizon-6b48756c9b-4pxln\" (UID: \"66834ee2-a38b-4d8d-9195-c4af38dc8a9b\") " pod="openstack/horizon-6b48756c9b-4pxln" Nov 26 07:05:24 crc kubenswrapper[4492]: I1126 07:05:24.375574 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/66834ee2-a38b-4d8d-9195-c4af38dc8a9b-config-data\") pod \"horizon-6b48756c9b-4pxln\" (UID: \"66834ee2-a38b-4d8d-9195-c4af38dc8a9b\") " pod="openstack/horizon-6b48756c9b-4pxln" Nov 26 07:05:24 crc kubenswrapper[4492]: I1126 07:05:24.377685 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/66834ee2-a38b-4d8d-9195-c4af38dc8a9b-logs\") pod \"horizon-6b48756c9b-4pxln\" (UID: \"66834ee2-a38b-4d8d-9195-c4af38dc8a9b\") " pod="openstack/horizon-6b48756c9b-4pxln" Nov 26 07:05:24 crc kubenswrapper[4492]: I1126 07:05:24.379045 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/66834ee2-a38b-4d8d-9195-c4af38dc8a9b-config-data\") pod \"horizon-6b48756c9b-4pxln\" (UID: \"66834ee2-a38b-4d8d-9195-c4af38dc8a9b\") " pod="openstack/horizon-6b48756c9b-4pxln" Nov 26 07:05:24 crc kubenswrapper[4492]: I1126 07:05:24.382308 4492 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/66834ee2-a38b-4d8d-9195-c4af38dc8a9b-scripts\") pod \"horizon-6b48756c9b-4pxln\" (UID: \"66834ee2-a38b-4d8d-9195-c4af38dc8a9b\") " pod="openstack/horizon-6b48756c9b-4pxln" Nov 26 07:05:24 crc kubenswrapper[4492]: I1126 07:05:24.382405 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/66834ee2-a38b-4d8d-9195-c4af38dc8a9b-horizon-tls-certs\") pod \"horizon-6b48756c9b-4pxln\" (UID: \"66834ee2-a38b-4d8d-9195-c4af38dc8a9b\") " pod="openstack/horizon-6b48756c9b-4pxln" Nov 26 07:05:24 crc kubenswrapper[4492]: I1126 07:05:24.394468 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/66834ee2-a38b-4d8d-9195-c4af38dc8a9b-combined-ca-bundle\") pod \"horizon-6b48756c9b-4pxln\" (UID: \"66834ee2-a38b-4d8d-9195-c4af38dc8a9b\") " pod="openstack/horizon-6b48756c9b-4pxln" Nov 26 07:05:24 crc kubenswrapper[4492]: I1126 07:05:24.400122 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/66834ee2-a38b-4d8d-9195-c4af38dc8a9b-horizon-secret-key\") pod \"horizon-6b48756c9b-4pxln\" (UID: \"66834ee2-a38b-4d8d-9195-c4af38dc8a9b\") " pod="openstack/horizon-6b48756c9b-4pxln" Nov 26 07:05:24 crc kubenswrapper[4492]: I1126 07:05:24.401686 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ndvvk\" (UniqueName: \"kubernetes.io/projected/66834ee2-a38b-4d8d-9195-c4af38dc8a9b-kube-api-access-ndvvk\") pod \"horizon-6b48756c9b-4pxln\" (UID: \"66834ee2-a38b-4d8d-9195-c4af38dc8a9b\") " pod="openstack/horizon-6b48756c9b-4pxln" Nov 26 07:05:24 crc kubenswrapper[4492]: I1126 07:05:24.417393 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-d4b7d55d5-brf4j" Nov 26 07:05:24 crc kubenswrapper[4492]: I1126 07:05:24.488746 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-6b48756c9b-4pxln" Nov 26 07:05:24 crc kubenswrapper[4492]: I1126 07:05:24.535488 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0dfd6d10-c083-4e37-8ce9-7578e8ea8fd1" path="/var/lib/kubelet/pods/0dfd6d10-c083-4e37-8ce9-7578e8ea8fd1/volumes" Nov 26 07:05:24 crc kubenswrapper[4492]: I1126 07:05:24.554107 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6449747765-nngzp"] Nov 26 07:05:24 crc kubenswrapper[4492]: I1126 07:05:24.554471 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6449747765-nngzp" podUID="5bc62996-17f2-4415-a9d5-3219cfb079f9" containerName="dnsmasq-dns" containerID="cri-o://60384fa21f2d35fc5f7de26f3616131eeca11ebd6345a223a104f8bd041f95a3" gracePeriod=10 Nov 26 07:05:25 crc kubenswrapper[4492]: I1126 07:05:25.608347 4492 generic.go:334] "Generic (PLEG): container finished" podID="5bc62996-17f2-4415-a9d5-3219cfb079f9" containerID="60384fa21f2d35fc5f7de26f3616131eeca11ebd6345a223a104f8bd041f95a3" exitCode=0 Nov 26 07:05:25 crc kubenswrapper[4492]: I1126 07:05:25.608430 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6449747765-nngzp" event={"ID":"5bc62996-17f2-4415-a9d5-3219cfb079f9","Type":"ContainerDied","Data":"60384fa21f2d35fc5f7de26f3616131eeca11ebd6345a223a104f8bd041f95a3"} Nov 26 07:05:26 crc kubenswrapper[4492]: I1126 07:05:26.319003 4492 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-6449747765-nngzp" podUID="5bc62996-17f2-4415-a9d5-3219cfb079f9" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.112:5353: connect: connection refused" Nov 26 07:05:27 crc kubenswrapper[4492]: I1126 07:05:27.641850 4492 generic.go:334] "Generic (PLEG): container finished" podID="0afbb611-57c8-4d5a-a258-cc184185d75c" containerID="a47769566775605c1729f6e47025939de11d410fa967fa5364914970d6d89eea" exitCode=0 Nov 26 07:05:27 crc kubenswrapper[4492]: I1126 07:05:27.641967 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-rl9z8" event={"ID":"0afbb611-57c8-4d5a-a258-cc184185d75c","Type":"ContainerDied","Data":"a47769566775605c1729f6e47025939de11d410fa967fa5364914970d6d89eea"} Nov 26 07:05:31 crc kubenswrapper[4492]: I1126 07:05:31.319065 4492 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-6449747765-nngzp" podUID="5bc62996-17f2-4415-a9d5-3219cfb079f9" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.112:5353: connect: connection refused" Nov 26 07:05:31 crc kubenswrapper[4492]: I1126 07:05:31.566836 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-rl9z8" Nov 26 07:05:31 crc kubenswrapper[4492]: I1126 07:05:31.688600 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-rl9z8" event={"ID":"0afbb611-57c8-4d5a-a258-cc184185d75c","Type":"ContainerDied","Data":"65c45d6f16c5fbac2898f5c85fac1545c7f4c7e63ad733bc134ed926e74d4d8a"} Nov 26 07:05:31 crc kubenswrapper[4492]: I1126 07:05:31.688647 4492 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="65c45d6f16c5fbac2898f5c85fac1545c7f4c7e63ad733bc134ed926e74d4d8a" Nov 26 07:05:31 crc kubenswrapper[4492]: I1126 07:05:31.688663 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-rl9z8" Nov 26 07:05:31 crc kubenswrapper[4492]: I1126 07:05:31.699369 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/0afbb611-57c8-4d5a-a258-cc184185d75c-config\") pod \"0afbb611-57c8-4d5a-a258-cc184185d75c\" (UID: \"0afbb611-57c8-4d5a-a258-cc184185d75c\") " Nov 26 07:05:31 crc kubenswrapper[4492]: I1126 07:05:31.699455 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0afbb611-57c8-4d5a-a258-cc184185d75c-combined-ca-bundle\") pod \"0afbb611-57c8-4d5a-a258-cc184185d75c\" (UID: \"0afbb611-57c8-4d5a-a258-cc184185d75c\") " Nov 26 07:05:31 crc kubenswrapper[4492]: I1126 07:05:31.699518 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lpkvl\" (UniqueName: \"kubernetes.io/projected/0afbb611-57c8-4d5a-a258-cc184185d75c-kube-api-access-lpkvl\") pod \"0afbb611-57c8-4d5a-a258-cc184185d75c\" (UID: \"0afbb611-57c8-4d5a-a258-cc184185d75c\") " Nov 26 07:05:31 crc kubenswrapper[4492]: I1126 07:05:31.707773 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0afbb611-57c8-4d5a-a258-cc184185d75c-kube-api-access-lpkvl" (OuterVolumeSpecName: "kube-api-access-lpkvl") pod "0afbb611-57c8-4d5a-a258-cc184185d75c" (UID: "0afbb611-57c8-4d5a-a258-cc184185d75c"). InnerVolumeSpecName "kube-api-access-lpkvl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:05:31 crc kubenswrapper[4492]: I1126 07:05:31.728894 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0afbb611-57c8-4d5a-a258-cc184185d75c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0afbb611-57c8-4d5a-a258-cc184185d75c" (UID: "0afbb611-57c8-4d5a-a258-cc184185d75c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:05:31 crc kubenswrapper[4492]: I1126 07:05:31.730448 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0afbb611-57c8-4d5a-a258-cc184185d75c-config" (OuterVolumeSpecName: "config") pod "0afbb611-57c8-4d5a-a258-cc184185d75c" (UID: "0afbb611-57c8-4d5a-a258-cc184185d75c"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:05:31 crc kubenswrapper[4492]: I1126 07:05:31.803627 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lpkvl\" (UniqueName: \"kubernetes.io/projected/0afbb611-57c8-4d5a-a258-cc184185d75c-kube-api-access-lpkvl\") on node \"crc\" DevicePath \"\"" Nov 26 07:05:31 crc kubenswrapper[4492]: I1126 07:05:31.803665 4492 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/0afbb611-57c8-4d5a-a258-cc184185d75c-config\") on node \"crc\" DevicePath \"\"" Nov 26 07:05:31 crc kubenswrapper[4492]: I1126 07:05:31.803681 4492 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0afbb611-57c8-4d5a-a258-cc184185d75c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:05:32 crc kubenswrapper[4492]: I1126 07:05:32.755483 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-cb64f9797-5jdj2"] Nov 26 07:05:32 crc kubenswrapper[4492]: E1126 07:05:32.756109 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0afbb611-57c8-4d5a-a258-cc184185d75c" containerName="neutron-db-sync" Nov 26 07:05:32 crc kubenswrapper[4492]: I1126 07:05:32.756121 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="0afbb611-57c8-4d5a-a258-cc184185d75c" containerName="neutron-db-sync" Nov 26 07:05:32 crc kubenswrapper[4492]: I1126 07:05:32.756341 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="0afbb611-57c8-4d5a-a258-cc184185d75c" containerName="neutron-db-sync" Nov 26 07:05:32 crc kubenswrapper[4492]: I1126 07:05:32.757125 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-cb64f9797-5jdj2" Nov 26 07:05:32 crc kubenswrapper[4492]: I1126 07:05:32.775668 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-cb64f9797-5jdj2"] Nov 26 07:05:32 crc kubenswrapper[4492]: I1126 07:05:32.868144 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-86bdc94cc6-ws8xc"] Nov 26 07:05:32 crc kubenswrapper[4492]: I1126 07:05:32.869536 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-86bdc94cc6-ws8xc" Nov 26 07:05:32 crc kubenswrapper[4492]: I1126 07:05:32.875578 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-xwhmb" Nov 26 07:05:32 crc kubenswrapper[4492]: I1126 07:05:32.875649 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-ovndbs" Nov 26 07:05:32 crc kubenswrapper[4492]: I1126 07:05:32.875845 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Nov 26 07:05:32 crc kubenswrapper[4492]: I1126 07:05:32.876016 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Nov 26 07:05:32 crc kubenswrapper[4492]: I1126 07:05:32.878693 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-86bdc94cc6-ws8xc"] Nov 26 07:05:32 crc kubenswrapper[4492]: I1126 07:05:32.926767 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/4f397877-f399-472d-a32d-11cb9b87fd73-dns-swift-storage-0\") pod \"dnsmasq-dns-cb64f9797-5jdj2\" (UID: \"4f397877-f399-472d-a32d-11cb9b87fd73\") " pod="openstack/dnsmasq-dns-cb64f9797-5jdj2" Nov 26 07:05:32 crc kubenswrapper[4492]: I1126 07:05:32.926894 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4f397877-f399-472d-a32d-11cb9b87fd73-ovsdbserver-nb\") pod \"dnsmasq-dns-cb64f9797-5jdj2\" (UID: \"4f397877-f399-472d-a32d-11cb9b87fd73\") " pod="openstack/dnsmasq-dns-cb64f9797-5jdj2" Nov 26 07:05:32 crc kubenswrapper[4492]: I1126 07:05:32.926956 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4f397877-f399-472d-a32d-11cb9b87fd73-dns-svc\") pod \"dnsmasq-dns-cb64f9797-5jdj2\" (UID: \"4f397877-f399-472d-a32d-11cb9b87fd73\") " pod="openstack/dnsmasq-dns-cb64f9797-5jdj2" Nov 26 07:05:32 crc kubenswrapper[4492]: I1126 07:05:32.926997 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2t8zr\" (UniqueName: \"kubernetes.io/projected/4f397877-f399-472d-a32d-11cb9b87fd73-kube-api-access-2t8zr\") pod \"dnsmasq-dns-cb64f9797-5jdj2\" (UID: \"4f397877-f399-472d-a32d-11cb9b87fd73\") " pod="openstack/dnsmasq-dns-cb64f9797-5jdj2" Nov 26 07:05:32 crc kubenswrapper[4492]: I1126 07:05:32.927020 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4f397877-f399-472d-a32d-11cb9b87fd73-ovsdbserver-sb\") pod \"dnsmasq-dns-cb64f9797-5jdj2\" (UID: \"4f397877-f399-472d-a32d-11cb9b87fd73\") " pod="openstack/dnsmasq-dns-cb64f9797-5jdj2" Nov 26 07:05:32 crc kubenswrapper[4492]: I1126 07:05:32.927040 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4f397877-f399-472d-a32d-11cb9b87fd73-config\") pod \"dnsmasq-dns-cb64f9797-5jdj2\" (UID: \"4f397877-f399-472d-a32d-11cb9b87fd73\") " pod="openstack/dnsmasq-dns-cb64f9797-5jdj2" Nov 26 07:05:33 crc kubenswrapper[4492]: I1126 07:05:33.029108 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-458s4\" (UniqueName: 
\"kubernetes.io/projected/38194db3-f048-45e5-80d6-7dfa8f1f7420-kube-api-access-458s4\") pod \"neutron-86bdc94cc6-ws8xc\" (UID: \"38194db3-f048-45e5-80d6-7dfa8f1f7420\") " pod="openstack/neutron-86bdc94cc6-ws8xc" Nov 26 07:05:33 crc kubenswrapper[4492]: I1126 07:05:33.029196 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/38194db3-f048-45e5-80d6-7dfa8f1f7420-ovndb-tls-certs\") pod \"neutron-86bdc94cc6-ws8xc\" (UID: \"38194db3-f048-45e5-80d6-7dfa8f1f7420\") " pod="openstack/neutron-86bdc94cc6-ws8xc" Nov 26 07:05:33 crc kubenswrapper[4492]: I1126 07:05:33.029234 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/4f397877-f399-472d-a32d-11cb9b87fd73-dns-swift-storage-0\") pod \"dnsmasq-dns-cb64f9797-5jdj2\" (UID: \"4f397877-f399-472d-a32d-11cb9b87fd73\") " pod="openstack/dnsmasq-dns-cb64f9797-5jdj2" Nov 26 07:05:33 crc kubenswrapper[4492]: I1126 07:05:33.029267 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/38194db3-f048-45e5-80d6-7dfa8f1f7420-combined-ca-bundle\") pod \"neutron-86bdc94cc6-ws8xc\" (UID: \"38194db3-f048-45e5-80d6-7dfa8f1f7420\") " pod="openstack/neutron-86bdc94cc6-ws8xc" Nov 26 07:05:33 crc kubenswrapper[4492]: I1126 07:05:33.029294 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4f397877-f399-472d-a32d-11cb9b87fd73-ovsdbserver-nb\") pod \"dnsmasq-dns-cb64f9797-5jdj2\" (UID: \"4f397877-f399-472d-a32d-11cb9b87fd73\") " pod="openstack/dnsmasq-dns-cb64f9797-5jdj2" Nov 26 07:05:33 crc kubenswrapper[4492]: I1126 07:05:33.029338 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/38194db3-f048-45e5-80d6-7dfa8f1f7420-httpd-config\") pod \"neutron-86bdc94cc6-ws8xc\" (UID: \"38194db3-f048-45e5-80d6-7dfa8f1f7420\") " pod="openstack/neutron-86bdc94cc6-ws8xc" Nov 26 07:05:33 crc kubenswrapper[4492]: I1126 07:05:33.029373 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4f397877-f399-472d-a32d-11cb9b87fd73-dns-svc\") pod \"dnsmasq-dns-cb64f9797-5jdj2\" (UID: \"4f397877-f399-472d-a32d-11cb9b87fd73\") " pod="openstack/dnsmasq-dns-cb64f9797-5jdj2" Nov 26 07:05:33 crc kubenswrapper[4492]: I1126 07:05:33.029409 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/38194db3-f048-45e5-80d6-7dfa8f1f7420-config\") pod \"neutron-86bdc94cc6-ws8xc\" (UID: \"38194db3-f048-45e5-80d6-7dfa8f1f7420\") " pod="openstack/neutron-86bdc94cc6-ws8xc" Nov 26 07:05:33 crc kubenswrapper[4492]: I1126 07:05:33.029438 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2t8zr\" (UniqueName: \"kubernetes.io/projected/4f397877-f399-472d-a32d-11cb9b87fd73-kube-api-access-2t8zr\") pod \"dnsmasq-dns-cb64f9797-5jdj2\" (UID: \"4f397877-f399-472d-a32d-11cb9b87fd73\") " pod="openstack/dnsmasq-dns-cb64f9797-5jdj2" Nov 26 07:05:33 crc kubenswrapper[4492]: I1126 07:05:33.029461 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: 
\"kubernetes.io/configmap/4f397877-f399-472d-a32d-11cb9b87fd73-ovsdbserver-sb\") pod \"dnsmasq-dns-cb64f9797-5jdj2\" (UID: \"4f397877-f399-472d-a32d-11cb9b87fd73\") " pod="openstack/dnsmasq-dns-cb64f9797-5jdj2" Nov 26 07:05:33 crc kubenswrapper[4492]: I1126 07:05:33.029475 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4f397877-f399-472d-a32d-11cb9b87fd73-config\") pod \"dnsmasq-dns-cb64f9797-5jdj2\" (UID: \"4f397877-f399-472d-a32d-11cb9b87fd73\") " pod="openstack/dnsmasq-dns-cb64f9797-5jdj2" Nov 26 07:05:33 crc kubenswrapper[4492]: I1126 07:05:33.030540 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4f397877-f399-472d-a32d-11cb9b87fd73-ovsdbserver-nb\") pod \"dnsmasq-dns-cb64f9797-5jdj2\" (UID: \"4f397877-f399-472d-a32d-11cb9b87fd73\") " pod="openstack/dnsmasq-dns-cb64f9797-5jdj2" Nov 26 07:05:33 crc kubenswrapper[4492]: I1126 07:05:33.030551 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4f397877-f399-472d-a32d-11cb9b87fd73-config\") pod \"dnsmasq-dns-cb64f9797-5jdj2\" (UID: \"4f397877-f399-472d-a32d-11cb9b87fd73\") " pod="openstack/dnsmasq-dns-cb64f9797-5jdj2" Nov 26 07:05:33 crc kubenswrapper[4492]: I1126 07:05:33.030803 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4f397877-f399-472d-a32d-11cb9b87fd73-ovsdbserver-sb\") pod \"dnsmasq-dns-cb64f9797-5jdj2\" (UID: \"4f397877-f399-472d-a32d-11cb9b87fd73\") " pod="openstack/dnsmasq-dns-cb64f9797-5jdj2" Nov 26 07:05:33 crc kubenswrapper[4492]: I1126 07:05:33.030886 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4f397877-f399-472d-a32d-11cb9b87fd73-dns-svc\") pod \"dnsmasq-dns-cb64f9797-5jdj2\" (UID: \"4f397877-f399-472d-a32d-11cb9b87fd73\") " pod="openstack/dnsmasq-dns-cb64f9797-5jdj2" Nov 26 07:05:33 crc kubenswrapper[4492]: I1126 07:05:33.031233 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/4f397877-f399-472d-a32d-11cb9b87fd73-dns-swift-storage-0\") pod \"dnsmasq-dns-cb64f9797-5jdj2\" (UID: \"4f397877-f399-472d-a32d-11cb9b87fd73\") " pod="openstack/dnsmasq-dns-cb64f9797-5jdj2" Nov 26 07:05:33 crc kubenswrapper[4492]: I1126 07:05:33.046698 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2t8zr\" (UniqueName: \"kubernetes.io/projected/4f397877-f399-472d-a32d-11cb9b87fd73-kube-api-access-2t8zr\") pod \"dnsmasq-dns-cb64f9797-5jdj2\" (UID: \"4f397877-f399-472d-a32d-11cb9b87fd73\") " pod="openstack/dnsmasq-dns-cb64f9797-5jdj2" Nov 26 07:05:33 crc kubenswrapper[4492]: I1126 07:05:33.076262 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-cb64f9797-5jdj2" Nov 26 07:05:33 crc kubenswrapper[4492]: I1126 07:05:33.133458 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/38194db3-f048-45e5-80d6-7dfa8f1f7420-config\") pod \"neutron-86bdc94cc6-ws8xc\" (UID: \"38194db3-f048-45e5-80d6-7dfa8f1f7420\") " pod="openstack/neutron-86bdc94cc6-ws8xc" Nov 26 07:05:33 crc kubenswrapper[4492]: I1126 07:05:33.133555 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-458s4\" (UniqueName: \"kubernetes.io/projected/38194db3-f048-45e5-80d6-7dfa8f1f7420-kube-api-access-458s4\") pod \"neutron-86bdc94cc6-ws8xc\" (UID: \"38194db3-f048-45e5-80d6-7dfa8f1f7420\") " pod="openstack/neutron-86bdc94cc6-ws8xc" Nov 26 07:05:33 crc kubenswrapper[4492]: I1126 07:05:33.133613 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/38194db3-f048-45e5-80d6-7dfa8f1f7420-ovndb-tls-certs\") pod \"neutron-86bdc94cc6-ws8xc\" (UID: \"38194db3-f048-45e5-80d6-7dfa8f1f7420\") " pod="openstack/neutron-86bdc94cc6-ws8xc" Nov 26 07:05:33 crc kubenswrapper[4492]: I1126 07:05:33.133667 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/38194db3-f048-45e5-80d6-7dfa8f1f7420-combined-ca-bundle\") pod \"neutron-86bdc94cc6-ws8xc\" (UID: \"38194db3-f048-45e5-80d6-7dfa8f1f7420\") " pod="openstack/neutron-86bdc94cc6-ws8xc" Nov 26 07:05:33 crc kubenswrapper[4492]: I1126 07:05:33.133709 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/38194db3-f048-45e5-80d6-7dfa8f1f7420-httpd-config\") pod \"neutron-86bdc94cc6-ws8xc\" (UID: \"38194db3-f048-45e5-80d6-7dfa8f1f7420\") " pod="openstack/neutron-86bdc94cc6-ws8xc" Nov 26 07:05:33 crc kubenswrapper[4492]: I1126 07:05:33.138241 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/38194db3-f048-45e5-80d6-7dfa8f1f7420-config\") pod \"neutron-86bdc94cc6-ws8xc\" (UID: \"38194db3-f048-45e5-80d6-7dfa8f1f7420\") " pod="openstack/neutron-86bdc94cc6-ws8xc" Nov 26 07:05:33 crc kubenswrapper[4492]: I1126 07:05:33.138549 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/38194db3-f048-45e5-80d6-7dfa8f1f7420-httpd-config\") pod \"neutron-86bdc94cc6-ws8xc\" (UID: \"38194db3-f048-45e5-80d6-7dfa8f1f7420\") " pod="openstack/neutron-86bdc94cc6-ws8xc" Nov 26 07:05:33 crc kubenswrapper[4492]: I1126 07:05:33.139322 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/38194db3-f048-45e5-80d6-7dfa8f1f7420-combined-ca-bundle\") pod \"neutron-86bdc94cc6-ws8xc\" (UID: \"38194db3-f048-45e5-80d6-7dfa8f1f7420\") " pod="openstack/neutron-86bdc94cc6-ws8xc" Nov 26 07:05:33 crc kubenswrapper[4492]: I1126 07:05:33.144028 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/38194db3-f048-45e5-80d6-7dfa8f1f7420-ovndb-tls-certs\") pod \"neutron-86bdc94cc6-ws8xc\" (UID: \"38194db3-f048-45e5-80d6-7dfa8f1f7420\") " pod="openstack/neutron-86bdc94cc6-ws8xc" Nov 26 07:05:33 crc kubenswrapper[4492]: I1126 07:05:33.152608 4492 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-458s4\" (UniqueName: \"kubernetes.io/projected/38194db3-f048-45e5-80d6-7dfa8f1f7420-kube-api-access-458s4\") pod \"neutron-86bdc94cc6-ws8xc\" (UID: \"38194db3-f048-45e5-80d6-7dfa8f1f7420\") " pod="openstack/neutron-86bdc94cc6-ws8xc" Nov 26 07:05:33 crc kubenswrapper[4492]: I1126 07:05:33.196934 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-86bdc94cc6-ws8xc" Nov 26 07:05:34 crc kubenswrapper[4492]: I1126 07:05:34.818781 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-5f9588bddf-km75q"] Nov 26 07:05:34 crc kubenswrapper[4492]: I1126 07:05:34.820295 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-5f9588bddf-km75q" Nov 26 07:05:34 crc kubenswrapper[4492]: I1126 07:05:34.829477 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-internal-svc" Nov 26 07:05:34 crc kubenswrapper[4492]: I1126 07:05:34.829696 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-public-svc" Nov 26 07:05:34 crc kubenswrapper[4492]: I1126 07:05:34.831106 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-5f9588bddf-km75q"] Nov 26 07:05:34 crc kubenswrapper[4492]: I1126 07:05:34.884125 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7c875225-0af5-49c4-b273-85ed6c498f18-internal-tls-certs\") pod \"neutron-5f9588bddf-km75q\" (UID: \"7c875225-0af5-49c4-b273-85ed6c498f18\") " pod="openstack/neutron-5f9588bddf-km75q" Nov 26 07:05:34 crc kubenswrapper[4492]: I1126 07:05:34.884234 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/7c875225-0af5-49c4-b273-85ed6c498f18-config\") pod \"neutron-5f9588bddf-km75q\" (UID: \"7c875225-0af5-49c4-b273-85ed6c498f18\") " pod="openstack/neutron-5f9588bddf-km75q" Nov 26 07:05:34 crc kubenswrapper[4492]: I1126 07:05:34.884415 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/7c875225-0af5-49c4-b273-85ed6c498f18-ovndb-tls-certs\") pod \"neutron-5f9588bddf-km75q\" (UID: \"7c875225-0af5-49c4-b273-85ed6c498f18\") " pod="openstack/neutron-5f9588bddf-km75q" Nov 26 07:05:34 crc kubenswrapper[4492]: I1126 07:05:34.884564 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rxnrg\" (UniqueName: \"kubernetes.io/projected/7c875225-0af5-49c4-b273-85ed6c498f18-kube-api-access-rxnrg\") pod \"neutron-5f9588bddf-km75q\" (UID: \"7c875225-0af5-49c4-b273-85ed6c498f18\") " pod="openstack/neutron-5f9588bddf-km75q" Nov 26 07:05:34 crc kubenswrapper[4492]: I1126 07:05:34.884615 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/7c875225-0af5-49c4-b273-85ed6c498f18-httpd-config\") pod \"neutron-5f9588bddf-km75q\" (UID: \"7c875225-0af5-49c4-b273-85ed6c498f18\") " pod="openstack/neutron-5f9588bddf-km75q" Nov 26 07:05:34 crc kubenswrapper[4492]: I1126 07:05:34.884661 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/7c875225-0af5-49c4-b273-85ed6c498f18-combined-ca-bundle\") pod \"neutron-5f9588bddf-km75q\" (UID: \"7c875225-0af5-49c4-b273-85ed6c498f18\") " pod="openstack/neutron-5f9588bddf-km75q" Nov 26 07:05:34 crc kubenswrapper[4492]: I1126 07:05:34.884693 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/7c875225-0af5-49c4-b273-85ed6c498f18-public-tls-certs\") pod \"neutron-5f9588bddf-km75q\" (UID: \"7c875225-0af5-49c4-b273-85ed6c498f18\") " pod="openstack/neutron-5f9588bddf-km75q" Nov 26 07:05:34 crc kubenswrapper[4492]: I1126 07:05:34.986455 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rxnrg\" (UniqueName: \"kubernetes.io/projected/7c875225-0af5-49c4-b273-85ed6c498f18-kube-api-access-rxnrg\") pod \"neutron-5f9588bddf-km75q\" (UID: \"7c875225-0af5-49c4-b273-85ed6c498f18\") " pod="openstack/neutron-5f9588bddf-km75q" Nov 26 07:05:34 crc kubenswrapper[4492]: I1126 07:05:34.986518 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/7c875225-0af5-49c4-b273-85ed6c498f18-httpd-config\") pod \"neutron-5f9588bddf-km75q\" (UID: \"7c875225-0af5-49c4-b273-85ed6c498f18\") " pod="openstack/neutron-5f9588bddf-km75q" Nov 26 07:05:34 crc kubenswrapper[4492]: I1126 07:05:34.986571 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7c875225-0af5-49c4-b273-85ed6c498f18-combined-ca-bundle\") pod \"neutron-5f9588bddf-km75q\" (UID: \"7c875225-0af5-49c4-b273-85ed6c498f18\") " pod="openstack/neutron-5f9588bddf-km75q" Nov 26 07:05:34 crc kubenswrapper[4492]: I1126 07:05:34.986600 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/7c875225-0af5-49c4-b273-85ed6c498f18-public-tls-certs\") pod \"neutron-5f9588bddf-km75q\" (UID: \"7c875225-0af5-49c4-b273-85ed6c498f18\") " pod="openstack/neutron-5f9588bddf-km75q" Nov 26 07:05:34 crc kubenswrapper[4492]: I1126 07:05:34.986718 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7c875225-0af5-49c4-b273-85ed6c498f18-internal-tls-certs\") pod \"neutron-5f9588bddf-km75q\" (UID: \"7c875225-0af5-49c4-b273-85ed6c498f18\") " pod="openstack/neutron-5f9588bddf-km75q" Nov 26 07:05:34 crc kubenswrapper[4492]: I1126 07:05:34.986745 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/7c875225-0af5-49c4-b273-85ed6c498f18-config\") pod \"neutron-5f9588bddf-km75q\" (UID: \"7c875225-0af5-49c4-b273-85ed6c498f18\") " pod="openstack/neutron-5f9588bddf-km75q" Nov 26 07:05:34 crc kubenswrapper[4492]: I1126 07:05:34.986818 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/7c875225-0af5-49c4-b273-85ed6c498f18-ovndb-tls-certs\") pod \"neutron-5f9588bddf-km75q\" (UID: \"7c875225-0af5-49c4-b273-85ed6c498f18\") " pod="openstack/neutron-5f9588bddf-km75q" Nov 26 07:05:34 crc kubenswrapper[4492]: I1126 07:05:34.994565 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/7c875225-0af5-49c4-b273-85ed6c498f18-httpd-config\") pod \"neutron-5f9588bddf-km75q\" (UID: 
\"7c875225-0af5-49c4-b273-85ed6c498f18\") " pod="openstack/neutron-5f9588bddf-km75q" Nov 26 07:05:34 crc kubenswrapper[4492]: I1126 07:05:34.995239 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/7c875225-0af5-49c4-b273-85ed6c498f18-public-tls-certs\") pod \"neutron-5f9588bddf-km75q\" (UID: \"7c875225-0af5-49c4-b273-85ed6c498f18\") " pod="openstack/neutron-5f9588bddf-km75q" Nov 26 07:05:34 crc kubenswrapper[4492]: I1126 07:05:34.995547 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/7c875225-0af5-49c4-b273-85ed6c498f18-config\") pod \"neutron-5f9588bddf-km75q\" (UID: \"7c875225-0af5-49c4-b273-85ed6c498f18\") " pod="openstack/neutron-5f9588bddf-km75q" Nov 26 07:05:34 crc kubenswrapper[4492]: I1126 07:05:34.995857 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/7c875225-0af5-49c4-b273-85ed6c498f18-ovndb-tls-certs\") pod \"neutron-5f9588bddf-km75q\" (UID: \"7c875225-0af5-49c4-b273-85ed6c498f18\") " pod="openstack/neutron-5f9588bddf-km75q" Nov 26 07:05:34 crc kubenswrapper[4492]: I1126 07:05:34.996317 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7c875225-0af5-49c4-b273-85ed6c498f18-internal-tls-certs\") pod \"neutron-5f9588bddf-km75q\" (UID: \"7c875225-0af5-49c4-b273-85ed6c498f18\") " pod="openstack/neutron-5f9588bddf-km75q" Nov 26 07:05:34 crc kubenswrapper[4492]: I1126 07:05:34.999894 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7c875225-0af5-49c4-b273-85ed6c498f18-combined-ca-bundle\") pod \"neutron-5f9588bddf-km75q\" (UID: \"7c875225-0af5-49c4-b273-85ed6c498f18\") " pod="openstack/neutron-5f9588bddf-km75q" Nov 26 07:05:35 crc kubenswrapper[4492]: I1126 07:05:35.001297 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rxnrg\" (UniqueName: \"kubernetes.io/projected/7c875225-0af5-49c4-b273-85ed6c498f18-kube-api-access-rxnrg\") pod \"neutron-5f9588bddf-km75q\" (UID: \"7c875225-0af5-49c4-b273-85ed6c498f18\") " pod="openstack/neutron-5f9588bddf-km75q" Nov 26 07:05:35 crc kubenswrapper[4492]: I1126 07:05:35.143782 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-5f9588bddf-km75q" Nov 26 07:05:36 crc kubenswrapper[4492]: I1126 07:05:36.319505 4492 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-6449747765-nngzp" podUID="5bc62996-17f2-4415-a9d5-3219cfb079f9" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.112:5353: connect: connection refused" Nov 26 07:05:36 crc kubenswrapper[4492]: I1126 07:05:36.320019 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6449747765-nngzp" Nov 26 07:05:37 crc kubenswrapper[4492]: E1126 07:05:37.830156 4492 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-antelope-centos9/openstack-horizon:1f5c0439f2433cb462b222a5bb23e629" Nov 26 07:05:37 crc kubenswrapper[4492]: E1126 07:05:37.830511 4492 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-antelope-centos9/openstack-horizon:1f5c0439f2433cb462b222a5bb23e629" Nov 26 07:05:37 crc kubenswrapper[4492]: E1126 07:05:37.830678 4492 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:horizon-log,Image:quay.rdoproject.org/podified-antelope-centos9/openstack-horizon:1f5c0439f2433cb462b222a5bb23e629,Command:[/bin/bash],Args:[-c tail -n+1 -F /var/log/horizon/horizon.log],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n8fh689h567h5b9h55dh5c9h56bhcbh8ch5ddh8dh674h68fh698h78h75hdch5bfh78h688h6bh5c4h58h54fhdfh65h679hf9hbfh85h659h65fq,ValueFrom:nil,},EnvVar{Name:ENABLE_DESIGNATE,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_HEAT,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_IRONIC,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_MANILA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_OCTAVIA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_WATCHER,Value:no,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},EnvVar{Name:UNPACK_THEME,Value:true,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/horizon,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-b98vl,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*48,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*true,RunAsGroup:*42400,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod horizon-5795cb96f7-nkq8g_openstack(6759f644-95cf-470e-8914-3583d8b0e11d): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 26 07:05:37 crc kubenswrapper[4492]: E1126 07:05:37.843622 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" 
for \"horizon-log\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"horizon\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-antelope-centos9/openstack-horizon:1f5c0439f2433cb462b222a5bb23e629\\\"\"]" pod="openstack/horizon-5795cb96f7-nkq8g" podUID="6759f644-95cf-470e-8914-3583d8b0e11d" Nov 26 07:05:37 crc kubenswrapper[4492]: I1126 07:05:37.891077 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-gzdq2" Nov 26 07:05:38 crc kubenswrapper[4492]: I1126 07:05:38.072221 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7696e706-4bee-4acb-9c67-58a83178c948-combined-ca-bundle\") pod \"7696e706-4bee-4acb-9c67-58a83178c948\" (UID: \"7696e706-4bee-4acb-9c67-58a83178c948\") " Nov 26 07:05:38 crc kubenswrapper[4492]: I1126 07:05:38.072315 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/7696e706-4bee-4acb-9c67-58a83178c948-credential-keys\") pod \"7696e706-4bee-4acb-9c67-58a83178c948\" (UID: \"7696e706-4bee-4acb-9c67-58a83178c948\") " Nov 26 07:05:38 crc kubenswrapper[4492]: I1126 07:05:38.073706 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b2rnf\" (UniqueName: \"kubernetes.io/projected/7696e706-4bee-4acb-9c67-58a83178c948-kube-api-access-b2rnf\") pod \"7696e706-4bee-4acb-9c67-58a83178c948\" (UID: \"7696e706-4bee-4acb-9c67-58a83178c948\") " Nov 26 07:05:38 crc kubenswrapper[4492]: I1126 07:05:38.073781 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/7696e706-4bee-4acb-9c67-58a83178c948-fernet-keys\") pod \"7696e706-4bee-4acb-9c67-58a83178c948\" (UID: \"7696e706-4bee-4acb-9c67-58a83178c948\") " Nov 26 07:05:38 crc kubenswrapper[4492]: I1126 07:05:38.073805 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7696e706-4bee-4acb-9c67-58a83178c948-config-data\") pod \"7696e706-4bee-4acb-9c67-58a83178c948\" (UID: \"7696e706-4bee-4acb-9c67-58a83178c948\") " Nov 26 07:05:38 crc kubenswrapper[4492]: I1126 07:05:38.073849 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7696e706-4bee-4acb-9c67-58a83178c948-scripts\") pod \"7696e706-4bee-4acb-9c67-58a83178c948\" (UID: \"7696e706-4bee-4acb-9c67-58a83178c948\") " Nov 26 07:05:38 crc kubenswrapper[4492]: I1126 07:05:38.081003 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7696e706-4bee-4acb-9c67-58a83178c948-scripts" (OuterVolumeSpecName: "scripts") pod "7696e706-4bee-4acb-9c67-58a83178c948" (UID: "7696e706-4bee-4acb-9c67-58a83178c948"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:05:38 crc kubenswrapper[4492]: I1126 07:05:38.083811 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7696e706-4bee-4acb-9c67-58a83178c948-kube-api-access-b2rnf" (OuterVolumeSpecName: "kube-api-access-b2rnf") pod "7696e706-4bee-4acb-9c67-58a83178c948" (UID: "7696e706-4bee-4acb-9c67-58a83178c948"). InnerVolumeSpecName "kube-api-access-b2rnf". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:05:38 crc kubenswrapper[4492]: I1126 07:05:38.088356 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7696e706-4bee-4acb-9c67-58a83178c948-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "7696e706-4bee-4acb-9c67-58a83178c948" (UID: "7696e706-4bee-4acb-9c67-58a83178c948"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:05:38 crc kubenswrapper[4492]: I1126 07:05:38.099476 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7696e706-4bee-4acb-9c67-58a83178c948-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "7696e706-4bee-4acb-9c67-58a83178c948" (UID: "7696e706-4bee-4acb-9c67-58a83178c948"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:05:38 crc kubenswrapper[4492]: I1126 07:05:38.102421 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7696e706-4bee-4acb-9c67-58a83178c948-config-data" (OuterVolumeSpecName: "config-data") pod "7696e706-4bee-4acb-9c67-58a83178c948" (UID: "7696e706-4bee-4acb-9c67-58a83178c948"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:05:38 crc kubenswrapper[4492]: I1126 07:05:38.102887 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7696e706-4bee-4acb-9c67-58a83178c948-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7696e706-4bee-4acb-9c67-58a83178c948" (UID: "7696e706-4bee-4acb-9c67-58a83178c948"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:05:38 crc kubenswrapper[4492]: I1126 07:05:38.176854 4492 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7696e706-4bee-4acb-9c67-58a83178c948-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:05:38 crc kubenswrapper[4492]: I1126 07:05:38.176887 4492 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/7696e706-4bee-4acb-9c67-58a83178c948-credential-keys\") on node \"crc\" DevicePath \"\"" Nov 26 07:05:38 crc kubenswrapper[4492]: I1126 07:05:38.176900 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b2rnf\" (UniqueName: \"kubernetes.io/projected/7696e706-4bee-4acb-9c67-58a83178c948-kube-api-access-b2rnf\") on node \"crc\" DevicePath \"\"" Nov 26 07:05:38 crc kubenswrapper[4492]: I1126 07:05:38.176912 4492 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/7696e706-4bee-4acb-9c67-58a83178c948-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 26 07:05:38 crc kubenswrapper[4492]: I1126 07:05:38.176933 4492 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7696e706-4bee-4acb-9c67-58a83178c948-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 07:05:38 crc kubenswrapper[4492]: I1126 07:05:38.176944 4492 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7696e706-4bee-4acb-9c67-58a83178c948-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 07:05:38 crc kubenswrapper[4492]: I1126 07:05:38.778458 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-gzdq2" 
event={"ID":"7696e706-4bee-4acb-9c67-58a83178c948","Type":"ContainerDied","Data":"271a2a895637c378464464f10c8035a4d4e20a11d7e0dac98f94f8bea1ef7b7c"} Nov 26 07:05:38 crc kubenswrapper[4492]: I1126 07:05:38.778913 4492 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="271a2a895637c378464464f10c8035a4d4e20a11d7e0dac98f94f8bea1ef7b7c" Nov 26 07:05:38 crc kubenswrapper[4492]: I1126 07:05:38.778510 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-gzdq2" Nov 26 07:05:38 crc kubenswrapper[4492]: I1126 07:05:38.998044 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-gzdq2"] Nov 26 07:05:39 crc kubenswrapper[4492]: I1126 07:05:39.010428 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-gzdq2"] Nov 26 07:05:39 crc kubenswrapper[4492]: I1126 07:05:39.083243 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-hqvtp"] Nov 26 07:05:39 crc kubenswrapper[4492]: E1126 07:05:39.084555 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7696e706-4bee-4acb-9c67-58a83178c948" containerName="keystone-bootstrap" Nov 26 07:05:39 crc kubenswrapper[4492]: I1126 07:05:39.084579 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="7696e706-4bee-4acb-9c67-58a83178c948" containerName="keystone-bootstrap" Nov 26 07:05:39 crc kubenswrapper[4492]: I1126 07:05:39.085019 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="7696e706-4bee-4acb-9c67-58a83178c948" containerName="keystone-bootstrap" Nov 26 07:05:39 crc kubenswrapper[4492]: I1126 07:05:39.086126 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-hqvtp" Nov 26 07:05:39 crc kubenswrapper[4492]: I1126 07:05:39.090787 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 26 07:05:39 crc kubenswrapper[4492]: I1126 07:05:39.090821 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 26 07:05:39 crc kubenswrapper[4492]: I1126 07:05:39.091029 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 26 07:05:39 crc kubenswrapper[4492]: I1126 07:05:39.091086 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Nov 26 07:05:39 crc kubenswrapper[4492]: I1126 07:05:39.091357 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-t7xkz" Nov 26 07:05:39 crc kubenswrapper[4492]: I1126 07:05:39.091710 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-hqvtp"] Nov 26 07:05:39 crc kubenswrapper[4492]: I1126 07:05:39.100382 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/dbe81971-8d1f-4681-9bfb-5b13a46a5788-fernet-keys\") pod \"keystone-bootstrap-hqvtp\" (UID: \"dbe81971-8d1f-4681-9bfb-5b13a46a5788\") " pod="openstack/keystone-bootstrap-hqvtp" Nov 26 07:05:39 crc kubenswrapper[4492]: I1126 07:05:39.100471 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dbe81971-8d1f-4681-9bfb-5b13a46a5788-config-data\") pod \"keystone-bootstrap-hqvtp\" (UID: \"dbe81971-8d1f-4681-9bfb-5b13a46a5788\") " 
pod="openstack/keystone-bootstrap-hqvtp" Nov 26 07:05:39 crc kubenswrapper[4492]: I1126 07:05:39.100511 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/dbe81971-8d1f-4681-9bfb-5b13a46a5788-credential-keys\") pod \"keystone-bootstrap-hqvtp\" (UID: \"dbe81971-8d1f-4681-9bfb-5b13a46a5788\") " pod="openstack/keystone-bootstrap-hqvtp" Nov 26 07:05:39 crc kubenswrapper[4492]: I1126 07:05:39.100555 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dbe81971-8d1f-4681-9bfb-5b13a46a5788-combined-ca-bundle\") pod \"keystone-bootstrap-hqvtp\" (UID: \"dbe81971-8d1f-4681-9bfb-5b13a46a5788\") " pod="openstack/keystone-bootstrap-hqvtp" Nov 26 07:05:39 crc kubenswrapper[4492]: I1126 07:05:39.100607 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vf7wx\" (UniqueName: \"kubernetes.io/projected/dbe81971-8d1f-4681-9bfb-5b13a46a5788-kube-api-access-vf7wx\") pod \"keystone-bootstrap-hqvtp\" (UID: \"dbe81971-8d1f-4681-9bfb-5b13a46a5788\") " pod="openstack/keystone-bootstrap-hqvtp" Nov 26 07:05:39 crc kubenswrapper[4492]: I1126 07:05:39.100644 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dbe81971-8d1f-4681-9bfb-5b13a46a5788-scripts\") pod \"keystone-bootstrap-hqvtp\" (UID: \"dbe81971-8d1f-4681-9bfb-5b13a46a5788\") " pod="openstack/keystone-bootstrap-hqvtp" Nov 26 07:05:39 crc kubenswrapper[4492]: I1126 07:05:39.202616 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dbe81971-8d1f-4681-9bfb-5b13a46a5788-config-data\") pod \"keystone-bootstrap-hqvtp\" (UID: \"dbe81971-8d1f-4681-9bfb-5b13a46a5788\") " pod="openstack/keystone-bootstrap-hqvtp" Nov 26 07:05:39 crc kubenswrapper[4492]: I1126 07:05:39.203187 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/dbe81971-8d1f-4681-9bfb-5b13a46a5788-credential-keys\") pod \"keystone-bootstrap-hqvtp\" (UID: \"dbe81971-8d1f-4681-9bfb-5b13a46a5788\") " pod="openstack/keystone-bootstrap-hqvtp" Nov 26 07:05:39 crc kubenswrapper[4492]: I1126 07:05:39.203223 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dbe81971-8d1f-4681-9bfb-5b13a46a5788-combined-ca-bundle\") pod \"keystone-bootstrap-hqvtp\" (UID: \"dbe81971-8d1f-4681-9bfb-5b13a46a5788\") " pod="openstack/keystone-bootstrap-hqvtp" Nov 26 07:05:39 crc kubenswrapper[4492]: I1126 07:05:39.204322 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vf7wx\" (UniqueName: \"kubernetes.io/projected/dbe81971-8d1f-4681-9bfb-5b13a46a5788-kube-api-access-vf7wx\") pod \"keystone-bootstrap-hqvtp\" (UID: \"dbe81971-8d1f-4681-9bfb-5b13a46a5788\") " pod="openstack/keystone-bootstrap-hqvtp" Nov 26 07:05:39 crc kubenswrapper[4492]: I1126 07:05:39.204361 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dbe81971-8d1f-4681-9bfb-5b13a46a5788-scripts\") pod \"keystone-bootstrap-hqvtp\" (UID: \"dbe81971-8d1f-4681-9bfb-5b13a46a5788\") " pod="openstack/keystone-bootstrap-hqvtp" Nov 26 07:05:39 
crc kubenswrapper[4492]: I1126 07:05:39.204441 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/dbe81971-8d1f-4681-9bfb-5b13a46a5788-fernet-keys\") pod \"keystone-bootstrap-hqvtp\" (UID: \"dbe81971-8d1f-4681-9bfb-5b13a46a5788\") " pod="openstack/keystone-bootstrap-hqvtp" Nov 26 07:05:39 crc kubenswrapper[4492]: I1126 07:05:39.212085 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dbe81971-8d1f-4681-9bfb-5b13a46a5788-scripts\") pod \"keystone-bootstrap-hqvtp\" (UID: \"dbe81971-8d1f-4681-9bfb-5b13a46a5788\") " pod="openstack/keystone-bootstrap-hqvtp" Nov 26 07:05:39 crc kubenswrapper[4492]: I1126 07:05:39.212569 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/dbe81971-8d1f-4681-9bfb-5b13a46a5788-fernet-keys\") pod \"keystone-bootstrap-hqvtp\" (UID: \"dbe81971-8d1f-4681-9bfb-5b13a46a5788\") " pod="openstack/keystone-bootstrap-hqvtp" Nov 26 07:05:39 crc kubenswrapper[4492]: I1126 07:05:39.212611 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dbe81971-8d1f-4681-9bfb-5b13a46a5788-combined-ca-bundle\") pod \"keystone-bootstrap-hqvtp\" (UID: \"dbe81971-8d1f-4681-9bfb-5b13a46a5788\") " pod="openstack/keystone-bootstrap-hqvtp" Nov 26 07:05:39 crc kubenswrapper[4492]: I1126 07:05:39.217978 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/dbe81971-8d1f-4681-9bfb-5b13a46a5788-credential-keys\") pod \"keystone-bootstrap-hqvtp\" (UID: \"dbe81971-8d1f-4681-9bfb-5b13a46a5788\") " pod="openstack/keystone-bootstrap-hqvtp" Nov 26 07:05:39 crc kubenswrapper[4492]: I1126 07:05:39.222093 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vf7wx\" (UniqueName: \"kubernetes.io/projected/dbe81971-8d1f-4681-9bfb-5b13a46a5788-kube-api-access-vf7wx\") pod \"keystone-bootstrap-hqvtp\" (UID: \"dbe81971-8d1f-4681-9bfb-5b13a46a5788\") " pod="openstack/keystone-bootstrap-hqvtp" Nov 26 07:05:39 crc kubenswrapper[4492]: I1126 07:05:39.226675 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dbe81971-8d1f-4681-9bfb-5b13a46a5788-config-data\") pod \"keystone-bootstrap-hqvtp\" (UID: \"dbe81971-8d1f-4681-9bfb-5b13a46a5788\") " pod="openstack/keystone-bootstrap-hqvtp" Nov 26 07:05:39 crc kubenswrapper[4492]: I1126 07:05:39.408974 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-hqvtp" Nov 26 07:05:40 crc kubenswrapper[4492]: I1126 07:05:40.451124 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7696e706-4bee-4acb-9c67-58a83178c948" path="/var/lib/kubelet/pods/7696e706-4bee-4acb-9c67-58a83178c948/volumes" Nov 26 07:05:46 crc kubenswrapper[4492]: I1126 07:05:46.319267 4492 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-6449747765-nngzp" podUID="5bc62996-17f2-4415-a9d5-3219cfb079f9" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.112:5353: i/o timeout" Nov 26 07:05:47 crc kubenswrapper[4492]: E1126 07:05:47.510023 4492 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-antelope-centos9/openstack-heat-engine:1f5c0439f2433cb462b222a5bb23e629" Nov 26 07:05:47 crc kubenswrapper[4492]: E1126 07:05:47.510105 4492 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-antelope-centos9/openstack-heat-engine:1f5c0439f2433cb462b222a5bb23e629" Nov 26 07:05:47 crc kubenswrapper[4492]: E1126 07:05:47.510246 4492 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.rdoproject.org/podified-antelope-centos9/openstack-heat-engine:1f5c0439f2433cb462b222a5bb23e629,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-6hb5w,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-7s6sk_openstack(ad2234e1-842b-4bba-bd21-9fb781403667): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" 
logger="UnhandledError"
Nov 26 07:05:47 crc kubenswrapper[4492]: E1126 07:05:47.511733 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/heat-db-sync-7s6sk" podUID="ad2234e1-842b-4bba-bd21-9fb781403667"
Nov 26 07:05:47 crc kubenswrapper[4492]: E1126 07:05:47.871871 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-antelope-centos9/openstack-heat-engine:1f5c0439f2433cb462b222a5bb23e629\\\"\"" pod="openstack/heat-db-sync-7s6sk" podUID="ad2234e1-842b-4bba-bd21-9fb781403667"
Nov 26 07:05:47 crc kubenswrapper[4492]: E1126 07:05:47.948750 4492 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-antelope-centos9/openstack-barbican-api:1f5c0439f2433cb462b222a5bb23e629"
Nov 26 07:05:47 crc kubenswrapper[4492]: E1126 07:05:47.948806 4492 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-antelope-centos9/openstack-barbican-api:1f5c0439f2433cb462b222a5bb23e629"
Nov 26 07:05:47 crc kubenswrapper[4492]: E1126 07:05:47.949017 4492 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:barbican-db-sync,Image:quay.rdoproject.org/podified-antelope-centos9/openstack-barbican-api:1f5c0439f2433cb462b222a5bb23e629,Command:[/bin/bash],Args:[-c barbican-manage db upgrade],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/barbican/barbican.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-bsjsf,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42403,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42403,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod barbican-db-sync-9249p_openstack(e34f6949-eab2-4b97-9ba1-54ed3e59da5c): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 26 07:05:47 crc kubenswrapper[4492]: E1126 07:05:47.950281 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/barbican-db-sync-9249p" podUID="e34f6949-eab2-4b97-9ba1-54ed3e59da5c"
Nov 26 07:05:47 crc kubenswrapper[4492]: I1126 07:05:47.952871 4492 scope.go:117] "RemoveContainer" containerID="6ed660ae76bb03fcdb60ad10cab39cce896c0b44698ded3973c28fc7db5ffffe"
Nov 26 07:05:48 crc kubenswrapper[4492]: I1126 07:05:48.077277 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-5795cb96f7-nkq8g"
Nov 26 07:05:48 crc kubenswrapper[4492]: I1126 07:05:48.083949 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6449747765-nngzp"
Nov 26 07:05:48 crc kubenswrapper[4492]: I1126 07:05:48.217212 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b98vl\" (UniqueName: \"kubernetes.io/projected/6759f644-95cf-470e-8914-3583d8b0e11d-kube-api-access-b98vl\") pod \"6759f644-95cf-470e-8914-3583d8b0e11d\" (UID: \"6759f644-95cf-470e-8914-3583d8b0e11d\") "
Nov 26 07:05:48 crc kubenswrapper[4492]: I1126 07:05:48.217286 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5bc62996-17f2-4415-a9d5-3219cfb079f9-ovsdbserver-sb\") pod \"5bc62996-17f2-4415-a9d5-3219cfb079f9\" (UID: \"5bc62996-17f2-4415-a9d5-3219cfb079f9\") "
Nov 26 07:05:48 crc kubenswrapper[4492]: I1126 07:05:48.217336 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6759f644-95cf-470e-8914-3583d8b0e11d-logs\") pod \"6759f644-95cf-470e-8914-3583d8b0e11d\" (UID: \"6759f644-95cf-470e-8914-3583d8b0e11d\") "
Nov 26 07:05:48 crc kubenswrapper[4492]: I1126 07:05:48.217481 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/6759f644-95cf-470e-8914-3583d8b0e11d-horizon-secret-key\") pod \"6759f644-95cf-470e-8914-3583d8b0e11d\" (UID: \"6759f644-95cf-470e-8914-3583d8b0e11d\") "
Nov 26 07:05:48 crc kubenswrapper[4492]: I1126 07:05:48.217538 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5bc62996-17f2-4415-a9d5-3219cfb079f9-dns-svc\") pod \"5bc62996-17f2-4415-a9d5-3219cfb079f9\" (UID: \"5bc62996-17f2-4415-a9d5-3219cfb079f9\") "
Nov 26 07:05:48 crc kubenswrapper[4492]: I1126 07:05:48.217569 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/6759f644-95cf-470e-8914-3583d8b0e11d-config-data\") pod \"6759f644-95cf-470e-8914-3583d8b0e11d\" (UID: \"6759f644-95cf-470e-8914-3583d8b0e11d\") "
Nov 26 07:05:48 crc kubenswrapper[4492]: I1126 07:05:48.217719 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5bc62996-17f2-4415-a9d5-3219cfb079f9-ovsdbserver-nb\") pod \"5bc62996-17f2-4415-a9d5-3219cfb079f9\" (UID: \"5bc62996-17f2-4415-a9d5-3219cfb079f9\") "
Nov 26 07:05:48 crc kubenswrapper[4492]: I1126 07:05:48.217786 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6759f644-95cf-470e-8914-3583d8b0e11d-scripts\") pod \"6759f644-95cf-470e-8914-3583d8b0e11d\" (UID: \"6759f644-95cf-470e-8914-3583d8b0e11d\") "
Nov 26 07:05:48 crc kubenswrapper[4492]: I1126 07:05:48.217825 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bsg78\" (UniqueName: \"kubernetes.io/projected/5bc62996-17f2-4415-a9d5-3219cfb079f9-kube-api-access-bsg78\") pod \"5bc62996-17f2-4415-a9d5-3219cfb079f9\" (UID: \"5bc62996-17f2-4415-a9d5-3219cfb079f9\") "
Nov 26 07:05:48 crc kubenswrapper[4492]: I1126 07:05:48.217849 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5bc62996-17f2-4415-a9d5-3219cfb079f9-config\") pod \"5bc62996-17f2-4415-a9d5-3219cfb079f9\" (UID: \"5bc62996-17f2-4415-a9d5-3219cfb079f9\") "
Nov 26 07:05:48 crc kubenswrapper[4492]: I1126 07:05:48.218441 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6759f644-95cf-470e-8914-3583d8b0e11d-logs" (OuterVolumeSpecName: "logs") pod "6759f644-95cf-470e-8914-3583d8b0e11d" (UID: "6759f644-95cf-470e-8914-3583d8b0e11d"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 07:05:48 crc kubenswrapper[4492]: I1126 07:05:48.219043 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6759f644-95cf-470e-8914-3583d8b0e11d-scripts" (OuterVolumeSpecName: "scripts") pod "6759f644-95cf-470e-8914-3583d8b0e11d" (UID: "6759f644-95cf-470e-8914-3583d8b0e11d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 07:05:48 crc kubenswrapper[4492]: I1126 07:05:48.219723 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6759f644-95cf-470e-8914-3583d8b0e11d-config-data" (OuterVolumeSpecName: "config-data") pod "6759f644-95cf-470e-8914-3583d8b0e11d" (UID: "6759f644-95cf-470e-8914-3583d8b0e11d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 07:05:48 crc kubenswrapper[4492]: I1126 07:05:48.223861 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5bc62996-17f2-4415-a9d5-3219cfb079f9-kube-api-access-bsg78" (OuterVolumeSpecName: "kube-api-access-bsg78") pod "5bc62996-17f2-4415-a9d5-3219cfb079f9" (UID: "5bc62996-17f2-4415-a9d5-3219cfb079f9"). InnerVolumeSpecName "kube-api-access-bsg78". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 07:05:48 crc kubenswrapper[4492]: I1126 07:05:48.224204 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6759f644-95cf-470e-8914-3583d8b0e11d-kube-api-access-b98vl" (OuterVolumeSpecName: "kube-api-access-b98vl") pod "6759f644-95cf-470e-8914-3583d8b0e11d" (UID: "6759f644-95cf-470e-8914-3583d8b0e11d"). InnerVolumeSpecName "kube-api-access-b98vl". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 07:05:48 crc kubenswrapper[4492]: I1126 07:05:48.226571 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6759f644-95cf-470e-8914-3583d8b0e11d-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "6759f644-95cf-470e-8914-3583d8b0e11d" (UID: "6759f644-95cf-470e-8914-3583d8b0e11d"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 07:05:48 crc kubenswrapper[4492]: I1126 07:05:48.257560 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5bc62996-17f2-4415-a9d5-3219cfb079f9-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "5bc62996-17f2-4415-a9d5-3219cfb079f9" (UID: "5bc62996-17f2-4415-a9d5-3219cfb079f9"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 07:05:48 crc kubenswrapper[4492]: I1126 07:05:48.267787 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5bc62996-17f2-4415-a9d5-3219cfb079f9-config" (OuterVolumeSpecName: "config") pod "5bc62996-17f2-4415-a9d5-3219cfb079f9" (UID: "5bc62996-17f2-4415-a9d5-3219cfb079f9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 07:05:48 crc kubenswrapper[4492]: I1126 07:05:48.270775 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5bc62996-17f2-4415-a9d5-3219cfb079f9-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "5bc62996-17f2-4415-a9d5-3219cfb079f9" (UID: "5bc62996-17f2-4415-a9d5-3219cfb079f9"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 07:05:48 crc kubenswrapper[4492]: I1126 07:05:48.271343 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5bc62996-17f2-4415-a9d5-3219cfb079f9-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "5bc62996-17f2-4415-a9d5-3219cfb079f9" (UID: "5bc62996-17f2-4415-a9d5-3219cfb079f9"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 07:05:48 crc kubenswrapper[4492]: I1126 07:05:48.320719 4492 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6759f644-95cf-470e-8914-3583d8b0e11d-scripts\") on node \"crc\" DevicePath \"\""
Nov 26 07:05:48 crc kubenswrapper[4492]: I1126 07:05:48.320743 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bsg78\" (UniqueName: \"kubernetes.io/projected/5bc62996-17f2-4415-a9d5-3219cfb079f9-kube-api-access-bsg78\") on node \"crc\" DevicePath \"\""
Nov 26 07:05:48 crc kubenswrapper[4492]: I1126 07:05:48.320755 4492 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5bc62996-17f2-4415-a9d5-3219cfb079f9-config\") on node \"crc\" DevicePath \"\""
Nov 26 07:05:48 crc kubenswrapper[4492]: I1126 07:05:48.320763 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b98vl\" (UniqueName: \"kubernetes.io/projected/6759f644-95cf-470e-8914-3583d8b0e11d-kube-api-access-b98vl\") on node \"crc\" DevicePath \"\""
Nov 26 07:05:48 crc kubenswrapper[4492]: I1126 07:05:48.320773 4492 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5bc62996-17f2-4415-a9d5-3219cfb079f9-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Nov 26 07:05:48 crc kubenswrapper[4492]: I1126 07:05:48.320780 4492 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6759f644-95cf-470e-8914-3583d8b0e11d-logs\") on node \"crc\" DevicePath \"\""
Nov 26 07:05:48 crc kubenswrapper[4492]: I1126 07:05:48.320787 4492 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/6759f644-95cf-470e-8914-3583d8b0e11d-horizon-secret-key\") on node \"crc\" DevicePath \"\""
Nov 26 07:05:48 crc kubenswrapper[4492]: I1126 07:05:48.320795 4492 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5bc62996-17f2-4415-a9d5-3219cfb079f9-dns-svc\") on node \"crc\" DevicePath \"\""
Nov 26 07:05:48 crc kubenswrapper[4492]: I1126 07:05:48.320803 4492 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/6759f644-95cf-470e-8914-3583d8b0e11d-config-data\") on node \"crc\" DevicePath \"\""
Nov 26 07:05:48 crc kubenswrapper[4492]: I1126 07:05:48.320810 4492 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5bc62996-17f2-4415-a9d5-3219cfb079f9-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Nov 26 07:05:48 crc kubenswrapper[4492]: E1126 07:05:48.422149 4492 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-antelope-centos9/openstack-ceilometer-central:1f5c0439f2433cb462b222a5bb23e629"
Nov 26 07:05:48 crc kubenswrapper[4492]: E1126 07:05:48.422229 4492 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-antelope-centos9/openstack-ceilometer-central:1f5c0439f2433cb462b222a5bb23e629"
Nov 26 07:05:48 crc kubenswrapper[4492]: E1126 07:05:48.422425 4492 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-antelope-centos9/openstack-ceilometer-central:1f5c0439f2433cb462b222a5bb23e629,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n5dbh57fh58hc8h7h64chc5h7bh85h5f9h5cchc7h5dfh697h556h579h59h55fh644h5b6hfdh56h56dh9fh65dh5ddh6dh567h579h5d9h56ch667q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-xbm4n,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 26 07:05:48 crc kubenswrapper[4492]: I1126 07:05:48.830099 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-6b48756c9b-4pxln"]
Nov 26 07:05:48 crc kubenswrapper[4492]: I1126 07:05:48.888854 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6449747765-nngzp"
Nov 26 07:05:48 crc kubenswrapper[4492]: I1126 07:05:48.888865 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6449747765-nngzp" event={"ID":"5bc62996-17f2-4415-a9d5-3219cfb079f9","Type":"ContainerDied","Data":"b613ab9e36662d97f786b17a9ea57584a570c5bf49a3db05bcd2ad894a05acd0"}
Nov 26 07:05:48 crc kubenswrapper[4492]: I1126 07:05:48.901871 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5795cb96f7-nkq8g" event={"ID":"6759f644-95cf-470e-8914-3583d8b0e11d","Type":"ContainerDied","Data":"374b88987c576feed8070e17b31c07a65dbe509e5ddecec00f8cec2cb0e55f11"}
Nov 26 07:05:48 crc kubenswrapper[4492]: I1126 07:05:48.901911 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-5795cb96f7-nkq8g"
Nov 26 07:05:48 crc kubenswrapper[4492]: E1126 07:05:48.905643 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-antelope-centos9/openstack-barbican-api:1f5c0439f2433cb462b222a5bb23e629\\\"\"" pod="openstack/barbican-db-sync-9249p" podUID="e34f6949-eab2-4b97-9ba1-54ed3e59da5c"
Nov 26 07:05:48 crc kubenswrapper[4492]: I1126 07:05:48.920316 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6449747765-nngzp"]
Nov 26 07:05:48 crc kubenswrapper[4492]: I1126 07:05:48.926838 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6449747765-nngzp"]
Nov 26 07:05:48 crc kubenswrapper[4492]: I1126 07:05:48.966536 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-5795cb96f7-nkq8g"]
Nov 26 07:05:48 crc kubenswrapper[4492]: I1126 07:05:48.977792 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-5795cb96f7-nkq8g"]
Nov 26 07:05:49 crc kubenswrapper[4492]: E1126 07:05:49.531680 4492 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-antelope-centos9/openstack-cinder-api:1f5c0439f2433cb462b222a5bb23e629"
Nov 26 07:05:49 crc kubenswrapper[4492]: E1126 07:05:49.531972 4492 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-antelope-centos9/openstack-cinder-api:1f5c0439f2433cb462b222a5bb23e629"
Nov 26 07:05:49 crc kubenswrapper[4492]: E1126 07:05:49.532158 4492 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cinder-db-sync,Image:quay.rdoproject.org/podified-antelope-centos9/openstack-cinder-api:1f5c0439f2433cb462b222a5bb23e629,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_set_configs && /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:etc-machine-id,ReadOnly:true,MountPath:/etc/machine-id,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/config-data/merged,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/cinder/cinder.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-b5744,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cinder-db-sync-8w6bv_openstack(a497bcf3-f8db-4b08-b5e3-33d050f9901a): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 26 07:05:49 crc kubenswrapper[4492]: E1126 07:05:49.533636 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/cinder-db-sync-8w6bv" podUID="a497bcf3-f8db-4b08-b5e3-33d050f9901a"
Nov 26 07:05:49 crc kubenswrapper[4492]: I1126 07:05:49.593348 4492 scope.go:117] "RemoveContainer" containerID="60384fa21f2d35fc5f7de26f3616131eeca11ebd6345a223a104f8bd041f95a3"
Nov 26 07:05:49 crc kubenswrapper[4492]: I1126 07:05:49.776690 4492 scope.go:117] "RemoveContainer" containerID="8a9f806c64ddc0fedd6019b388d9f6ed08ae7de9c66465ceee2c48f93f85d875"
Nov 26 07:05:49 crc kubenswrapper[4492]: I1126 07:05:49.982249 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6b48756c9b-4pxln" event={"ID":"66834ee2-a38b-4d8d-9195-c4af38dc8a9b","Type":"ContainerStarted","Data":"4f297f33e1826a88dbefec6962ab2cc3c67122685b1dbc6da5387bb9f87df9d8"}
Nov 26 07:05:49 crc kubenswrapper[4492]: E1126 07:05:49.990602 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-antelope-centos9/openstack-cinder-api:1f5c0439f2433cb462b222a5bb23e629\\\"\"" pod="openstack/cinder-db-sync-8w6bv" podUID="a497bcf3-f8db-4b08-b5e3-33d050f9901a"
Nov 26 07:05:50 crc kubenswrapper[4492]: I1126 07:05:50.138151 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-7bb6557f96-rgc7g"]
Nov 26 07:05:50 crc kubenswrapper[4492]: I1126 07:05:50.225782 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"]
Nov 26 07:05:50 crc kubenswrapper[4492]: I1126 07:05:50.301446 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-5f9588bddf-km75q"]
Nov 26 07:05:50 crc kubenswrapper[4492]: I1126 07:05:50.338113 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-cb64f9797-5jdj2"]
Nov 26 07:05:50 crc kubenswrapper[4492]: I1126 07:05:50.409447 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-86bdc94cc6-ws8xc"]
Nov 26 07:05:50 crc kubenswrapper[4492]: I1126 07:05:50.454695 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5bc62996-17f2-4415-a9d5-3219cfb079f9" path="/var/lib/kubelet/pods/5bc62996-17f2-4415-a9d5-3219cfb079f9/volumes"
Nov 26 07:05:50 crc kubenswrapper[4492]: I1126 07:05:50.455336 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6759f644-95cf-470e-8914-3583d8b0e11d" path="/var/lib/kubelet/pods/6759f644-95cf-470e-8914-3583d8b0e11d/volumes"
Nov 26 07:05:50 crc kubenswrapper[4492]: I1126 07:05:50.459770 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-hqvtp"]
Nov 26 07:05:50 crc kubenswrapper[4492]: W1126 07:05:50.467378 4492 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddbe81971_8d1f_4681_9bfb_5b13a46a5788.slice/crio-253b25a5cd900b39a0d9a0162ca2039180d1fa2384189f90d9d176fe18d8459b WatchSource:0}: Error finding container 253b25a5cd900b39a0d9a0162ca2039180d1fa2384189f90d9d176fe18d8459b: Status 404 returned error can't find the container with id 253b25a5cd900b39a0d9a0162ca2039180d1fa2384189f90d9d176fe18d8459b
Nov 26 07:05:51 crc kubenswrapper[4492]: I1126 07:05:51.015561 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"0033e995-8279-4229-8ea7-7339427960a8","Type":"ContainerStarted","Data":"a54901be5497d85213d23e1961046affdbf5d4120189d80f95a72c001e93607a"}
Nov 26 07:05:51 crc kubenswrapper[4492]: I1126 07:05:51.037413 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"bd7a8c3f-6281-4b33-a56a-2a93844e584d","Type":"ContainerStarted","Data":"335f331dbee7a43d7b3042186392923490e0ef304ad79cbf5fbd97fd716a997e"}
Nov 26 07:05:51 crc kubenswrapper[4492]: I1126 07:05:51.037511 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="bd7a8c3f-6281-4b33-a56a-2a93844e584d" containerName="glance-log" containerID="cri-o://5cfe2cdbc5fbe39dfa408de2f3d42ecc8fcd3a1ea462a7493c76e15b87cec719" gracePeriod=30
Nov 26 07:05:51 crc kubenswrapper[4492]: I1126 07:05:51.037697 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="bd7a8c3f-6281-4b33-a56a-2a93844e584d" containerName="glance-httpd" containerID="cri-o://335f331dbee7a43d7b3042186392923490e0ef304ad79cbf5fbd97fd716a997e" gracePeriod=30
Nov 26 07:05:51 crc kubenswrapper[4492]: I1126 07:05:51.045958 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-79hj2" event={"ID":"0093dcb6-c7e5-4b5a-94a3-55fc7465109a","Type":"ContainerStarted","Data":"e57a8174821d3fac1cf7e65a6bc615d669095d79385d24cb311950da4fcd023d"}
Nov 26 07:05:51 crc kubenswrapper[4492]: I1126 07:05:51.048410 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-hqvtp" event={"ID":"dbe81971-8d1f-4681-9bfb-5b13a46a5788","Type":"ContainerStarted","Data":"253b25a5cd900b39a0d9a0162ca2039180d1fa2384189f90d9d176fe18d8459b"}
Nov 26 07:05:51 crc kubenswrapper[4492]: I1126 07:05:51.049763 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6b48756c9b-4pxln" event={"ID":"66834ee2-a38b-4d8d-9195-c4af38dc8a9b","Type":"ContainerStarted","Data":"bfabacd0af145bc7cf1b29ca31987971c9329d2cdc1ee6364c1296d00cb0d958"}
Nov 26 07:05:51 crc kubenswrapper[4492]: I1126 07:05:51.054914 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6688b9cf7f-qsr5d" event={"ID":"689a4cb6-8c65-4db6-b3d2-b825ca6ddd7e","Type":"ContainerStarted","Data":"ee2e730f36b7b0b2ba530aaaba90beffab19bcde1442721beb67c96945a7f4c6"}
Nov 26 07:05:51 crc kubenswrapper[4492]: I1126 07:05:51.054957 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6688b9cf7f-qsr5d" event={"ID":"689a4cb6-8c65-4db6-b3d2-b825ca6ddd7e","Type":"ContainerStarted","Data":"c0343fac632313410975f87ee4e707640869581c34278f0a595e9c9f79e1b7ee"}
Nov 26 07:05:51 crc kubenswrapper[4492]: I1126 07:05:51.055077 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-6688b9cf7f-qsr5d" podUID="689a4cb6-8c65-4db6-b3d2-b825ca6ddd7e" containerName="horizon-log" containerID="cri-o://c0343fac632313410975f87ee4e707640869581c34278f0a595e9c9f79e1b7ee" gracePeriod=30
Nov 26 07:05:51 crc kubenswrapper[4492]: I1126 07:05:51.055244 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-6688b9cf7f-qsr5d" podUID="689a4cb6-8c65-4db6-b3d2-b825ca6ddd7e" containerName="horizon" containerID="cri-o://ee2e730f36b7b0b2ba530aaaba90beffab19bcde1442721beb67c96945a7f4c6" gracePeriod=30
Nov 26 07:05:51 crc kubenswrapper[4492]: I1126 07:05:51.060346 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-7bb6557f96-rgc7g" event={"ID":"a15ec528-9195-4dfe-95b7-e30a44f74b44","Type":"ContainerStarted","Data":"1ae7aee7de996ae15f21a63313f4987ac66b6771692515d2a9f4fbf55b2e1331"}
Nov 26 07:05:51 crc kubenswrapper[4492]: I1126 07:05:51.060376 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-7bb6557f96-rgc7g" event={"ID":"a15ec528-9195-4dfe-95b7-e30a44f74b44","Type":"ContainerStarted","Data":"85e4d5fcd357f218da5d70be2a09539ee24903ed48cfbf53ab2597b6149b8bf4"}
Nov 26 07:05:51 crc kubenswrapper[4492]: I1126 07:05:51.061283 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-86bdc94cc6-ws8xc" event={"ID":"38194db3-f048-45e5-80d6-7dfa8f1f7420","Type":"ContainerStarted","Data":"4111ab1bb3fe986ae32d2959570b7d0df410889fd578a7b0d39600e7a279ea9c"}
Nov 26 07:05:51 crc kubenswrapper[4492]: I1126 07:05:51.073197 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=31.073183698 podStartE2EDuration="31.073183698s" podCreationTimestamp="2025-11-26 07:05:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:05:51.065198384 +0000 UTC m=+1046.949086681" watchObservedRunningTime="2025-11-26 07:05:51.073183698 +0000 UTC m=+1046.957071986"
Nov 26 07:05:51 crc kubenswrapper[4492]: I1126 07:05:51.078673 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5f9588bddf-km75q" event={"ID":"7c875225-0af5-49c4-b273-85ed6c498f18","Type":"ContainerStarted","Data":"c7546407b5fa0442bb401376869bf4094a121be1f6dc9fc2fb124bf943eb4775"}
Nov 26 07:05:51 crc kubenswrapper[4492]: I1126 07:05:51.078704 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5f9588bddf-km75q" event={"ID":"7c875225-0af5-49c4-b273-85ed6c498f18","Type":"ContainerStarted","Data":"b8bdf71a4288fd2da3f7ff415135962a77b29f00aa12ca62a7da9609a83c8b7b"}
Nov 26 07:05:51 crc kubenswrapper[4492]: I1126 07:05:51.080508 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cb64f9797-5jdj2" event={"ID":"4f397877-f399-472d-a32d-11cb9b87fd73","Type":"ContainerStarted","Data":"6f070c6428d4d6c332433a36efc6332be9724108f8fef6baac21cb4dd948010d"}
Nov 26 07:05:51 crc kubenswrapper[4492]: I1126 07:05:51.090270 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-644687db57-zx74g" event={"ID":"c4316787-8af1-40ef-995a-0f8aabd1bf11","Type":"ContainerStarted","Data":"d79f7f9b2ee5ec6e625bea73ac1fee9161ae5491dcd3ca6ae78813c618c41561"}
Nov 26 07:05:51 crc kubenswrapper[4492]: I1126 07:05:51.090298 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-644687db57-zx74g" event={"ID":"c4316787-8af1-40ef-995a-0f8aabd1bf11","Type":"ContainerStarted","Data":"ed025a70c3520f1170320f171572560c3fe7c72ad4df5765db7df0c2d5de6892"}
Nov 26 07:05:51 crc kubenswrapper[4492]: I1126 07:05:51.090401 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-644687db57-zx74g" podUID="c4316787-8af1-40ef-995a-0f8aabd1bf11" containerName="horizon-log" containerID="cri-o://ed025a70c3520f1170320f171572560c3fe7c72ad4df5765db7df0c2d5de6892" gracePeriod=30
Nov 26 07:05:51 crc kubenswrapper[4492]: I1126 07:05:51.090586 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-644687db57-zx74g" podUID="c4316787-8af1-40ef-995a-0f8aabd1bf11" containerName="horizon" containerID="cri-o://d79f7f9b2ee5ec6e625bea73ac1fee9161ae5491dcd3ca6ae78813c618c41561" gracePeriod=30
Nov 26 07:05:51 crc kubenswrapper[4492]: I1126 07:05:51.096731 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-6688b9cf7f-qsr5d" podStartSLOduration=4.138078982 podStartE2EDuration="38.096721223s" podCreationTimestamp="2025-11-26 07:05:13 +0000 UTC" firstStartedPulling="2025-11-26 07:05:15.55775108 +0000 UTC m=+1011.441639377" lastFinishedPulling="2025-11-26 07:05:49.51639332 +0000 UTC m=+1045.400281618" observedRunningTime="2025-11-26 07:05:51.086560047 +0000 UTC m=+1046.970448346" watchObservedRunningTime="2025-11-26 07:05:51.096721223 +0000 UTC m=+1046.980609521"
Nov 26 07:05:51 crc kubenswrapper[4492]: I1126 07:05:51.109409 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-db-sync-79hj2" podStartSLOduration=5.69380497 podStartE2EDuration="38.109401254s" podCreationTimestamp="2025-11-26 07:05:13 +0000 UTC" firstStartedPulling="2025-11-26 07:05:15.557416471 +0000 UTC m=+1011.441304769" lastFinishedPulling="2025-11-26 07:05:47.973012755 +0000 UTC m=+1043.856901053" observedRunningTime="2025-11-26 07:05:51.104327494 +0000 UTC m=+1046.988215792" watchObservedRunningTime="2025-11-26 07:05:51.109401254 +0000 UTC m=+1046.993289552"
Nov 26 07:05:51 crc kubenswrapper[4492]: I1126 07:05:51.122963 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-644687db57-zx74g" podStartSLOduration=4.46811832 podStartE2EDuration="34.12295626s" podCreationTimestamp="2025-11-26 07:05:17 +0000 UTC" firstStartedPulling="2025-11-26 07:05:18.318457435 +0000 UTC m=+1014.202345734" lastFinishedPulling="2025-11-26 07:05:47.973295376 +0000 UTC m=+1043.857183674" observedRunningTime="2025-11-26 07:05:51.121142259 +0000 UTC m=+1047.005030558" watchObservedRunningTime="2025-11-26 07:05:51.12295626 +0000 UTC m=+1047.006844558"
Nov 26 07:05:51 crc kubenswrapper[4492]: I1126 07:05:51.323472 4492 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-6449747765-nngzp" podUID="5bc62996-17f2-4415-a9d5-3219cfb079f9" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.112:5353: i/o timeout"
Nov 26 07:05:51 crc kubenswrapper[4492]: I1126 07:05:51.762889 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Nov 26 07:05:51 crc kubenswrapper[4492]: I1126 07:05:51.926190 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"bd7a8c3f-6281-4b33-a56a-2a93844e584d\" (UID: \"bd7a8c3f-6281-4b33-a56a-2a93844e584d\") "
Nov 26 07:05:51 crc kubenswrapper[4492]: I1126 07:05:51.926377 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/bd7a8c3f-6281-4b33-a56a-2a93844e584d-httpd-run\") pod \"bd7a8c3f-6281-4b33-a56a-2a93844e584d\" (UID: \"bd7a8c3f-6281-4b33-a56a-2a93844e584d\") "
Nov 26 07:05:51 crc kubenswrapper[4492]: I1126 07:05:51.926423 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bd7a8c3f-6281-4b33-a56a-2a93844e584d-logs\") pod \"bd7a8c3f-6281-4b33-a56a-2a93844e584d\" (UID: \"bd7a8c3f-6281-4b33-a56a-2a93844e584d\") "
Nov 26 07:05:51 crc kubenswrapper[4492]: I1126 07:05:51.926465 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bd7a8c3f-6281-4b33-a56a-2a93844e584d-scripts\") pod \"bd7a8c3f-6281-4b33-a56a-2a93844e584d\" (UID: \"bd7a8c3f-6281-4b33-a56a-2a93844e584d\") "
Nov 26 07:05:51 crc kubenswrapper[4492]: I1126 07:05:51.926739 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd7a8c3f-6281-4b33-a56a-2a93844e584d-combined-ca-bundle\") pod \"bd7a8c3f-6281-4b33-a56a-2a93844e584d\" (UID: \"bd7a8c3f-6281-4b33-a56a-2a93844e584d\") "
Nov 26 07:05:51 crc kubenswrapper[4492]: I1126 07:05:51.926802 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2zdls\" (UniqueName: \"kubernetes.io/projected/bd7a8c3f-6281-4b33-a56a-2a93844e584d-kube-api-access-2zdls\") pod \"bd7a8c3f-6281-4b33-a56a-2a93844e584d\" (UID: \"bd7a8c3f-6281-4b33-a56a-2a93844e584d\") "
Nov 26 07:05:51 crc kubenswrapper[4492]: I1126 07:05:51.927065 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bd7a8c3f-6281-4b33-a56a-2a93844e584d-config-data\") pod \"bd7a8c3f-6281-4b33-a56a-2a93844e584d\" (UID: \"bd7a8c3f-6281-4b33-a56a-2a93844e584d\") "
Nov 26 07:05:51 crc kubenswrapper[4492]: I1126 07:05:51.929822 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bd7a8c3f-6281-4b33-a56a-2a93844e584d-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "bd7a8c3f-6281-4b33-a56a-2a93844e584d" (UID: "bd7a8c3f-6281-4b33-a56a-2a93844e584d"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 07:05:51 crc kubenswrapper[4492]: I1126 07:05:51.930269 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bd7a8c3f-6281-4b33-a56a-2a93844e584d-logs" (OuterVolumeSpecName: "logs") pod "bd7a8c3f-6281-4b33-a56a-2a93844e584d" (UID: "bd7a8c3f-6281-4b33-a56a-2a93844e584d"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 07:05:51 crc kubenswrapper[4492]: I1126 07:05:51.945385 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage02-crc" (OuterVolumeSpecName: "glance") pod "bd7a8c3f-6281-4b33-a56a-2a93844e584d" (UID: "bd7a8c3f-6281-4b33-a56a-2a93844e584d"). InnerVolumeSpecName "local-storage02-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue ""
Nov 26 07:05:51 crc kubenswrapper[4492]: I1126 07:05:51.957781 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd7a8c3f-6281-4b33-a56a-2a93844e584d-kube-api-access-2zdls" (OuterVolumeSpecName: "kube-api-access-2zdls") pod "bd7a8c3f-6281-4b33-a56a-2a93844e584d" (UID: "bd7a8c3f-6281-4b33-a56a-2a93844e584d"). InnerVolumeSpecName "kube-api-access-2zdls". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 07:05:51 crc kubenswrapper[4492]: I1126 07:05:51.958547 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bd7a8c3f-6281-4b33-a56a-2a93844e584d-scripts" (OuterVolumeSpecName: "scripts") pod "bd7a8c3f-6281-4b33-a56a-2a93844e584d" (UID: "bd7a8c3f-6281-4b33-a56a-2a93844e584d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 07:05:52 crc kubenswrapper[4492]: I1126 07:05:52.012280 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bd7a8c3f-6281-4b33-a56a-2a93844e584d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "bd7a8c3f-6281-4b33-a56a-2a93844e584d" (UID: "bd7a8c3f-6281-4b33-a56a-2a93844e584d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 07:05:52 crc kubenswrapper[4492]: I1126 07:05:52.015269 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bd7a8c3f-6281-4b33-a56a-2a93844e584d-config-data" (OuterVolumeSpecName: "config-data") pod "bd7a8c3f-6281-4b33-a56a-2a93844e584d" (UID: "bd7a8c3f-6281-4b33-a56a-2a93844e584d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 07:05:52 crc kubenswrapper[4492]: I1126 07:05:52.029983 4492 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bd7a8c3f-6281-4b33-a56a-2a93844e584d-config-data\") on node \"crc\" DevicePath \"\""
Nov 26 07:05:52 crc kubenswrapper[4492]: I1126 07:05:52.030398 4492 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" "
Nov 26 07:05:52 crc kubenswrapper[4492]: I1126 07:05:52.030412 4492 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/bd7a8c3f-6281-4b33-a56a-2a93844e584d-httpd-run\") on node \"crc\" DevicePath \"\""
Nov 26 07:05:52 crc kubenswrapper[4492]: I1126 07:05:52.030425 4492 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bd7a8c3f-6281-4b33-a56a-2a93844e584d-logs\") on node \"crc\" DevicePath \"\""
Nov 26 07:05:52 crc kubenswrapper[4492]: I1126 07:05:52.030434 4492 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bd7a8c3f-6281-4b33-a56a-2a93844e584d-scripts\") on node \"crc\" DevicePath \"\""
Nov 26 07:05:52 crc kubenswrapper[4492]: I1126 07:05:52.030444 4492 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd7a8c3f-6281-4b33-a56a-2a93844e584d-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 26 07:05:52 crc kubenswrapper[4492]: I1126 07:05:52.030455 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2zdls\" (UniqueName: \"kubernetes.io/projected/bd7a8c3f-6281-4b33-a56a-2a93844e584d-kube-api-access-2zdls\") on node \"crc\" DevicePath \"\""
Nov 26 07:05:52 crc kubenswrapper[4492]: I1126 07:05:52.060332 4492 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage02-crc" (UniqueName: "kubernetes.io/local-volume/local-storage02-crc") on node "crc"
Nov 26 07:05:52 crc kubenswrapper[4492]: I1126 07:05:52.134960 4492 reconciler_common.go:293] "Volume detached for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" DevicePath \"\""
Nov 26 07:05:52 crc kubenswrapper[4492]: I1126 07:05:52.145979 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5f9588bddf-km75q" event={"ID":"7c875225-0af5-49c4-b273-85ed6c498f18","Type":"ContainerStarted","Data":"be48dc4add9237f59dba9cdc87b34e1c170d387394fd55601a0311acbc5423b1"}
Nov 26 07:05:52 crc kubenswrapper[4492]: I1126 07:05:52.153379 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-5f9588bddf-km75q"
Nov 26 07:05:52 crc kubenswrapper[4492]: I1126 07:05:52.173572 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-7bb6557f96-rgc7g" event={"ID":"a15ec528-9195-4dfe-95b7-e30a44f74b44","Type":"ContainerStarted","Data":"43fafd6a195677c69054be8121016986713c956dc138b464b122d51ce9a8af53"}
Nov 26 07:05:52 crc kubenswrapper[4492]: I1126 07:05:52.189767 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc","Type":"ContainerStarted","Data":"5ea9c972c1cf68b00b9528962903d8c7078fa73512e8150c37f248b0aa4f78b9"}
Nov 26 07:05:52 crc kubenswrapper[4492]: I1126 07:05:52.194658 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-5f9588bddf-km75q" podStartSLOduration=18.194640236 podStartE2EDuration="18.194640236s" podCreationTimestamp="2025-11-26 07:05:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:05:52.172936359 +0000 UTC m=+1048.056824658" watchObservedRunningTime="2025-11-26 07:05:52.194640236 +0000 UTC m=+1048.078528534"
Nov 26 07:05:52 crc kubenswrapper[4492]: I1126 07:05:52.202213 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-hqvtp" event={"ID":"dbe81971-8d1f-4681-9bfb-5b13a46a5788","Type":"ContainerStarted","Data":"ad33beecc6ccb3a991f02d101327f026e456d05ca55d63d06df60954716ad9d4"}
Nov 26 07:05:52 crc kubenswrapper[4492]: I1126 07:05:52.204999 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-7bb6557f96-rgc7g" podStartSLOduration=29.204989314 podStartE2EDuration="29.204989314s" podCreationTimestamp="2025-11-26 07:05:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:05:52.195851083 +0000 UTC m=+1048.079739382" watchObservedRunningTime="2025-11-26 07:05:52.204989314 +0000 UTC m=+1048.088877613"
Nov 26 07:05:52 crc kubenswrapper[4492]: I1126 07:05:52.208545 4492 generic.go:334] "Generic (PLEG): container finished" podID="0093dcb6-c7e5-4b5a-94a3-55fc7465109a" containerID="e57a8174821d3fac1cf7e65a6bc615d669095d79385d24cb311950da4fcd023d" exitCode=0
Nov 26 07:05:52 crc kubenswrapper[4492]: I1126 07:05:52.208593 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-79hj2" event={"ID":"0093dcb6-c7e5-4b5a-94a3-55fc7465109a","Type":"ContainerDied","Data":"e57a8174821d3fac1cf7e65a6bc615d669095d79385d24cb311950da4fcd023d"}
Nov 26 07:05:52 crc kubenswrapper[4492]: I1126 07:05:52.210649 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-86bdc94cc6-ws8xc" event={"ID":"38194db3-f048-45e5-80d6-7dfa8f1f7420","Type":"ContainerStarted","Data":"16a3767d292dc632cef3268e9d839310095282f2a888535ea9fcc2cd0db9fa99"}
Nov 26 07:05:52 crc kubenswrapper[4492]: I1126 07:05:52.210677 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-86bdc94cc6-ws8xc" event={"ID":"38194db3-f048-45e5-80d6-7dfa8f1f7420","Type":"ContainerStarted","Data":"4a7744552769063790091905df7e367ce4227767f52b93cb69f2cf6f31dd1597"}
Nov 26 07:05:52 crc kubenswrapper[4492]: I1126 07:05:52.213411 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-86bdc94cc6-ws8xc"
Nov 26 07:05:52 crc kubenswrapper[4492]: I1126 07:05:52.218106 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-hqvtp" podStartSLOduration=13.218096218 podStartE2EDuration="13.218096218s" podCreationTimestamp="2025-11-26 07:05:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:05:52.21742663 +0000 UTC m=+1048.101314918" watchObservedRunningTime="2025-11-26 07:05:52.218096218 +0000 UTC m=+1048.101984517"
Nov 26 07:05:52 crc kubenswrapper[4492]: I1126 07:05:52.222882 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6b48756c9b-4pxln" event={"ID":"66834ee2-a38b-4d8d-9195-c4af38dc8a9b","Type":"ContainerStarted","Data":"f7c2e4403ed6dcbc3b6601fc79da890c7dcf0f7e702ec286a1a637fd30b07bee"}
Nov 26 07:05:52 crc kubenswrapper[4492]: I1126 07:05:52.250304 4492 generic.go:334] "Generic (PLEG): container finished" podID="bd7a8c3f-6281-4b33-a56a-2a93844e584d" containerID="335f331dbee7a43d7b3042186392923490e0ef304ad79cbf5fbd97fd716a997e" exitCode=0
Nov 26 07:05:52 crc kubenswrapper[4492]: I1126 07:05:52.250331 4492 generic.go:334] "Generic (PLEG): container finished" podID="bd7a8c3f-6281-4b33-a56a-2a93844e584d" containerID="5cfe2cdbc5fbe39dfa408de2f3d42ecc8fcd3a1ea462a7493c76e15b87cec719" exitCode=143
Nov 26 07:05:52 crc kubenswrapper[4492]: I1126 07:05:52.250369 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"bd7a8c3f-6281-4b33-a56a-2a93844e584d","Type":"ContainerDied","Data":"335f331dbee7a43d7b3042186392923490e0ef304ad79cbf5fbd97fd716a997e"}
Nov 26 07:05:52 crc kubenswrapper[4492]: I1126 07:05:52.250390 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"bd7a8c3f-6281-4b33-a56a-2a93844e584d","Type":"ContainerDied","Data":"5cfe2cdbc5fbe39dfa408de2f3d42ecc8fcd3a1ea462a7493c76e15b87cec719"}
Nov 26 07:05:52 crc kubenswrapper[4492]: I1126 07:05:52.250399 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"bd7a8c3f-6281-4b33-a56a-2a93844e584d","Type":"ContainerDied","Data":"bd7858a4065c31ad310468d7e37fc6d985b4c57ef605ecf45399f6b158e6a53d"}
Nov 26 07:05:52 crc kubenswrapper[4492]: I1126 07:05:52.250413 4492 scope.go:117] "RemoveContainer" containerID="335f331dbee7a43d7b3042186392923490e0ef304ad79cbf5fbd97fd716a997e"
Nov 26 07:05:52 crc kubenswrapper[4492]: I1126 07:05:52.250514 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Nov 26 07:05:52 crc kubenswrapper[4492]: I1126 07:05:52.269842 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-86bdc94cc6-ws8xc" podStartSLOduration=20.269823084 podStartE2EDuration="20.269823084s" podCreationTimestamp="2025-11-26 07:05:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:05:52.234506212 +0000 UTC m=+1048.118394520" watchObservedRunningTime="2025-11-26 07:05:52.269823084 +0000 UTC m=+1048.153711382"
Nov 26 07:05:52 crc kubenswrapper[4492]: I1126 07:05:52.284075 4492 generic.go:334] "Generic (PLEG): container finished" podID="4f397877-f399-472d-a32d-11cb9b87fd73" containerID="dcd23b3979acf9eeb360e46d62496cb82707a3ad11569bbdc413d7006cb80318" exitCode=0
Nov 26 07:05:52 crc kubenswrapper[4492]: I1126 07:05:52.284144 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cb64f9797-5jdj2" event={"ID":"4f397877-f399-472d-a32d-11cb9b87fd73","Type":"ContainerDied","Data":"dcd23b3979acf9eeb360e46d62496cb82707a3ad11569bbdc413d7006cb80318"}
Nov 26 07:05:52 crc kubenswrapper[4492]: I1126 07:05:52.301650 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"0033e995-8279-4229-8ea7-7339427960a8","Type":"ContainerStarted","Data":"475390b01e9e8c15150563cb3690b9bbf1e5c2ba5bac57607388fb705caf9319"}
Nov 26 07:05:52 crc kubenswrapper[4492]: I1126 07:05:52.304606 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-6b48756c9b-4pxln" podStartSLOduration=28.304590744 podStartE2EDuration="28.304590744s" podCreationTimestamp="2025-11-26 07:05:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:05:52.277444243 +0000 UTC m=+1048.161332542" watchObservedRunningTime="2025-11-26 07:05:52.304590744 +0000 UTC m=+1048.188479042"
Nov 26 07:05:52 crc kubenswrapper[4492]: I1126 07:05:52.319648 4492 scope.go:117] "RemoveContainer" containerID="5cfe2cdbc5fbe39dfa408de2f3d42ecc8fcd3a1ea462a7493c76e15b87cec719"
Nov 26 07:05:52 crc kubenswrapper[4492]: I1126 07:05:52.371228 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"]
Nov 26 07:05:52 crc kubenswrapper[4492]: I1126 07:05:52.400214 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"]
Nov 26 07:05:52 crc kubenswrapper[4492]: I1126 07:05:52.415953 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"]
Nov 26 07:05:52 crc kubenswrapper[4492]: E1126 07:05:52.416449 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bd7a8c3f-6281-4b33-a56a-2a93844e584d" containerName="glance-log"
Nov 26 07:05:52 crc kubenswrapper[4492]: I1126 07:05:52.416471 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="bd7a8c3f-6281-4b33-a56a-2a93844e584d" containerName="glance-log"
Nov 26 07:05:52 crc kubenswrapper[4492]: E1126 07:05:52.416498 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bd7a8c3f-6281-4b33-a56a-2a93844e584d" containerName="glance-httpd"
Nov 26 07:05:52 crc kubenswrapper[4492]: I1126 07:05:52.416503 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="bd7a8c3f-6281-4b33-a56a-2a93844e584d" containerName="glance-httpd"
Nov 26 07:05:52 crc kubenswrapper[4492]: E1126 07:05:52.416526 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5bc62996-17f2-4415-a9d5-3219cfb079f9" containerName="dnsmasq-dns"
Nov 26 07:05:52 crc kubenswrapper[4492]: I1126 07:05:52.416531 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="5bc62996-17f2-4415-a9d5-3219cfb079f9" containerName="dnsmasq-dns"
Nov 26 07:05:52 crc kubenswrapper[4492]: E1126 07:05:52.416543 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5bc62996-17f2-4415-a9d5-3219cfb079f9" containerName="init"
Nov 26 07:05:52 crc kubenswrapper[4492]: I1126 07:05:52.416548 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="5bc62996-17f2-4415-a9d5-3219cfb079f9" containerName="init"
Nov 26 07:05:52 crc kubenswrapper[4492]: I1126 07:05:52.416705 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="bd7a8c3f-6281-4b33-a56a-2a93844e584d" containerName="glance-httpd"
Nov 26 07:05:52 crc kubenswrapper[4492]: I1126 07:05:52.416724 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="bd7a8c3f-6281-4b33-a56a-2a93844e584d" containerName="glance-log"
Nov 26 07:05:52 crc kubenswrapper[4492]: I1126 07:05:52.416733 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="5bc62996-17f2-4415-a9d5-3219cfb079f9" containerName="dnsmasq-dns"
Nov 26 07:05:52 crc kubenswrapper[4492]: I1126 07:05:52.417731 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Nov 26 07:05:52 crc kubenswrapper[4492]: I1126 07:05:52.423599 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data"
Nov 26 07:05:52 crc kubenswrapper[4492]: I1126 07:05:52.423752 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc"
Nov 26 07:05:52 crc kubenswrapper[4492]: I1126 07:05:52.431640 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"]
Nov 26 07:05:52 crc kubenswrapper[4492]: I1126 07:05:52.464480 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd7a8c3f-6281-4b33-a56a-2a93844e584d" path="/var/lib/kubelet/pods/bd7a8c3f-6281-4b33-a56a-2a93844e584d/volumes"
Nov 26 07:05:52 crc kubenswrapper[4492]: I1126 07:05:52.482004 4492 scope.go:117] "RemoveContainer" containerID="335f331dbee7a43d7b3042186392923490e0ef304ad79cbf5fbd97fd716a997e"
Nov 26 07:05:52 crc kubenswrapper[4492]: E1126 07:05:52.485338 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"335f331dbee7a43d7b3042186392923490e0ef304ad79cbf5fbd97fd716a997e\": container with ID starting with 335f331dbee7a43d7b3042186392923490e0ef304ad79cbf5fbd97fd716a997e not found: ID does not exist" containerID="335f331dbee7a43d7b3042186392923490e0ef304ad79cbf5fbd97fd716a997e"
Nov 26 07:05:52 crc kubenswrapper[4492]: I1126 07:05:52.485380 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"335f331dbee7a43d7b3042186392923490e0ef304ad79cbf5fbd97fd716a997e"} err="failed to get container status \"335f331dbee7a43d7b3042186392923490e0ef304ad79cbf5fbd97fd716a997e\": rpc error: code = NotFound desc = could not find container \"335f331dbee7a43d7b3042186392923490e0ef304ad79cbf5fbd97fd716a997e\": container with ID starting with 335f331dbee7a43d7b3042186392923490e0ef304ad79cbf5fbd97fd716a997e not found: ID does not exist"
Nov 26 07:05:52 crc kubenswrapper[4492]: I1126 07:05:52.485409 4492 scope.go:117] "RemoveContainer" containerID="5cfe2cdbc5fbe39dfa408de2f3d42ecc8fcd3a1ea462a7493c76e15b87cec719"
Nov 26 07:05:52 crc kubenswrapper[4492]: E1126 07:05:52.488235 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5cfe2cdbc5fbe39dfa408de2f3d42ecc8fcd3a1ea462a7493c76e15b87cec719\": container with ID starting with 5cfe2cdbc5fbe39dfa408de2f3d42ecc8fcd3a1ea462a7493c76e15b87cec719 not found: ID does not exist" containerID="5cfe2cdbc5fbe39dfa408de2f3d42ecc8fcd3a1ea462a7493c76e15b87cec719"
Nov 26 07:05:52 crc kubenswrapper[4492]: I1126 07:05:52.488269 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5cfe2cdbc5fbe39dfa408de2f3d42ecc8fcd3a1ea462a7493c76e15b87cec719"} err="failed to get container status \"5cfe2cdbc5fbe39dfa408de2f3d42ecc8fcd3a1ea462a7493c76e15b87cec719\": rpc error: code = NotFound desc = could not find container \"5cfe2cdbc5fbe39dfa408de2f3d42ecc8fcd3a1ea462a7493c76e15b87cec719\": container with ID starting with 5cfe2cdbc5fbe39dfa408de2f3d42ecc8fcd3a1ea462a7493c76e15b87cec719 not found: ID does not exist"
Nov 26 07:05:52 crc kubenswrapper[4492]: I1126 07:05:52.488289 4492 scope.go:117] "RemoveContainer" containerID="335f331dbee7a43d7b3042186392923490e0ef304ad79cbf5fbd97fd716a997e"
Nov 26 07:05:52 crc kubenswrapper[4492]: I1126 07:05:52.492215 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"335f331dbee7a43d7b3042186392923490e0ef304ad79cbf5fbd97fd716a997e"} err="failed to get container status \"335f331dbee7a43d7b3042186392923490e0ef304ad79cbf5fbd97fd716a997e\": rpc error: code = NotFound desc = could not find container \"335f331dbee7a43d7b3042186392923490e0ef304ad79cbf5fbd97fd716a997e\": container with ID starting with 335f331dbee7a43d7b3042186392923490e0ef304ad79cbf5fbd97fd716a997e not found: ID does not exist"
Nov 26 07:05:52 crc kubenswrapper[4492]: I1126 07:05:52.492245 4492 scope.go:117] "RemoveContainer" containerID="5cfe2cdbc5fbe39dfa408de2f3d42ecc8fcd3a1ea462a7493c76e15b87cec719"
Nov 26 07:05:52 crc kubenswrapper[4492]: I1126 07:05:52.496324 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5cfe2cdbc5fbe39dfa408de2f3d42ecc8fcd3a1ea462a7493c76e15b87cec719"} err="failed to get container status \"5cfe2cdbc5fbe39dfa408de2f3d42ecc8fcd3a1ea462a7493c76e15b87cec719\": rpc error: code = NotFound desc = could not find container \"5cfe2cdbc5fbe39dfa408de2f3d42ecc8fcd3a1ea462a7493c76e15b87cec719\": container with ID starting with 5cfe2cdbc5fbe39dfa408de2f3d42ecc8fcd3a1ea462a7493c76e15b87cec719 not found: ID does not exist"
Nov 26 07:05:52 crc kubenswrapper[4492]: I1126 07:05:52.500404 4492 pod_container_manager_linux.go:210] "Failed to delete cgroup paths" cgroupName=["kubepods","besteffort","pod0dfd6d10-c083-4e37-8ce9-7578e8ea8fd1"] err="unable to destroy cgroup paths for cgroup [kubepods besteffort pod0dfd6d10-c083-4e37-8ce9-7578e8ea8fd1] : Timed out while waiting for systemd to remove kubepods-besteffort-pod0dfd6d10_c083_4e37_8ce9_7578e8ea8fd1.slice"
Nov 26 07:05:52 crc kubenswrapper[4492]: I1126 07:05:52.550562 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/37ed6b48-37b7-479f-837f-d49432778c49-config-data\") pod \"glance-default-external-api-0\" (UID: \"37ed6b48-37b7-479f-837f-d49432778c49\") " pod="openstack/glance-default-external-api-0"
Nov 26 07:05:52 crc kubenswrapper[4492]: I1126 07:05:52.550674 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-external-api-0\" (UID: \"37ed6b48-37b7-479f-837f-d49432778c49\") " pod="openstack/glance-default-external-api-0"
Nov 26 07:05:52 crc kubenswrapper[4492]: I1126 07:05:52.550723 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/37ed6b48-37b7-479f-837f-d49432778c49-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"37ed6b48-37b7-479f-837f-d49432778c49\") " pod="openstack/glance-default-external-api-0"
Nov 26 07:05:52 crc kubenswrapper[4492]: I1126 07:05:52.550782 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kxhr5\" (UniqueName: \"kubernetes.io/projected/37ed6b48-37b7-479f-837f-d49432778c49-kube-api-access-kxhr5\") pod \"glance-default-external-api-0\" (UID: \"37ed6b48-37b7-479f-837f-d49432778c49\") " pod="openstack/glance-default-external-api-0"
Nov 26 07:05:52 crc kubenswrapper[4492]: I1126 07:05:52.550814 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/37ed6b48-37b7-479f-837f-d49432778c49-logs\") pod \"glance-default-external-api-0\" (UID: \"37ed6b48-37b7-479f-837f-d49432778c49\") " pod="openstack/glance-default-external-api-0"
Nov 26 07:05:52 crc kubenswrapper[4492]: I1126 07:05:52.550840 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/37ed6b48-37b7-479f-837f-d49432778c49-scripts\") pod \"glance-default-external-api-0\" (UID: \"37ed6b48-37b7-479f-837f-d49432778c49\") " pod="openstack/glance-default-external-api-0"
Nov 26 07:05:52 crc kubenswrapper[4492]: I1126 07:05:52.550856 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/37ed6b48-37b7-479f-837f-d49432778c49-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"37ed6b48-37b7-479f-837f-d49432778c49\") " pod="openstack/glance-default-external-api-0"
Nov 26 07:05:52 crc kubenswrapper[4492]: I1126 07:05:52.550872 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/37ed6b48-37b7-479f-837f-d49432778c49-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"37ed6b48-37b7-479f-837f-d49432778c49\") " pod="openstack/glance-default-external-api-0"
Nov 26 07:05:52 crc kubenswrapper[4492]: I1126 07:05:52.653881 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/37ed6b48-37b7-479f-837f-d49432778c49-config-data\") pod \"glance-default-external-api-0\" (UID: \"37ed6b48-37b7-479f-837f-d49432778c49\") " pod="openstack/glance-default-external-api-0"
Nov 26 07:05:52 crc kubenswrapper[4492]: I1126 07:05:52.654158 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-external-api-0\" (UID: \"37ed6b48-37b7-479f-837f-d49432778c49\") " pod="openstack/glance-default-external-api-0"
Nov 26 07:05:52 crc kubenswrapper[4492]: I1126 07:05:52.655110 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/37ed6b48-37b7-479f-837f-d49432778c49-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"37ed6b48-37b7-479f-837f-d49432778c49\") " pod="openstack/glance-default-external-api-0"
Nov 26 07:05:52 crc kubenswrapper[4492]: I1126 07:05:52.655201 4492 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-external-api-0\" (UID: \"37ed6b48-37b7-479f-837f-d49432778c49\") device mount path \"/mnt/openstack/pv02\"" pod="openstack/glance-default-external-api-0"
Nov 26 07:05:52 crc kubenswrapper[4492]: I1126 07:05:52.656038 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kxhr5\" (UniqueName: \"kubernetes.io/projected/37ed6b48-37b7-479f-837f-d49432778c49-kube-api-access-kxhr5\") pod \"glance-default-external-api-0\" (UID: \"37ed6b48-37b7-479f-837f-d49432778c49\") " pod="openstack/glance-default-external-api-0"
Nov 26 07:05:52 crc kubenswrapper[4492]: I1126 07:05:52.656248 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/37ed6b48-37b7-479f-837f-d49432778c49-logs\") pod \"glance-default-external-api-0\" (UID: \"37ed6b48-37b7-479f-837f-d49432778c49\") " pod="openstack/glance-default-external-api-0"
Nov 26 07:05:52 crc kubenswrapper[4492]: I1126 07:05:52.656404 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/37ed6b48-37b7-479f-837f-d49432778c49-scripts\") pod \"glance-default-external-api-0\" (UID: \"37ed6b48-37b7-479f-837f-d49432778c49\") " pod="openstack/glance-default-external-api-0"
Nov 26 07:05:52 crc kubenswrapper[4492]: I1126 07:05:52.656470 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/37ed6b48-37b7-479f-837f-d49432778c49-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"37ed6b48-37b7-479f-837f-d49432778c49\") " pod="openstack/glance-default-external-api-0"
Nov 26 07:05:52 crc kubenswrapper[4492]: I1126 07:05:52.656507 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/37ed6b48-37b7-479f-837f-d49432778c49-logs\") pod \"glance-default-external-api-0\" (UID: \"37ed6b48-37b7-479f-837f-d49432778c49\") " pod="openstack/glance-default-external-api-0"
Nov 26 07:05:52 crc kubenswrapper[4492]: I1126 07:05:52.656600 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/37ed6b48-37b7-479f-837f-d49432778c49-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"37ed6b48-37b7-479f-837f-d49432778c49\") " pod="openstack/glance-default-external-api-0"
Nov 26 07:05:52 crc kubenswrapper[4492]: I1126 07:05:52.656623 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/37ed6b48-37b7-479f-837f-d49432778c49-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"37ed6b48-37b7-479f-837f-d49432778c49\") " pod="openstack/glance-default-external-api-0"
Nov 26 07:05:52 crc kubenswrapper[4492]: I1126 07:05:52.672393 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/37ed6b48-37b7-479f-837f-d49432778c49-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"37ed6b48-37b7-479f-837f-d49432778c49\") " pod="openstack/glance-default-external-api-0"
Nov 26 07:05:52 crc kubenswrapper[4492]: I1126 07:05:52.672409 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/37ed6b48-37b7-479f-837f-d49432778c49-scripts\") pod \"glance-default-external-api-0\" (UID: \"37ed6b48-37b7-479f-837f-d49432778c49\") " pod="openstack/glance-default-external-api-0"
Nov 26 07:05:52 crc kubenswrapper[4492]: I1126 07:05:52.672867 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/37ed6b48-37b7-479f-837f-d49432778c49-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"37ed6b48-37b7-479f-837f-d49432778c49\") " pod="openstack/glance-default-external-api-0"
Nov 26 07:05:52 crc kubenswrapper[4492]: I1126 07:05:52.677629 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/37ed6b48-37b7-479f-837f-d49432778c49-config-data\") pod \"glance-default-external-api-0\" (UID: \"37ed6b48-37b7-479f-837f-d49432778c49\") " pod="openstack/glance-default-external-api-0"
Nov 26 07:05:52 crc kubenswrapper[4492]: I1126 07:05:52.678063 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kxhr5\" (UniqueName: \"kubernetes.io/projected/37ed6b48-37b7-479f-837f-d49432778c49-kube-api-access-kxhr5\") pod \"glance-default-external-api-0\" (UID: \"37ed6b48-37b7-479f-837f-d49432778c49\") " pod="openstack/glance-default-external-api-0"
Nov 26 07:05:52 crc kubenswrapper[4492]: I1126 07:05:52.694273 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-external-api-0\" (UID: \"37ed6b48-37b7-479f-837f-d49432778c49\") " pod="openstack/glance-default-external-api-0"
Nov 26 07:05:52 crc kubenswrapper[4492]: I1126 07:05:52.792316 4492 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 26 07:05:53 crc kubenswrapper[4492]: I1126 07:05:53.323554 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cb64f9797-5jdj2" event={"ID":"4f397877-f399-472d-a32d-11cb9b87fd73","Type":"ContainerStarted","Data":"e08d804f4aee77c042bd46163828d85cdfff70f369ca3e0e6d23dace54d1eefa"} Nov 26 07:05:53 crc kubenswrapper[4492]: I1126 07:05:53.323824 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-cb64f9797-5jdj2" Nov 26 07:05:53 crc kubenswrapper[4492]: I1126 07:05:53.336279 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"0033e995-8279-4229-8ea7-7339427960a8","Type":"ContainerStarted","Data":"819d53d9735776db0f3f096f2963801b83c52364998e4734ca417bde1a893170"} Nov 26 07:05:53 crc kubenswrapper[4492]: I1126 07:05:53.336745 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="0033e995-8279-4229-8ea7-7339427960a8" containerName="glance-log" containerID="cri-o://475390b01e9e8c15150563cb3690b9bbf1e5c2ba5bac57607388fb705caf9319" gracePeriod=30 Nov 26 07:05:53 crc kubenswrapper[4492]: I1126 07:05:53.337033 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="0033e995-8279-4229-8ea7-7339427960a8" containerName="glance-httpd" containerID="cri-o://819d53d9735776db0f3f096f2963801b83c52364998e4734ca417bde1a893170" gracePeriod=30 Nov 26 07:05:53 crc kubenswrapper[4492]: I1126 07:05:53.346071 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-cb64f9797-5jdj2" podStartSLOduration=21.346060462 podStartE2EDuration="21.346060462s" podCreationTimestamp="2025-11-26 07:05:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:05:53.342753505 +0000 UTC m=+1049.226641792" watchObservedRunningTime="2025-11-26 07:05:53.346060462 +0000 UTC m=+1049.229948760" Nov 26 07:05:53 crc kubenswrapper[4492]: I1126 07:05:53.361484 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=31.36147223 podStartE2EDuration="31.36147223s" podCreationTimestamp="2025-11-26 07:05:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:05:53.3590366 +0000 UTC m=+1049.242924898" watchObservedRunningTime="2025-11-26 07:05:53.36147223 +0000 UTC m=+1049.245360528" Nov 26 07:05:53 crc kubenswrapper[4492]: I1126 07:05:53.567215 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 07:05:54 crc kubenswrapper[4492]: I1126 07:05:54.117372 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-6688b9cf7f-qsr5d" Nov 26 07:05:54 crc kubenswrapper[4492]: I1126 07:05:54.366278 4492 generic.go:334] "Generic (PLEG): container finished" podID="0033e995-8279-4229-8ea7-7339427960a8" containerID="819d53d9735776db0f3f096f2963801b83c52364998e4734ca417bde1a893170" exitCode=0 Nov 26 07:05:54 crc kubenswrapper[4492]: I1126 07:05:54.366307 4492 generic.go:334] "Generic (PLEG): container finished" podID="0033e995-8279-4229-8ea7-7339427960a8" 
containerID="475390b01e9e8c15150563cb3690b9bbf1e5c2ba5bac57607388fb705caf9319" exitCode=143 Nov 26 07:05:54 crc kubenswrapper[4492]: I1126 07:05:54.366362 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"0033e995-8279-4229-8ea7-7339427960a8","Type":"ContainerDied","Data":"819d53d9735776db0f3f096f2963801b83c52364998e4734ca417bde1a893170"} Nov 26 07:05:54 crc kubenswrapper[4492]: I1126 07:05:54.366393 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"0033e995-8279-4229-8ea7-7339427960a8","Type":"ContainerDied","Data":"475390b01e9e8c15150563cb3690b9bbf1e5c2ba5bac57607388fb705caf9319"} Nov 26 07:05:54 crc kubenswrapper[4492]: I1126 07:05:54.369814 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-7bb6557f96-rgc7g" Nov 26 07:05:54 crc kubenswrapper[4492]: I1126 07:05:54.369847 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-7bb6557f96-rgc7g" Nov 26 07:05:54 crc kubenswrapper[4492]: I1126 07:05:54.371675 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"37ed6b48-37b7-479f-837f-d49432778c49","Type":"ContainerStarted","Data":"8c3d663e6cefacc24b7ee290273dea6e676208d64b608e3d4fbc1e927295ac14"} Nov 26 07:05:54 crc kubenswrapper[4492]: I1126 07:05:54.491618 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-6b48756c9b-4pxln" Nov 26 07:05:54 crc kubenswrapper[4492]: I1126 07:05:54.491668 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-6b48756c9b-4pxln" Nov 26 07:05:54 crc kubenswrapper[4492]: I1126 07:05:54.497407 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-79hj2" Nov 26 07:05:54 crc kubenswrapper[4492]: I1126 07:05:54.626650 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4bt6p\" (UniqueName: \"kubernetes.io/projected/0093dcb6-c7e5-4b5a-94a3-55fc7465109a-kube-api-access-4bt6p\") pod \"0093dcb6-c7e5-4b5a-94a3-55fc7465109a\" (UID: \"0093dcb6-c7e5-4b5a-94a3-55fc7465109a\") " Nov 26 07:05:54 crc kubenswrapper[4492]: I1126 07:05:54.627000 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0093dcb6-c7e5-4b5a-94a3-55fc7465109a-scripts\") pod \"0093dcb6-c7e5-4b5a-94a3-55fc7465109a\" (UID: \"0093dcb6-c7e5-4b5a-94a3-55fc7465109a\") " Nov 26 07:05:54 crc kubenswrapper[4492]: I1126 07:05:54.627053 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0093dcb6-c7e5-4b5a-94a3-55fc7465109a-logs\") pod \"0093dcb6-c7e5-4b5a-94a3-55fc7465109a\" (UID: \"0093dcb6-c7e5-4b5a-94a3-55fc7465109a\") " Nov 26 07:05:54 crc kubenswrapper[4492]: I1126 07:05:54.627077 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0093dcb6-c7e5-4b5a-94a3-55fc7465109a-config-data\") pod \"0093dcb6-c7e5-4b5a-94a3-55fc7465109a\" (UID: \"0093dcb6-c7e5-4b5a-94a3-55fc7465109a\") " Nov 26 07:05:54 crc kubenswrapper[4492]: I1126 07:05:54.627300 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0093dcb6-c7e5-4b5a-94a3-55fc7465109a-combined-ca-bundle\") pod \"0093dcb6-c7e5-4b5a-94a3-55fc7465109a\" (UID: \"0093dcb6-c7e5-4b5a-94a3-55fc7465109a\") " Nov 26 07:05:54 crc kubenswrapper[4492]: I1126 07:05:54.627598 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0093dcb6-c7e5-4b5a-94a3-55fc7465109a-logs" (OuterVolumeSpecName: "logs") pod "0093dcb6-c7e5-4b5a-94a3-55fc7465109a" (UID: "0093dcb6-c7e5-4b5a-94a3-55fc7465109a"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:05:54 crc kubenswrapper[4492]: I1126 07:05:54.627944 4492 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0093dcb6-c7e5-4b5a-94a3-55fc7465109a-logs\") on node \"crc\" DevicePath \"\"" Nov 26 07:05:54 crc kubenswrapper[4492]: I1126 07:05:54.637287 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0093dcb6-c7e5-4b5a-94a3-55fc7465109a-scripts" (OuterVolumeSpecName: "scripts") pod "0093dcb6-c7e5-4b5a-94a3-55fc7465109a" (UID: "0093dcb6-c7e5-4b5a-94a3-55fc7465109a"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:05:54 crc kubenswrapper[4492]: I1126 07:05:54.642394 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0093dcb6-c7e5-4b5a-94a3-55fc7465109a-kube-api-access-4bt6p" (OuterVolumeSpecName: "kube-api-access-4bt6p") pod "0093dcb6-c7e5-4b5a-94a3-55fc7465109a" (UID: "0093dcb6-c7e5-4b5a-94a3-55fc7465109a"). InnerVolumeSpecName "kube-api-access-4bt6p". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:05:54 crc kubenswrapper[4492]: I1126 07:05:54.680728 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0093dcb6-c7e5-4b5a-94a3-55fc7465109a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0093dcb6-c7e5-4b5a-94a3-55fc7465109a" (UID: "0093dcb6-c7e5-4b5a-94a3-55fc7465109a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:05:54 crc kubenswrapper[4492]: I1126 07:05:54.710556 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0093dcb6-c7e5-4b5a-94a3-55fc7465109a-config-data" (OuterVolumeSpecName: "config-data") pod "0093dcb6-c7e5-4b5a-94a3-55fc7465109a" (UID: "0093dcb6-c7e5-4b5a-94a3-55fc7465109a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:05:54 crc kubenswrapper[4492]: I1126 07:05:54.740472 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4bt6p\" (UniqueName: \"kubernetes.io/projected/0093dcb6-c7e5-4b5a-94a3-55fc7465109a-kube-api-access-4bt6p\") on node \"crc\" DevicePath \"\"" Nov 26 07:05:54 crc kubenswrapper[4492]: I1126 07:05:54.740508 4492 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0093dcb6-c7e5-4b5a-94a3-55fc7465109a-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 07:05:54 crc kubenswrapper[4492]: I1126 07:05:54.740518 4492 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0093dcb6-c7e5-4b5a-94a3-55fc7465109a-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 07:05:54 crc kubenswrapper[4492]: I1126 07:05:54.740528 4492 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0093dcb6-c7e5-4b5a-94a3-55fc7465109a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:05:54 crc kubenswrapper[4492]: I1126 07:05:54.862563 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 26 07:05:54 crc kubenswrapper[4492]: I1126 07:05:54.944666 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0033e995-8279-4229-8ea7-7339427960a8-config-data\") pod \"0033e995-8279-4229-8ea7-7339427960a8\" (UID: \"0033e995-8279-4229-8ea7-7339427960a8\") " Nov 26 07:05:54 crc kubenswrapper[4492]: I1126 07:05:54.944711 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h5tnz\" (UniqueName: \"kubernetes.io/projected/0033e995-8279-4229-8ea7-7339427960a8-kube-api-access-h5tnz\") pod \"0033e995-8279-4229-8ea7-7339427960a8\" (UID: \"0033e995-8279-4229-8ea7-7339427960a8\") " Nov 26 07:05:54 crc kubenswrapper[4492]: I1126 07:05:54.944888 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0033e995-8279-4229-8ea7-7339427960a8-combined-ca-bundle\") pod \"0033e995-8279-4229-8ea7-7339427960a8\" (UID: \"0033e995-8279-4229-8ea7-7339427960a8\") " Nov 26 07:05:54 crc kubenswrapper[4492]: I1126 07:05:54.945081 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0033e995-8279-4229-8ea7-7339427960a8-scripts\") pod \"0033e995-8279-4229-8ea7-7339427960a8\" (UID: \"0033e995-8279-4229-8ea7-7339427960a8\") " Nov 26 07:05:54 crc kubenswrapper[4492]: I1126 07:05:54.945147 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"0033e995-8279-4229-8ea7-7339427960a8\" (UID: \"0033e995-8279-4229-8ea7-7339427960a8\") " Nov 26 07:05:54 crc kubenswrapper[4492]: I1126 07:05:54.945304 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/0033e995-8279-4229-8ea7-7339427960a8-httpd-run\") pod \"0033e995-8279-4229-8ea7-7339427960a8\" (UID: \"0033e995-8279-4229-8ea7-7339427960a8\") " Nov 26 07:05:54 crc kubenswrapper[4492]: I1126 07:05:54.945361 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0033e995-8279-4229-8ea7-7339427960a8-logs\") pod \"0033e995-8279-4229-8ea7-7339427960a8\" (UID: \"0033e995-8279-4229-8ea7-7339427960a8\") " Nov 26 07:05:54 crc kubenswrapper[4492]: I1126 07:05:54.945589 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0033e995-8279-4229-8ea7-7339427960a8-internal-tls-certs\") pod \"0033e995-8279-4229-8ea7-7339427960a8\" (UID: \"0033e995-8279-4229-8ea7-7339427960a8\") " Nov 26 07:05:54 crc kubenswrapper[4492]: I1126 07:05:54.951997 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0033e995-8279-4229-8ea7-7339427960a8-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "0033e995-8279-4229-8ea7-7339427960a8" (UID: "0033e995-8279-4229-8ea7-7339427960a8"). InnerVolumeSpecName "httpd-run". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:05:54 crc kubenswrapper[4492]: I1126 07:05:54.954291 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0033e995-8279-4229-8ea7-7339427960a8-logs" (OuterVolumeSpecName: "logs") pod "0033e995-8279-4229-8ea7-7339427960a8" (UID: "0033e995-8279-4229-8ea7-7339427960a8"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:05:54 crc kubenswrapper[4492]: I1126 07:05:54.954598 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0033e995-8279-4229-8ea7-7339427960a8-scripts" (OuterVolumeSpecName: "scripts") pod "0033e995-8279-4229-8ea7-7339427960a8" (UID: "0033e995-8279-4229-8ea7-7339427960a8"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:05:54 crc kubenswrapper[4492]: I1126 07:05:54.960433 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0033e995-8279-4229-8ea7-7339427960a8-kube-api-access-h5tnz" (OuterVolumeSpecName: "kube-api-access-h5tnz") pod "0033e995-8279-4229-8ea7-7339427960a8" (UID: "0033e995-8279-4229-8ea7-7339427960a8"). InnerVolumeSpecName "kube-api-access-h5tnz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:05:54 crc kubenswrapper[4492]: I1126 07:05:54.985470 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage05-crc" (OuterVolumeSpecName: "glance") pod "0033e995-8279-4229-8ea7-7339427960a8" (UID: "0033e995-8279-4229-8ea7-7339427960a8"). InnerVolumeSpecName "local-storage05-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 26 07:05:55 crc kubenswrapper[4492]: I1126 07:05:55.022324 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0033e995-8279-4229-8ea7-7339427960a8-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0033e995-8279-4229-8ea7-7339427960a8" (UID: "0033e995-8279-4229-8ea7-7339427960a8"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:05:55 crc kubenswrapper[4492]: I1126 07:05:55.048406 4492 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0033e995-8279-4229-8ea7-7339427960a8-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:05:55 crc kubenswrapper[4492]: I1126 07:05:55.048437 4492 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0033e995-8279-4229-8ea7-7339427960a8-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 07:05:55 crc kubenswrapper[4492]: I1126 07:05:55.048468 4492 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") on node \"crc\" " Nov 26 07:05:55 crc kubenswrapper[4492]: I1126 07:05:55.048479 4492 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/0033e995-8279-4229-8ea7-7339427960a8-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 26 07:05:55 crc kubenswrapper[4492]: I1126 07:05:55.048487 4492 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0033e995-8279-4229-8ea7-7339427960a8-logs\") on node \"crc\" DevicePath \"\"" Nov 26 07:05:55 crc kubenswrapper[4492]: I1126 07:05:55.048496 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h5tnz\" (UniqueName: \"kubernetes.io/projected/0033e995-8279-4229-8ea7-7339427960a8-kube-api-access-h5tnz\") on node \"crc\" DevicePath \"\"" Nov 26 07:05:55 crc kubenswrapper[4492]: I1126 07:05:55.049265 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0033e995-8279-4229-8ea7-7339427960a8-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "0033e995-8279-4229-8ea7-7339427960a8" (UID: "0033e995-8279-4229-8ea7-7339427960a8"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:05:55 crc kubenswrapper[4492]: I1126 07:05:55.067297 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0033e995-8279-4229-8ea7-7339427960a8-config-data" (OuterVolumeSpecName: "config-data") pod "0033e995-8279-4229-8ea7-7339427960a8" (UID: "0033e995-8279-4229-8ea7-7339427960a8"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:05:55 crc kubenswrapper[4492]: I1126 07:05:55.076278 4492 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage05-crc" (UniqueName: "kubernetes.io/local-volume/local-storage05-crc") on node "crc" Nov 26 07:05:55 crc kubenswrapper[4492]: I1126 07:05:55.156278 4492 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0033e995-8279-4229-8ea7-7339427960a8-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 07:05:55 crc kubenswrapper[4492]: I1126 07:05:55.156447 4492 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0033e995-8279-4229-8ea7-7339427960a8-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 07:05:55 crc kubenswrapper[4492]: I1126 07:05:55.156538 4492 reconciler_common.go:293] "Volume detached for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") on node \"crc\" DevicePath \"\"" Nov 26 07:05:55 crc kubenswrapper[4492]: I1126 07:05:55.423601 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"0033e995-8279-4229-8ea7-7339427960a8","Type":"ContainerDied","Data":"a54901be5497d85213d23e1961046affdbf5d4120189d80f95a72c001e93607a"} Nov 26 07:05:55 crc kubenswrapper[4492]: I1126 07:05:55.423986 4492 scope.go:117] "RemoveContainer" containerID="819d53d9735776db0f3f096f2963801b83c52364998e4734ca417bde1a893170" Nov 26 07:05:55 crc kubenswrapper[4492]: I1126 07:05:55.423665 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 26 07:05:55 crc kubenswrapper[4492]: I1126 07:05:55.455446 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"37ed6b48-37b7-479f-837f-d49432778c49","Type":"ContainerStarted","Data":"56f8eb6952e555b8950f54a9952d93a5f0c9484e60f683a0a4cfaa23ea23f274"} Nov 26 07:05:55 crc kubenswrapper[4492]: I1126 07:05:55.458993 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-79hj2" event={"ID":"0093dcb6-c7e5-4b5a-94a3-55fc7465109a","Type":"ContainerDied","Data":"0d2115bb750477e4e896596a267d9b7048f45aca4a72cf2f70c95eeec466b2d1"} Nov 26 07:05:55 crc kubenswrapper[4492]: I1126 07:05:55.459043 4492 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0d2115bb750477e4e896596a267d9b7048f45aca4a72cf2f70c95eeec466b2d1" Nov 26 07:05:55 crc kubenswrapper[4492]: I1126 07:05:55.459110 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-79hj2" Nov 26 07:05:55 crc kubenswrapper[4492]: I1126 07:05:55.484956 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 07:05:55 crc kubenswrapper[4492]: I1126 07:05:55.504788 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 07:05:55 crc kubenswrapper[4492]: I1126 07:05:55.526239 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 07:05:55 crc kubenswrapper[4492]: E1126 07:05:55.526961 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0033e995-8279-4229-8ea7-7339427960a8" containerName="glance-httpd" Nov 26 07:05:55 crc kubenswrapper[4492]: I1126 07:05:55.526986 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="0033e995-8279-4229-8ea7-7339427960a8" containerName="glance-httpd" Nov 26 07:05:55 crc kubenswrapper[4492]: E1126 07:05:55.527024 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0093dcb6-c7e5-4b5a-94a3-55fc7465109a" containerName="placement-db-sync" Nov 26 07:05:55 crc kubenswrapper[4492]: I1126 07:05:55.527032 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="0093dcb6-c7e5-4b5a-94a3-55fc7465109a" containerName="placement-db-sync" Nov 26 07:05:55 crc kubenswrapper[4492]: E1126 07:05:55.527056 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0033e995-8279-4229-8ea7-7339427960a8" containerName="glance-log" Nov 26 07:05:55 crc kubenswrapper[4492]: I1126 07:05:55.527063 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="0033e995-8279-4229-8ea7-7339427960a8" containerName="glance-log" Nov 26 07:05:55 crc kubenswrapper[4492]: I1126 07:05:55.527332 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="0033e995-8279-4229-8ea7-7339427960a8" containerName="glance-log" Nov 26 07:05:55 crc kubenswrapper[4492]: I1126 07:05:55.527349 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="0033e995-8279-4229-8ea7-7339427960a8" containerName="glance-httpd" Nov 26 07:05:55 crc kubenswrapper[4492]: I1126 07:05:55.527358 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="0093dcb6-c7e5-4b5a-94a3-55fc7465109a" containerName="placement-db-sync" Nov 26 07:05:55 crc kubenswrapper[4492]: I1126 07:05:55.528753 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 26 07:05:55 crc kubenswrapper[4492]: I1126 07:05:55.532094 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 26 07:05:55 crc kubenswrapper[4492]: I1126 07:05:55.533615 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Nov 26 07:05:55 crc kubenswrapper[4492]: I1126 07:05:55.545277 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 07:05:55 crc kubenswrapper[4492]: I1126 07:05:55.633509 4492 scope.go:117] "RemoveContainer" containerID="475390b01e9e8c15150563cb3690b9bbf1e5c2ba5bac57607388fb705caf9319" Nov 26 07:05:55 crc kubenswrapper[4492]: I1126 07:05:55.649231 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-7656457cb-pbp6j"] Nov 26 07:05:55 crc kubenswrapper[4492]: I1126 07:05:55.651017 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-7656457cb-pbp6j" Nov 26 07:05:55 crc kubenswrapper[4492]: I1126 07:05:55.665944 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-public-svc" Nov 26 07:05:55 crc kubenswrapper[4492]: I1126 07:05:55.666097 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Nov 26 07:05:55 crc kubenswrapper[4492]: I1126 07:05:55.666227 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-internal-svc" Nov 26 07:05:55 crc kubenswrapper[4492]: I1126 07:05:55.666332 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Nov 26 07:05:55 crc kubenswrapper[4492]: I1126 07:05:55.666365 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-c7hqh" Nov 26 07:05:55 crc kubenswrapper[4492]: I1126 07:05:55.667206 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-7656457cb-pbp6j"] Nov 26 07:05:55 crc kubenswrapper[4492]: I1126 07:05:55.677849 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5497f148-a137-4ee1-9fed-689d83f91c2a-logs\") pod \"glance-default-internal-api-0\" (UID: \"5497f148-a137-4ee1-9fed-689d83f91c2a\") " pod="openstack/glance-default-internal-api-0" Nov 26 07:05:55 crc kubenswrapper[4492]: I1126 07:05:55.677914 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5497f148-a137-4ee1-9fed-689d83f91c2a-scripts\") pod \"glance-default-internal-api-0\" (UID: \"5497f148-a137-4ee1-9fed-689d83f91c2a\") " pod="openstack/glance-default-internal-api-0" Nov 26 07:05:55 crc kubenswrapper[4492]: I1126 07:05:55.677974 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h5tct\" (UniqueName: \"kubernetes.io/projected/5497f148-a137-4ee1-9fed-689d83f91c2a-kube-api-access-h5tct\") pod \"glance-default-internal-api-0\" (UID: \"5497f148-a137-4ee1-9fed-689d83f91c2a\") " pod="openstack/glance-default-internal-api-0" Nov 26 07:05:55 crc kubenswrapper[4492]: I1126 07:05:55.677992 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5497f148-a137-4ee1-9fed-689d83f91c2a-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"5497f148-a137-4ee1-9fed-689d83f91c2a\") " pod="openstack/glance-default-internal-api-0" Nov 26 07:05:55 crc kubenswrapper[4492]: I1126 07:05:55.678124 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5497f148-a137-4ee1-9fed-689d83f91c2a-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"5497f148-a137-4ee1-9fed-689d83f91c2a\") " pod="openstack/glance-default-internal-api-0" Nov 26 07:05:55 crc kubenswrapper[4492]: I1126 07:05:55.678201 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-internal-api-0\" (UID: \"5497f148-a137-4ee1-9fed-689d83f91c2a\") " pod="openstack/glance-default-internal-api-0" Nov 26 07:05:55 crc kubenswrapper[4492]: I1126 
07:05:55.678231 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5497f148-a137-4ee1-9fed-689d83f91c2a-config-data\") pod \"glance-default-internal-api-0\" (UID: \"5497f148-a137-4ee1-9fed-689d83f91c2a\") " pod="openstack/glance-default-internal-api-0" Nov 26 07:05:55 crc kubenswrapper[4492]: I1126 07:05:55.678256 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/5497f148-a137-4ee1-9fed-689d83f91c2a-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"5497f148-a137-4ee1-9fed-689d83f91c2a\") " pod="openstack/glance-default-internal-api-0" Nov 26 07:05:55 crc kubenswrapper[4492]: I1126 07:05:55.780626 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dbwbh\" (UniqueName: \"kubernetes.io/projected/20137ee9-1674-4972-9952-441dbb870814-kube-api-access-dbwbh\") pod \"placement-7656457cb-pbp6j\" (UID: \"20137ee9-1674-4972-9952-441dbb870814\") " pod="openstack/placement-7656457cb-pbp6j" Nov 26 07:05:55 crc kubenswrapper[4492]: I1126 07:05:55.780674 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/20137ee9-1674-4972-9952-441dbb870814-combined-ca-bundle\") pod \"placement-7656457cb-pbp6j\" (UID: \"20137ee9-1674-4972-9952-441dbb870814\") " pod="openstack/placement-7656457cb-pbp6j" Nov 26 07:05:55 crc kubenswrapper[4492]: I1126 07:05:55.780709 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/20137ee9-1674-4972-9952-441dbb870814-config-data\") pod \"placement-7656457cb-pbp6j\" (UID: \"20137ee9-1674-4972-9952-441dbb870814\") " pod="openstack/placement-7656457cb-pbp6j" Nov 26 07:05:55 crc kubenswrapper[4492]: I1126 07:05:55.780739 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5497f148-a137-4ee1-9fed-689d83f91c2a-logs\") pod \"glance-default-internal-api-0\" (UID: \"5497f148-a137-4ee1-9fed-689d83f91c2a\") " pod="openstack/glance-default-internal-api-0" Nov 26 07:05:55 crc kubenswrapper[4492]: I1126 07:05:55.780783 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5497f148-a137-4ee1-9fed-689d83f91c2a-scripts\") pod \"glance-default-internal-api-0\" (UID: \"5497f148-a137-4ee1-9fed-689d83f91c2a\") " pod="openstack/glance-default-internal-api-0" Nov 26 07:05:55 crc kubenswrapper[4492]: I1126 07:05:55.780807 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h5tct\" (UniqueName: \"kubernetes.io/projected/5497f148-a137-4ee1-9fed-689d83f91c2a-kube-api-access-h5tct\") pod \"glance-default-internal-api-0\" (UID: \"5497f148-a137-4ee1-9fed-689d83f91c2a\") " pod="openstack/glance-default-internal-api-0" Nov 26 07:05:55 crc kubenswrapper[4492]: I1126 07:05:55.780822 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5497f148-a137-4ee1-9fed-689d83f91c2a-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"5497f148-a137-4ee1-9fed-689d83f91c2a\") " pod="openstack/glance-default-internal-api-0" Nov 26 07:05:55 crc 
kubenswrapper[4492]: I1126 07:05:55.780839 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/20137ee9-1674-4972-9952-441dbb870814-logs\") pod \"placement-7656457cb-pbp6j\" (UID: \"20137ee9-1674-4972-9952-441dbb870814\") " pod="openstack/placement-7656457cb-pbp6j" Nov 26 07:05:55 crc kubenswrapper[4492]: I1126 07:05:55.780867 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/20137ee9-1674-4972-9952-441dbb870814-public-tls-certs\") pod \"placement-7656457cb-pbp6j\" (UID: \"20137ee9-1674-4972-9952-441dbb870814\") " pod="openstack/placement-7656457cb-pbp6j" Nov 26 07:05:55 crc kubenswrapper[4492]: I1126 07:05:55.780887 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/20137ee9-1674-4972-9952-441dbb870814-scripts\") pod \"placement-7656457cb-pbp6j\" (UID: \"20137ee9-1674-4972-9952-441dbb870814\") " pod="openstack/placement-7656457cb-pbp6j" Nov 26 07:05:55 crc kubenswrapper[4492]: I1126 07:05:55.780903 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/20137ee9-1674-4972-9952-441dbb870814-internal-tls-certs\") pod \"placement-7656457cb-pbp6j\" (UID: \"20137ee9-1674-4972-9952-441dbb870814\") " pod="openstack/placement-7656457cb-pbp6j" Nov 26 07:05:55 crc kubenswrapper[4492]: I1126 07:05:55.780932 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5497f148-a137-4ee1-9fed-689d83f91c2a-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"5497f148-a137-4ee1-9fed-689d83f91c2a\") " pod="openstack/glance-default-internal-api-0" Nov 26 07:05:55 crc kubenswrapper[4492]: I1126 07:05:55.780959 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-internal-api-0\" (UID: \"5497f148-a137-4ee1-9fed-689d83f91c2a\") " pod="openstack/glance-default-internal-api-0" Nov 26 07:05:55 crc kubenswrapper[4492]: I1126 07:05:55.780976 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5497f148-a137-4ee1-9fed-689d83f91c2a-config-data\") pod \"glance-default-internal-api-0\" (UID: \"5497f148-a137-4ee1-9fed-689d83f91c2a\") " pod="openstack/glance-default-internal-api-0" Nov 26 07:05:55 crc kubenswrapper[4492]: I1126 07:05:55.780995 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/5497f148-a137-4ee1-9fed-689d83f91c2a-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"5497f148-a137-4ee1-9fed-689d83f91c2a\") " pod="openstack/glance-default-internal-api-0" Nov 26 07:05:55 crc kubenswrapper[4492]: I1126 07:05:55.781517 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5497f148-a137-4ee1-9fed-689d83f91c2a-logs\") pod \"glance-default-internal-api-0\" (UID: \"5497f148-a137-4ee1-9fed-689d83f91c2a\") " pod="openstack/glance-default-internal-api-0" Nov 26 07:05:55 crc kubenswrapper[4492]: I1126 07:05:55.782138 4492 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/5497f148-a137-4ee1-9fed-689d83f91c2a-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"5497f148-a137-4ee1-9fed-689d83f91c2a\") " pod="openstack/glance-default-internal-api-0" Nov 26 07:05:55 crc kubenswrapper[4492]: I1126 07:05:55.784614 4492 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-internal-api-0\" (UID: \"5497f148-a137-4ee1-9fed-689d83f91c2a\") device mount path \"/mnt/openstack/pv05\"" pod="openstack/glance-default-internal-api-0" Nov 26 07:05:55 crc kubenswrapper[4492]: I1126 07:05:55.790940 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5497f148-a137-4ee1-9fed-689d83f91c2a-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"5497f148-a137-4ee1-9fed-689d83f91c2a\") " pod="openstack/glance-default-internal-api-0" Nov 26 07:05:55 crc kubenswrapper[4492]: I1126 07:05:55.809456 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5497f148-a137-4ee1-9fed-689d83f91c2a-config-data\") pod \"glance-default-internal-api-0\" (UID: \"5497f148-a137-4ee1-9fed-689d83f91c2a\") " pod="openstack/glance-default-internal-api-0" Nov 26 07:05:55 crc kubenswrapper[4492]: I1126 07:05:55.809712 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5497f148-a137-4ee1-9fed-689d83f91c2a-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"5497f148-a137-4ee1-9fed-689d83f91c2a\") " pod="openstack/glance-default-internal-api-0" Nov 26 07:05:55 crc kubenswrapper[4492]: I1126 07:05:55.814322 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5497f148-a137-4ee1-9fed-689d83f91c2a-scripts\") pod \"glance-default-internal-api-0\" (UID: \"5497f148-a137-4ee1-9fed-689d83f91c2a\") " pod="openstack/glance-default-internal-api-0" Nov 26 07:05:55 crc kubenswrapper[4492]: I1126 07:05:55.816773 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h5tct\" (UniqueName: \"kubernetes.io/projected/5497f148-a137-4ee1-9fed-689d83f91c2a-kube-api-access-h5tct\") pod \"glance-default-internal-api-0\" (UID: \"5497f148-a137-4ee1-9fed-689d83f91c2a\") " pod="openstack/glance-default-internal-api-0" Nov 26 07:05:55 crc kubenswrapper[4492]: I1126 07:05:55.889161 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dbwbh\" (UniqueName: \"kubernetes.io/projected/20137ee9-1674-4972-9952-441dbb870814-kube-api-access-dbwbh\") pod \"placement-7656457cb-pbp6j\" (UID: \"20137ee9-1674-4972-9952-441dbb870814\") " pod="openstack/placement-7656457cb-pbp6j" Nov 26 07:05:55 crc kubenswrapper[4492]: I1126 07:05:55.889233 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/20137ee9-1674-4972-9952-441dbb870814-combined-ca-bundle\") pod \"placement-7656457cb-pbp6j\" (UID: \"20137ee9-1674-4972-9952-441dbb870814\") " pod="openstack/placement-7656457cb-pbp6j" Nov 26 07:05:55 crc kubenswrapper[4492]: I1126 07:05:55.889267 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/20137ee9-1674-4972-9952-441dbb870814-config-data\") pod \"placement-7656457cb-pbp6j\" (UID: \"20137ee9-1674-4972-9952-441dbb870814\") " pod="openstack/placement-7656457cb-pbp6j" Nov 26 07:05:55 crc kubenswrapper[4492]: I1126 07:05:55.889313 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/20137ee9-1674-4972-9952-441dbb870814-logs\") pod \"placement-7656457cb-pbp6j\" (UID: \"20137ee9-1674-4972-9952-441dbb870814\") " pod="openstack/placement-7656457cb-pbp6j" Nov 26 07:05:55 crc kubenswrapper[4492]: I1126 07:05:55.889352 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/20137ee9-1674-4972-9952-441dbb870814-public-tls-certs\") pod \"placement-7656457cb-pbp6j\" (UID: \"20137ee9-1674-4972-9952-441dbb870814\") " pod="openstack/placement-7656457cb-pbp6j" Nov 26 07:05:55 crc kubenswrapper[4492]: I1126 07:05:55.889435 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/20137ee9-1674-4972-9952-441dbb870814-scripts\") pod \"placement-7656457cb-pbp6j\" (UID: \"20137ee9-1674-4972-9952-441dbb870814\") " pod="openstack/placement-7656457cb-pbp6j" Nov 26 07:05:55 crc kubenswrapper[4492]: I1126 07:05:55.889457 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/20137ee9-1674-4972-9952-441dbb870814-internal-tls-certs\") pod \"placement-7656457cb-pbp6j\" (UID: \"20137ee9-1674-4972-9952-441dbb870814\") " pod="openstack/placement-7656457cb-pbp6j" Nov 26 07:05:55 crc kubenswrapper[4492]: I1126 07:05:55.904959 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/20137ee9-1674-4972-9952-441dbb870814-logs\") pod \"placement-7656457cb-pbp6j\" (UID: \"20137ee9-1674-4972-9952-441dbb870814\") " pod="openstack/placement-7656457cb-pbp6j" Nov 26 07:05:55 crc kubenswrapper[4492]: I1126 07:05:55.915608 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/20137ee9-1674-4972-9952-441dbb870814-scripts\") pod \"placement-7656457cb-pbp6j\" (UID: \"20137ee9-1674-4972-9952-441dbb870814\") " pod="openstack/placement-7656457cb-pbp6j" Nov 26 07:05:55 crc kubenswrapper[4492]: I1126 07:05:55.918826 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-internal-api-0\" (UID: \"5497f148-a137-4ee1-9fed-689d83f91c2a\") " pod="openstack/glance-default-internal-api-0" Nov 26 07:05:55 crc kubenswrapper[4492]: I1126 07:05:55.919421 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/20137ee9-1674-4972-9952-441dbb870814-internal-tls-certs\") pod \"placement-7656457cb-pbp6j\" (UID: \"20137ee9-1674-4972-9952-441dbb870814\") " pod="openstack/placement-7656457cb-pbp6j" Nov 26 07:05:55 crc kubenswrapper[4492]: I1126 07:05:55.920577 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/20137ee9-1674-4972-9952-441dbb870814-config-data\") pod \"placement-7656457cb-pbp6j\" (UID: \"20137ee9-1674-4972-9952-441dbb870814\") " pod="openstack/placement-7656457cb-pbp6j" Nov 26 07:05:55 crc 
kubenswrapper[4492]: I1126 07:05:55.921431 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/20137ee9-1674-4972-9952-441dbb870814-combined-ca-bundle\") pod \"placement-7656457cb-pbp6j\" (UID: \"20137ee9-1674-4972-9952-441dbb870814\") " pod="openstack/placement-7656457cb-pbp6j" Nov 26 07:05:55 crc kubenswrapper[4492]: I1126 07:05:55.932112 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/20137ee9-1674-4972-9952-441dbb870814-public-tls-certs\") pod \"placement-7656457cb-pbp6j\" (UID: \"20137ee9-1674-4972-9952-441dbb870814\") " pod="openstack/placement-7656457cb-pbp6j" Nov 26 07:05:55 crc kubenswrapper[4492]: I1126 07:05:55.955902 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dbwbh\" (UniqueName: \"kubernetes.io/projected/20137ee9-1674-4972-9952-441dbb870814-kube-api-access-dbwbh\") pod \"placement-7656457cb-pbp6j\" (UID: \"20137ee9-1674-4972-9952-441dbb870814\") " pod="openstack/placement-7656457cb-pbp6j" Nov 26 07:05:55 crc kubenswrapper[4492]: I1126 07:05:55.998695 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-7656457cb-pbp6j" Nov 26 07:05:56 crc kubenswrapper[4492]: I1126 07:05:56.194858 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 26 07:05:56 crc kubenswrapper[4492]: I1126 07:05:56.506568 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0033e995-8279-4229-8ea7-7339427960a8" path="/var/lib/kubelet/pods/0033e995-8279-4229-8ea7-7339427960a8/volumes" Nov 26 07:05:56 crc kubenswrapper[4492]: I1126 07:05:56.544125 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-7656457cb-pbp6j"] Nov 26 07:05:56 crc kubenswrapper[4492]: I1126 07:05:56.910210 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 07:05:57 crc kubenswrapper[4492]: I1126 07:05:57.519391 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"5497f148-a137-4ee1-9fed-689d83f91c2a","Type":"ContainerStarted","Data":"67c41ae023d0d9d699f8f9ef595a310dde7c1543485ed9241356ead78a260363"} Nov 26 07:05:57 crc kubenswrapper[4492]: I1126 07:05:57.521983 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-7656457cb-pbp6j" event={"ID":"20137ee9-1674-4972-9952-441dbb870814","Type":"ContainerStarted","Data":"3e419882e35c6d964ad8d369c3dff9dc0566376d5b359845d1da55157e1b67aa"} Nov 26 07:05:57 crc kubenswrapper[4492]: I1126 07:05:57.522003 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-7656457cb-pbp6j" event={"ID":"20137ee9-1674-4972-9952-441dbb870814","Type":"ContainerStarted","Data":"838eebc244f44d582d1d6ec6fe97f1bdd903d3145949691ad78ddd0aa531439d"} Nov 26 07:05:57 crc kubenswrapper[4492]: I1126 07:05:57.522014 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-7656457cb-pbp6j" event={"ID":"20137ee9-1674-4972-9952-441dbb870814","Type":"ContainerStarted","Data":"78bcc295d32d0bfc09a9b31c33bb649d8130222e967bc9320077c391dc087ad5"} Nov 26 07:05:57 crc kubenswrapper[4492]: I1126 07:05:57.523014 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-7656457cb-pbp6j" Nov 26 07:05:57 crc kubenswrapper[4492]: I1126 
07:05:57.523034 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-7656457cb-pbp6j"
Nov 26 07:05:57 crc kubenswrapper[4492]: I1126 07:05:57.534607 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"37ed6b48-37b7-479f-837f-d49432778c49","Type":"ContainerStarted","Data":"f0bcd12fa4afc6ff4c40910d74c5855c7cc966cd3bd6cc0b480334ee5744bacd"}
Nov 26 07:05:57 crc kubenswrapper[4492]: I1126 07:05:57.561005 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-7656457cb-pbp6j" podStartSLOduration=2.5609862039999998 podStartE2EDuration="2.560986204s" podCreationTimestamp="2025-11-26 07:05:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:05:57.538144418 +0000 UTC m=+1053.422032715" watchObservedRunningTime="2025-11-26 07:05:57.560986204 +0000 UTC m=+1053.444874502"
Nov 26 07:05:57 crc kubenswrapper[4492]: I1126 07:05:57.597540 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=5.597518632 podStartE2EDuration="5.597518632s" podCreationTimestamp="2025-11-26 07:05:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:05:57.581788517 +0000 UTC m=+1053.465676814" watchObservedRunningTime="2025-11-26 07:05:57.597518632 +0000 UTC m=+1053.481406930"
Nov 26 07:05:57 crc kubenswrapper[4492]: I1126 07:05:57.774718 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-644687db57-zx74g"
Nov 26 07:05:58 crc kubenswrapper[4492]: I1126 07:05:58.080352 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-cb64f9797-5jdj2"
Nov 26 07:05:58 crc kubenswrapper[4492]: I1126 07:05:58.151747 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-d4b7d55d5-brf4j"]
Nov 26 07:05:58 crc kubenswrapper[4492]: I1126 07:05:58.151987 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-d4b7d55d5-brf4j" podUID="b74aeb41-307b-4cca-a876-985ea2601650" containerName="dnsmasq-dns" containerID="cri-o://aa78c3c3c0b9f516e77a60b0ed36169938e0144e9280774dd0d992da1a133746" gracePeriod=10
Nov 26 07:05:58 crc kubenswrapper[4492]: E1126 07:05:58.551950 4492 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb74aeb41_307b_4cca_a876_985ea2601650.slice/crio-aa78c3c3c0b9f516e77a60b0ed36169938e0144e9280774dd0d992da1a133746.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb74aeb41_307b_4cca_a876_985ea2601650.slice/crio-conmon-aa78c3c3c0b9f516e77a60b0ed36169938e0144e9280774dd0d992da1a133746.scope\": RecentStats: unable to find data in memory cache]"
Nov 26 07:05:58 crc kubenswrapper[4492]: I1126 07:05:58.575685 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"5497f148-a137-4ee1-9fed-689d83f91c2a","Type":"ContainerStarted","Data":"6171b837e0d3a2d5f863e3e5457e7b027b8bd9dd262b51cef4c8e74f17bf2906"}
Nov 26 07:05:58 crc kubenswrapper[4492]: I1126 07:05:58.588740 4492 generic.go:334] "Generic (PLEG): container finished" podID="b74aeb41-307b-4cca-a876-985ea2601650" containerID="aa78c3c3c0b9f516e77a60b0ed36169938e0144e9280774dd0d992da1a133746" exitCode=0
Nov 26 07:05:58 crc kubenswrapper[4492]: I1126 07:05:58.588832 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-d4b7d55d5-brf4j" event={"ID":"b74aeb41-307b-4cca-a876-985ea2601650","Type":"ContainerDied","Data":"aa78c3c3c0b9f516e77a60b0ed36169938e0144e9280774dd0d992da1a133746"}
Nov 26 07:05:58 crc kubenswrapper[4492]: I1126 07:05:58.590511 4492 generic.go:334] "Generic (PLEG): container finished" podID="dbe81971-8d1f-4681-9bfb-5b13a46a5788" containerID="ad33beecc6ccb3a991f02d101327f026e456d05ca55d63d06df60954716ad9d4" exitCode=0
Nov 26 07:05:58 crc kubenswrapper[4492]: I1126 07:05:58.590904 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-hqvtp" event={"ID":"dbe81971-8d1f-4681-9bfb-5b13a46a5788","Type":"ContainerDied","Data":"ad33beecc6ccb3a991f02d101327f026e456d05ca55d63d06df60954716ad9d4"}
Nov 26 07:05:58 crc kubenswrapper[4492]: I1126 07:05:58.811495 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-d4b7d55d5-brf4j"
Nov 26 07:05:58 crc kubenswrapper[4492]: I1126 07:05:58.976814 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b74aeb41-307b-4cca-a876-985ea2601650-ovsdbserver-nb\") pod \"b74aeb41-307b-4cca-a876-985ea2601650\" (UID: \"b74aeb41-307b-4cca-a876-985ea2601650\") "
Nov 26 07:05:58 crc kubenswrapper[4492]: I1126 07:05:58.976867 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p6gbf\" (UniqueName: \"kubernetes.io/projected/b74aeb41-307b-4cca-a876-985ea2601650-kube-api-access-p6gbf\") pod \"b74aeb41-307b-4cca-a876-985ea2601650\" (UID: \"b74aeb41-307b-4cca-a876-985ea2601650\") "
Nov 26 07:05:58 crc kubenswrapper[4492]: I1126 07:05:58.976913 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b74aeb41-307b-4cca-a876-985ea2601650-dns-swift-storage-0\") pod \"b74aeb41-307b-4cca-a876-985ea2601650\" (UID: \"b74aeb41-307b-4cca-a876-985ea2601650\") "
Nov 26 07:05:58 crc kubenswrapper[4492]: I1126 07:05:58.977127 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b74aeb41-307b-4cca-a876-985ea2601650-config\") pod \"b74aeb41-307b-4cca-a876-985ea2601650\" (UID: \"b74aeb41-307b-4cca-a876-985ea2601650\") "
Nov 26 07:05:58 crc kubenswrapper[4492]: I1126 07:05:58.977155 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b74aeb41-307b-4cca-a876-985ea2601650-ovsdbserver-sb\") pod \"b74aeb41-307b-4cca-a876-985ea2601650\" (UID: \"b74aeb41-307b-4cca-a876-985ea2601650\") "
Nov 26 07:05:58 crc kubenswrapper[4492]: I1126 07:05:58.977277 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b74aeb41-307b-4cca-a876-985ea2601650-dns-svc\") pod \"b74aeb41-307b-4cca-a876-985ea2601650\" (UID: \"b74aeb41-307b-4cca-a876-985ea2601650\") "
Nov 26 07:05:59 crc kubenswrapper[4492]: I1126 07:05:59.002370 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b74aeb41-307b-4cca-a876-985ea2601650-kube-api-access-p6gbf" (OuterVolumeSpecName: "kube-api-access-p6gbf") pod "b74aeb41-307b-4cca-a876-985ea2601650" (UID: "b74aeb41-307b-4cca-a876-985ea2601650"). InnerVolumeSpecName "kube-api-access-p6gbf". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 07:05:59 crc kubenswrapper[4492]: I1126 07:05:59.092737 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b74aeb41-307b-4cca-a876-985ea2601650-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "b74aeb41-307b-4cca-a876-985ea2601650" (UID: "b74aeb41-307b-4cca-a876-985ea2601650"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 07:05:59 crc kubenswrapper[4492]: I1126 07:05:59.094819 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b74aeb41-307b-4cca-a876-985ea2601650-ovsdbserver-sb\") pod \"b74aeb41-307b-4cca-a876-985ea2601650\" (UID: \"b74aeb41-307b-4cca-a876-985ea2601650\") "
Nov 26 07:05:59 crc kubenswrapper[4492]: I1126 07:05:59.095832 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p6gbf\" (UniqueName: \"kubernetes.io/projected/b74aeb41-307b-4cca-a876-985ea2601650-kube-api-access-p6gbf\") on node \"crc\" DevicePath \"\""
Nov 26 07:05:59 crc kubenswrapper[4492]: W1126 07:05:59.096022 4492 empty_dir.go:500] Warning: Unmount skipped because path does not exist: /var/lib/kubelet/pods/b74aeb41-307b-4cca-a876-985ea2601650/volumes/kubernetes.io~configmap/ovsdbserver-sb
Nov 26 07:05:59 crc kubenswrapper[4492]: I1126 07:05:59.096084 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b74aeb41-307b-4cca-a876-985ea2601650-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "b74aeb41-307b-4cca-a876-985ea2601650" (UID: "b74aeb41-307b-4cca-a876-985ea2601650"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 07:05:59 crc kubenswrapper[4492]: I1126 07:05:59.111037 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b74aeb41-307b-4cca-a876-985ea2601650-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "b74aeb41-307b-4cca-a876-985ea2601650" (UID: "b74aeb41-307b-4cca-a876-985ea2601650"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 07:05:59 crc kubenswrapper[4492]: I1126 07:05:59.122553 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b74aeb41-307b-4cca-a876-985ea2601650-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "b74aeb41-307b-4cca-a876-985ea2601650" (UID: "b74aeb41-307b-4cca-a876-985ea2601650"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 07:05:59 crc kubenswrapper[4492]: I1126 07:05:59.143007 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b74aeb41-307b-4cca-a876-985ea2601650-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "b74aeb41-307b-4cca-a876-985ea2601650" (UID: "b74aeb41-307b-4cca-a876-985ea2601650"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 07:05:59 crc kubenswrapper[4492]: I1126 07:05:59.159207 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b74aeb41-307b-4cca-a876-985ea2601650-config" (OuterVolumeSpecName: "config") pod "b74aeb41-307b-4cca-a876-985ea2601650" (UID: "b74aeb41-307b-4cca-a876-985ea2601650"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 07:05:59 crc kubenswrapper[4492]: I1126 07:05:59.197731 4492 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b74aeb41-307b-4cca-a876-985ea2601650-config\") on node \"crc\" DevicePath \"\""
Nov 26 07:05:59 crc kubenswrapper[4492]: I1126 07:05:59.198004 4492 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b74aeb41-307b-4cca-a876-985ea2601650-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Nov 26 07:05:59 crc kubenswrapper[4492]: I1126 07:05:59.198090 4492 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b74aeb41-307b-4cca-a876-985ea2601650-dns-svc\") on node \"crc\" DevicePath \"\""
Nov 26 07:05:59 crc kubenswrapper[4492]: I1126 07:05:59.198154 4492 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b74aeb41-307b-4cca-a876-985ea2601650-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Nov 26 07:05:59 crc kubenswrapper[4492]: I1126 07:05:59.198229 4492 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b74aeb41-307b-4cca-a876-985ea2601650-dns-swift-storage-0\") on node \"crc\" DevicePath \"\""
Nov 26 07:05:59 crc kubenswrapper[4492]: I1126 07:05:59.602360 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-d4b7d55d5-brf4j" event={"ID":"b74aeb41-307b-4cca-a876-985ea2601650","Type":"ContainerDied","Data":"b760819037d70e8a40944e08cf06b9bfdf0d7287234f9a936de39abc1b8996d1"}
Nov 26 07:05:59 crc kubenswrapper[4492]: I1126 07:05:59.602486 4492 scope.go:117] "RemoveContainer" containerID="aa78c3c3c0b9f516e77a60b0ed36169938e0144e9280774dd0d992da1a133746"
Nov 26 07:05:59 crc kubenswrapper[4492]: I1126 07:05:59.602679 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-d4b7d55d5-brf4j"
Nov 26 07:05:59 crc kubenswrapper[4492]: I1126 07:05:59.623686 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"5497f148-a137-4ee1-9fed-689d83f91c2a","Type":"ContainerStarted","Data":"41e98fc53565a78388d9cb4965d2c8545ae7c711714e6ccd8b8ff977cb1105ce"}
Nov 26 07:05:59 crc kubenswrapper[4492]: I1126 07:05:59.649149 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-d4b7d55d5-brf4j"]
Nov 26 07:05:59 crc kubenswrapper[4492]: I1126 07:05:59.659678 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-d4b7d55d5-brf4j"]
Nov 26 07:05:59 crc kubenswrapper[4492]: I1126 07:05:59.666020 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=4.666008831 podStartE2EDuration="4.666008831s" podCreationTimestamp="2025-11-26 07:05:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:05:59.662057803 +0000 UTC m=+1055.545946101" watchObservedRunningTime="2025-11-26 07:05:59.666008831 +0000 UTC m=+1055.549897130"
Nov 26 07:06:00 crc kubenswrapper[4492]: I1126 07:06:00.451547 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b74aeb41-307b-4cca-a876-985ea2601650" path="/var/lib/kubelet/pods/b74aeb41-307b-4cca-a876-985ea2601650/volumes"
Nov 26 07:06:01 crc kubenswrapper[4492]: I1126 07:06:01.719310 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-hqvtp"
Nov 26 07:06:01 crc kubenswrapper[4492]: I1126 07:06:01.854677 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dbe81971-8d1f-4681-9bfb-5b13a46a5788-scripts\") pod \"dbe81971-8d1f-4681-9bfb-5b13a46a5788\" (UID: \"dbe81971-8d1f-4681-9bfb-5b13a46a5788\") "
Nov 26 07:06:01 crc kubenswrapper[4492]: I1126 07:06:01.854750 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dbe81971-8d1f-4681-9bfb-5b13a46a5788-combined-ca-bundle\") pod \"dbe81971-8d1f-4681-9bfb-5b13a46a5788\" (UID: \"dbe81971-8d1f-4681-9bfb-5b13a46a5788\") "
Nov 26 07:06:01 crc kubenswrapper[4492]: I1126 07:06:01.855059 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vf7wx\" (UniqueName: \"kubernetes.io/projected/dbe81971-8d1f-4681-9bfb-5b13a46a5788-kube-api-access-vf7wx\") pod \"dbe81971-8d1f-4681-9bfb-5b13a46a5788\" (UID: \"dbe81971-8d1f-4681-9bfb-5b13a46a5788\") "
Nov 26 07:06:01 crc kubenswrapper[4492]: I1126 07:06:01.855110 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/dbe81971-8d1f-4681-9bfb-5b13a46a5788-credential-keys\") pod \"dbe81971-8d1f-4681-9bfb-5b13a46a5788\" (UID: \"dbe81971-8d1f-4681-9bfb-5b13a46a5788\") "
Nov 26 07:06:01 crc kubenswrapper[4492]: I1126 07:06:01.855274 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/dbe81971-8d1f-4681-9bfb-5b13a46a5788-fernet-keys\") pod \"dbe81971-8d1f-4681-9bfb-5b13a46a5788\" (UID: \"dbe81971-8d1f-4681-9bfb-5b13a46a5788\") "
Nov 26 07:06:01 crc kubenswrapper[4492]: I1126 07:06:01.855370 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dbe81971-8d1f-4681-9bfb-5b13a46a5788-config-data\") pod \"dbe81971-8d1f-4681-9bfb-5b13a46a5788\" (UID: \"dbe81971-8d1f-4681-9bfb-5b13a46a5788\") "
Nov 26 07:06:01 crc kubenswrapper[4492]: I1126 07:06:01.870380 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dbe81971-8d1f-4681-9bfb-5b13a46a5788-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "dbe81971-8d1f-4681-9bfb-5b13a46a5788" (UID: "dbe81971-8d1f-4681-9bfb-5b13a46a5788"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 07:06:01 crc kubenswrapper[4492]: I1126 07:06:01.876551 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dbe81971-8d1f-4681-9bfb-5b13a46a5788-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "dbe81971-8d1f-4681-9bfb-5b13a46a5788" (UID: "dbe81971-8d1f-4681-9bfb-5b13a46a5788"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 07:06:01 crc kubenswrapper[4492]: I1126 07:06:01.878039 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dbe81971-8d1f-4681-9bfb-5b13a46a5788-kube-api-access-vf7wx" (OuterVolumeSpecName: "kube-api-access-vf7wx") pod "dbe81971-8d1f-4681-9bfb-5b13a46a5788" (UID: "dbe81971-8d1f-4681-9bfb-5b13a46a5788"). InnerVolumeSpecName "kube-api-access-vf7wx". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 07:06:01 crc kubenswrapper[4492]: I1126 07:06:01.896443 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dbe81971-8d1f-4681-9bfb-5b13a46a5788-scripts" (OuterVolumeSpecName: "scripts") pod "dbe81971-8d1f-4681-9bfb-5b13a46a5788" (UID: "dbe81971-8d1f-4681-9bfb-5b13a46a5788"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 07:06:01 crc kubenswrapper[4492]: I1126 07:06:01.914395 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dbe81971-8d1f-4681-9bfb-5b13a46a5788-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "dbe81971-8d1f-4681-9bfb-5b13a46a5788" (UID: "dbe81971-8d1f-4681-9bfb-5b13a46a5788"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 07:06:01 crc kubenswrapper[4492]: I1126 07:06:01.925276 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dbe81971-8d1f-4681-9bfb-5b13a46a5788-config-data" (OuterVolumeSpecName: "config-data") pod "dbe81971-8d1f-4681-9bfb-5b13a46a5788" (UID: "dbe81971-8d1f-4681-9bfb-5b13a46a5788"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 07:06:01 crc kubenswrapper[4492]: I1126 07:06:01.959337 4492 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dbe81971-8d1f-4681-9bfb-5b13a46a5788-config-data\") on node \"crc\" DevicePath \"\""
Nov 26 07:06:01 crc kubenswrapper[4492]: I1126 07:06:01.959382 4492 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dbe81971-8d1f-4681-9bfb-5b13a46a5788-scripts\") on node \"crc\" DevicePath \"\""
Nov 26 07:06:01 crc kubenswrapper[4492]: I1126 07:06:01.959396 4492 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dbe81971-8d1f-4681-9bfb-5b13a46a5788-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 26 07:06:01 crc kubenswrapper[4492]: I1126 07:06:01.959409 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vf7wx\" (UniqueName: \"kubernetes.io/projected/dbe81971-8d1f-4681-9bfb-5b13a46a5788-kube-api-access-vf7wx\") on node \"crc\" DevicePath \"\""
Nov 26 07:06:01 crc kubenswrapper[4492]: I1126 07:06:01.959423 4492 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/dbe81971-8d1f-4681-9bfb-5b13a46a5788-credential-keys\") on node \"crc\" DevicePath \"\""
Nov 26 07:06:01 crc kubenswrapper[4492]: I1126 07:06:01.959432 4492 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/dbe81971-8d1f-4681-9bfb-5b13a46a5788-fernet-keys\") on node \"crc\" DevicePath \"\""
Nov 26 07:06:02 crc kubenswrapper[4492]: I1126 07:06:02.663769 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-hqvtp" event={"ID":"dbe81971-8d1f-4681-9bfb-5b13a46a5788","Type":"ContainerDied","Data":"253b25a5cd900b39a0d9a0162ca2039180d1fa2384189f90d9d176fe18d8459b"}
Nov 26 07:06:02 crc kubenswrapper[4492]: I1126 07:06:02.664128 4492 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="253b25a5cd900b39a0d9a0162ca2039180d1fa2384189f90d9d176fe18d8459b"
Nov 26 07:06:02 crc kubenswrapper[4492]: I1126 07:06:02.664225 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-hqvtp"
Nov 26 07:06:02 crc kubenswrapper[4492]: I1126 07:06:02.794448 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0"
Nov 26 07:06:02 crc kubenswrapper[4492]: I1126 07:06:02.800340 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0"
Nov 26 07:06:02 crc kubenswrapper[4492]: I1126 07:06:02.836190 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-647b7bfcb8-p68l9"]
Nov 26 07:06:02 crc kubenswrapper[4492]: E1126 07:06:02.836785 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dbe81971-8d1f-4681-9bfb-5b13a46a5788" containerName="keystone-bootstrap"
Nov 26 07:06:02 crc kubenswrapper[4492]: I1126 07:06:02.836808 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="dbe81971-8d1f-4681-9bfb-5b13a46a5788" containerName="keystone-bootstrap"
Nov 26 07:06:02 crc kubenswrapper[4492]: E1126 07:06:02.836853 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b74aeb41-307b-4cca-a876-985ea2601650" containerName="init"
Nov 26 07:06:02 crc kubenswrapper[4492]: I1126 07:06:02.836859 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="b74aeb41-307b-4cca-a876-985ea2601650" containerName="init"
Nov 26 07:06:02 crc kubenswrapper[4492]: E1126 07:06:02.836880 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b74aeb41-307b-4cca-a876-985ea2601650" containerName="dnsmasq-dns"
Nov 26 07:06:02 crc kubenswrapper[4492]: I1126 07:06:02.836887 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="b74aeb41-307b-4cca-a876-985ea2601650" containerName="dnsmasq-dns"
Nov 26 07:06:02 crc kubenswrapper[4492]: I1126 07:06:02.837150 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="b74aeb41-307b-4cca-a876-985ea2601650" containerName="dnsmasq-dns"
Nov 26 07:06:02 crc kubenswrapper[4492]: I1126 07:06:02.837217 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="dbe81971-8d1f-4681-9bfb-5b13a46a5788" containerName="keystone-bootstrap"
Nov 26 07:06:02 crc kubenswrapper[4492]: I1126 07:06:02.842362 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-647b7bfcb8-p68l9"
Nov 26 07:06:02 crc kubenswrapper[4492]: I1126 07:06:02.855219 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-internal-svc"
Nov 26 07:06:02 crc kubenswrapper[4492]: I1126 07:06:02.855334 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-public-svc"
Nov 26 07:06:02 crc kubenswrapper[4492]: I1126 07:06:02.855687 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts"
Nov 26 07:06:02 crc kubenswrapper[4492]: I1126 07:06:02.855796 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone"
Nov 26 07:06:02 crc kubenswrapper[4492]: I1126 07:06:02.856668 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-t7xkz"
Nov 26 07:06:02 crc kubenswrapper[4492]: I1126 07:06:02.856836 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data"
Nov 26 07:06:02 crc kubenswrapper[4492]: I1126 07:06:02.866057 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-647b7bfcb8-p68l9"]
Nov 26 07:06:02 crc kubenswrapper[4492]: I1126 07:06:02.961236 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0"
Nov 26 07:06:02 crc kubenswrapper[4492]: I1126 07:06:02.990352 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/16ba3521-ece5-4391-960d-34fce3d66cf7-public-tls-certs\") pod \"keystone-647b7bfcb8-p68l9\" (UID: \"16ba3521-ece5-4391-960d-34fce3d66cf7\") " pod="openstack/keystone-647b7bfcb8-p68l9"
Nov 26 07:06:02 crc kubenswrapper[4492]: I1126 07:06:02.990469 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/16ba3521-ece5-4391-960d-34fce3d66cf7-credential-keys\") pod \"keystone-647b7bfcb8-p68l9\" (UID: \"16ba3521-ece5-4391-960d-34fce3d66cf7\") " pod="openstack/keystone-647b7bfcb8-p68l9"
Nov 26 07:06:02 crc kubenswrapper[4492]: I1126 07:06:02.990501 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/16ba3521-ece5-4391-960d-34fce3d66cf7-fernet-keys\") pod \"keystone-647b7bfcb8-p68l9\" (UID: \"16ba3521-ece5-4391-960d-34fce3d66cf7\") " pod="openstack/keystone-647b7bfcb8-p68l9"
Nov 26 07:06:02 crc kubenswrapper[4492]: I1126 07:06:02.990569 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/16ba3521-ece5-4391-960d-34fce3d66cf7-scripts\") pod \"keystone-647b7bfcb8-p68l9\" (UID: \"16ba3521-ece5-4391-960d-34fce3d66cf7\") " pod="openstack/keystone-647b7bfcb8-p68l9"
Nov 26 07:06:02 crc kubenswrapper[4492]: I1126 07:06:02.990594 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/16ba3521-ece5-4391-960d-34fce3d66cf7-internal-tls-certs\") pod \"keystone-647b7bfcb8-p68l9\" (UID: \"16ba3521-ece5-4391-960d-34fce3d66cf7\") " pod="openstack/keystone-647b7bfcb8-p68l9"
Nov 26 07:06:02 crc kubenswrapper[4492]: I1126 07:06:02.990629 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6lrrx\" (UniqueName: \"kubernetes.io/projected/16ba3521-ece5-4391-960d-34fce3d66cf7-kube-api-access-6lrrx\") pod \"keystone-647b7bfcb8-p68l9\" (UID: \"16ba3521-ece5-4391-960d-34fce3d66cf7\") " pod="openstack/keystone-647b7bfcb8-p68l9"
Nov 26 07:06:02 crc kubenswrapper[4492]: I1126 07:06:02.990694 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/16ba3521-ece5-4391-960d-34fce3d66cf7-config-data\") pod \"keystone-647b7bfcb8-p68l9\" (UID: \"16ba3521-ece5-4391-960d-34fce3d66cf7\") " pod="openstack/keystone-647b7bfcb8-p68l9"
Nov 26 07:06:02 crc kubenswrapper[4492]: I1126 07:06:02.990729 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/16ba3521-ece5-4391-960d-34fce3d66cf7-combined-ca-bundle\") pod \"keystone-647b7bfcb8-p68l9\" (UID: \"16ba3521-ece5-4391-960d-34fce3d66cf7\") " pod="openstack/keystone-647b7bfcb8-p68l9"
Nov 26 07:06:03 crc kubenswrapper[4492]: I1126 07:06:03.094640 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/16ba3521-ece5-4391-960d-34fce3d66cf7-credential-keys\") pod \"keystone-647b7bfcb8-p68l9\" (UID: \"16ba3521-ece5-4391-960d-34fce3d66cf7\") " pod="openstack/keystone-647b7bfcb8-p68l9"
Nov 26 07:06:03 crc kubenswrapper[4492]: I1126 07:06:03.094693 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/16ba3521-ece5-4391-960d-34fce3d66cf7-fernet-keys\") pod \"keystone-647b7bfcb8-p68l9\" (UID: \"16ba3521-ece5-4391-960d-34fce3d66cf7\") " pod="openstack/keystone-647b7bfcb8-p68l9"
Nov 26 07:06:03 crc kubenswrapper[4492]: I1126 07:06:03.094819 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/16ba3521-ece5-4391-960d-34fce3d66cf7-scripts\") pod \"keystone-647b7bfcb8-p68l9\" (UID: \"16ba3521-ece5-4391-960d-34fce3d66cf7\") " pod="openstack/keystone-647b7bfcb8-p68l9"
Nov 26 07:06:03 crc kubenswrapper[4492]: I1126 07:06:03.094840 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/16ba3521-ece5-4391-960d-34fce3d66cf7-internal-tls-certs\") pod \"keystone-647b7bfcb8-p68l9\" (UID: \"16ba3521-ece5-4391-960d-34fce3d66cf7\") " pod="openstack/keystone-647b7bfcb8-p68l9"
Nov 26 07:06:03 crc kubenswrapper[4492]: I1126 07:06:03.094892 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6lrrx\" (UniqueName: \"kubernetes.io/projected/16ba3521-ece5-4391-960d-34fce3d66cf7-kube-api-access-6lrrx\") pod \"keystone-647b7bfcb8-p68l9\" (UID: \"16ba3521-ece5-4391-960d-34fce3d66cf7\") " pod="openstack/keystone-647b7bfcb8-p68l9"
Nov 26 07:06:03 crc kubenswrapper[4492]: I1126 07:06:03.095001 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/16ba3521-ece5-4391-960d-34fce3d66cf7-config-data\") pod \"keystone-647b7bfcb8-p68l9\" (UID: \"16ba3521-ece5-4391-960d-34fce3d66cf7\") " pod="openstack/keystone-647b7bfcb8-p68l9"
Nov 26 07:06:03 crc kubenswrapper[4492]: I1126 07:06:03.095044 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/16ba3521-ece5-4391-960d-34fce3d66cf7-combined-ca-bundle\") pod \"keystone-647b7bfcb8-p68l9\" (UID: \"16ba3521-ece5-4391-960d-34fce3d66cf7\") " pod="openstack/keystone-647b7bfcb8-p68l9"
Nov 26 07:06:03 crc kubenswrapper[4492]: I1126 07:06:03.095112 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/16ba3521-ece5-4391-960d-34fce3d66cf7-public-tls-certs\") pod \"keystone-647b7bfcb8-p68l9\" (UID: \"16ba3521-ece5-4391-960d-34fce3d66cf7\") " pod="openstack/keystone-647b7bfcb8-p68l9"
Nov 26 07:06:03 crc kubenswrapper[4492]: I1126 07:06:03.103102 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/16ba3521-ece5-4391-960d-34fce3d66cf7-fernet-keys\") pod \"keystone-647b7bfcb8-p68l9\" (UID: \"16ba3521-ece5-4391-960d-34fce3d66cf7\") " pod="openstack/keystone-647b7bfcb8-p68l9"
Nov 26 07:06:03 crc kubenswrapper[4492]: I1126 07:06:03.110503 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/16ba3521-ece5-4391-960d-34fce3d66cf7-internal-tls-certs\") pod \"keystone-647b7bfcb8-p68l9\" (UID: \"16ba3521-ece5-4391-960d-34fce3d66cf7\") " pod="openstack/keystone-647b7bfcb8-p68l9"
Nov 26 07:06:03 crc kubenswrapper[4492]: I1126 07:06:03.110671 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/16ba3521-ece5-4391-960d-34fce3d66cf7-combined-ca-bundle\") pod \"keystone-647b7bfcb8-p68l9\" (UID: \"16ba3521-ece5-4391-960d-34fce3d66cf7\") " pod="openstack/keystone-647b7bfcb8-p68l9"
Nov 26 07:06:03 crc kubenswrapper[4492]: I1126 07:06:03.110915 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/16ba3521-ece5-4391-960d-34fce3d66cf7-scripts\") pod \"keystone-647b7bfcb8-p68l9\" (UID: \"16ba3521-ece5-4391-960d-34fce3d66cf7\") " pod="openstack/keystone-647b7bfcb8-p68l9"
Nov 26 07:06:03 crc kubenswrapper[4492]: I1126 07:06:03.114986 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6lrrx\" (UniqueName: \"kubernetes.io/projected/16ba3521-ece5-4391-960d-34fce3d66cf7-kube-api-access-6lrrx\") pod \"keystone-647b7bfcb8-p68l9\" (UID: \"16ba3521-ece5-4391-960d-34fce3d66cf7\") " pod="openstack/keystone-647b7bfcb8-p68l9"
Nov 26 07:06:03 crc kubenswrapper[4492]: I1126 07:06:03.115624 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/16ba3521-ece5-4391-960d-34fce3d66cf7-public-tls-certs\") pod \"keystone-647b7bfcb8-p68l9\" (UID: \"16ba3521-ece5-4391-960d-34fce3d66cf7\") " pod="openstack/keystone-647b7bfcb8-p68l9"
Nov 26 07:06:03 crc kubenswrapper[4492]: I1126 07:06:03.116184 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/16ba3521-ece5-4391-960d-34fce3d66cf7-config-data\") pod \"keystone-647b7bfcb8-p68l9\" (UID: \"16ba3521-ece5-4391-960d-34fce3d66cf7\") " pod="openstack/keystone-647b7bfcb8-p68l9"
Nov 26 07:06:03 crc kubenswrapper[4492]: I1126 07:06:03.130250 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/16ba3521-ece5-4391-960d-34fce3d66cf7-credential-keys\") pod \"keystone-647b7bfcb8-p68l9\" (UID: \"16ba3521-ece5-4391-960d-34fce3d66cf7\") " pod="openstack/keystone-647b7bfcb8-p68l9"
Nov 26 07:06:03 crc kubenswrapper[4492]: I1126 07:06:03.151407 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0"
Nov 26 07:06:03 crc kubenswrapper[4492]: I1126 07:06:03.197359 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-647b7bfcb8-p68l9"
Nov 26 07:06:03 crc kubenswrapper[4492]: I1126 07:06:03.269731 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-86bdc94cc6-ws8xc"
Nov 26 07:06:03 crc kubenswrapper[4492]: I1126 07:06:03.674801 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0"
Nov 26 07:06:03 crc kubenswrapper[4492]: I1126 07:06:03.675091 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0"
Nov 26 07:06:04 crc kubenswrapper[4492]: I1126 07:06:04.369502 4492 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-7bb6557f96-rgc7g" podUID="a15ec528-9195-4dfe-95b7-e30a44f74b44" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.153:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.153:8443: connect: connection refused"
Nov 26 07:06:04 crc kubenswrapper[4492]: I1126 07:06:04.492502 4492 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-6b48756c9b-4pxln" podUID="66834ee2-a38b-4d8d-9195-c4af38dc8a9b" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.154:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.154:8443: connect: connection refused"
Nov 26 07:06:04 crc kubenswrapper[4492]: I1126 07:06:04.704428 4492 scope.go:117] "RemoveContainer" containerID="dc9e0c2936235b678729713d959a42dedc964504eff3b369f54d38e4385d29ad"
Nov 26 07:06:05 crc kubenswrapper[4492]: I1126 07:06:05.334666 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-647b7bfcb8-p68l9"]
Nov 26 07:06:05 crc kubenswrapper[4492]: I1126 07:06:05.625325 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-5f9588bddf-km75q"
Nov 26 07:06:05 crc kubenswrapper[4492]: I1126 07:06:05.683785 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-86bdc94cc6-ws8xc"]
Nov 26 07:06:05 crc kubenswrapper[4492]: I1126 07:06:05.684303 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-86bdc94cc6-ws8xc" podUID="38194db3-f048-45e5-80d6-7dfa8f1f7420" containerName="neutron-httpd" containerID="cri-o://16a3767d292dc632cef3268e9d839310095282f2a888535ea9fcc2cd0db9fa99" gracePeriod=30
Nov 26 07:06:05 crc kubenswrapper[4492]: I1126 07:06:05.684052 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-86bdc94cc6-ws8xc" podUID="38194db3-f048-45e5-80d6-7dfa8f1f7420" containerName="neutron-api" containerID="cri-o://4a7744552769063790091905df7e367ce4227767f52b93cb69f2cf6f31dd1597" gracePeriod=30
Nov 26 07:06:05 crc kubenswrapper[4492]: I1126 07:06:05.734051 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-9249p" event={"ID":"e34f6949-eab2-4b97-9ba1-54ed3e59da5c","Type":"ContainerStarted","Data":"0139f1254c25e47ff0698b3f33336d095e00582bb7eb1857b5398dd1c39fcbd9"}
Nov 26 07:06:05 crc kubenswrapper[4492]: I1126 07:06:05.741108 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-647b7bfcb8-p68l9" event={"ID":"16ba3521-ece5-4391-960d-34fce3d66cf7","Type":"ContainerStarted","Data":"e0d7ceebc9e345427e92e9a34cec01170f3bcc9b98547a1a7b9447397ce08eec"}
Nov 26 07:06:05 crc kubenswrapper[4492]: I1126 07:06:05.742615 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-7s6sk" event={"ID":"ad2234e1-842b-4bba-bd21-9fb781403667","Type":"ContainerStarted","Data":"c574a168ff50590cf9930203b023690b39d0cb0a4648651d42d746723916a98b"}
Nov 26 07:06:05 crc kubenswrapper[4492]: I1126 07:06:05.754248 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc","Type":"ContainerStarted","Data":"79a3438798f4dbce66ccdb69dd77edcac910249c13dc4e1f7ab7cb20daed8564"}
Nov 26 07:06:05 crc kubenswrapper[4492]: I1126 07:06:05.770314 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-sync-9249p" podStartSLOduration=3.06481194 podStartE2EDuration="52.770297741s" podCreationTimestamp="2025-11-26 07:05:13 +0000 UTC" firstStartedPulling="2025-11-26 07:05:15.452789011 +0000 UTC m=+1011.336677309" lastFinishedPulling="2025-11-26 07:06:05.158274812 +0000 UTC m=+1061.042163110" observedRunningTime="2025-11-26 07:06:05.75128359 +0000 UTC m=+1061.635171889" watchObservedRunningTime="2025-11-26 07:06:05.770297741 +0000 UTC m=+1061.654186039"
Nov 26 07:06:05 crc kubenswrapper[4492]: I1126 07:06:05.783193 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-db-sync-7s6sk" podStartSLOduration=2.566268654 podStartE2EDuration="52.783167047s" podCreationTimestamp="2025-11-26 07:05:13 +0000 UTC" firstStartedPulling="2025-11-26 07:05:14.551561665 +0000 UTC m=+1010.435449963" lastFinishedPulling="2025-11-26 07:06:04.768460058 +0000 UTC m=+1060.652348356" observedRunningTime="2025-11-26 07:06:05.767527412 +0000 UTC m=+1061.651415700" watchObservedRunningTime="2025-11-26 07:06:05.783167047 +0000 UTC m=+1061.667055344"
Nov 26 07:06:06 crc kubenswrapper[4492]: I1126 07:06:06.195459 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0"
Nov 26 07:06:06 crc kubenswrapper[4492]: I1126 07:06:06.195720 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0"
Nov 26 07:06:06 crc kubenswrapper[4492]: I1126 07:06:06.244855 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0"
Nov 26 07:06:06 crc kubenswrapper[4492]: I1126 07:06:06.255234 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0"
Nov 26 07:06:06 crc kubenswrapper[4492]: I1126 07:06:06.767040 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-647b7bfcb8-p68l9" event={"ID":"16ba3521-ece5-4391-960d-34fce3d66cf7","Type":"ContainerStarted","Data":"074f11efe3b2929e5c2de059424b86eafbb23d036dbd11d290465a4cf78037e4"}
Nov 26 07:06:06 crc kubenswrapper[4492]: I1126 07:06:06.767507 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/keystone-647b7bfcb8-p68l9"
Nov 26 07:06:06 crc kubenswrapper[4492]: I1126 07:06:06.771135 4492 generic.go:334] "Generic (PLEG): container finished" podID="38194db3-f048-45e5-80d6-7dfa8f1f7420" containerID="16a3767d292dc632cef3268e9d839310095282f2a888535ea9fcc2cd0db9fa99" exitCode=0
Nov 26 07:06:06 crc kubenswrapper[4492]: I1126 07:06:06.771212 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-86bdc94cc6-ws8xc" event={"ID":"38194db3-f048-45e5-80d6-7dfa8f1f7420","Type":"ContainerDied","Data":"16a3767d292dc632cef3268e9d839310095282f2a888535ea9fcc2cd0db9fa99"}
Nov 26 07:06:06 crc kubenswrapper[4492]: I1126 07:06:06.771738 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0"
Nov 26 07:06:06 crc kubenswrapper[4492]: I1126 07:06:06.771766 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0"
Nov 26 07:06:06 crc kubenswrapper[4492]: I1126 07:06:06.825373 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-647b7bfcb8-p68l9" podStartSLOduration=4.825358743 podStartE2EDuration="4.825358743s" podCreationTimestamp="2025-11-26 07:06:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:06:06.81960763 +0000 UTC m=+1062.703495917" watchObservedRunningTime="2025-11-26 07:06:06.825358743 +0000 UTC m=+1062.709247041"
Nov 26 07:06:07 crc kubenswrapper[4492]: I1126 07:06:07.791887 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-8w6bv" event={"ID":"a497bcf3-f8db-4b08-b5e3-33d050f9901a","Type":"ContainerStarted","Data":"67997b8189653f22b1f851f2eb4fbc6ebd61e5a924b9dc1ab9f1bb3eb873129d"}
Nov 26 07:06:07 crc kubenswrapper[4492]: I1126 07:06:07.846092 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-sync-8w6bv" podStartSLOduration=4.244738623 podStartE2EDuration="54.84606896s" podCreationTimestamp="2025-11-26 07:05:13 +0000 UTC" firstStartedPulling="2025-11-26 07:05:15.541825186 +0000 UTC m=+1011.425713484" lastFinishedPulling="2025-11-26 07:06:06.143155523 +0000 UTC m=+1062.027043821" observedRunningTime="2025-11-26 07:06:07.818565658 +0000 UTC m=+1063.702453956" watchObservedRunningTime="2025-11-26 07:06:07.84606896 +0000 UTC m=+1063.729957258"
Nov 26 07:06:08 crc kubenswrapper[4492]: I1126 07:06:08.234308 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0"
Nov 26 07:06:08 crc kubenswrapper[4492]: I1126 07:06:08.234415 4492 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Nov 26 07:06:08 crc kubenswrapper[4492]: I1126 07:06:08.276479 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0"
Nov 26 07:06:09 crc kubenswrapper[4492]: I1126 07:06:09.821883 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0"
Nov 26 07:06:09 crc kubenswrapper[4492]: I1126 07:06:09.822294 4492 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Nov 26 07:06:09 crc kubenswrapper[4492]: I1126 07:06:09.826843 4492 generic.go:334] "Generic (PLEG): container finished" podID="e34f6949-eab2-4b97-9ba1-54ed3e59da5c" containerID="0139f1254c25e47ff0698b3f33336d095e00582bb7eb1857b5398dd1c39fcbd9" exitCode=0
Nov 26 07:06:09 crc kubenswrapper[4492]: I1126 07:06:09.826883 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-9249p" event={"ID":"e34f6949-eab2-4b97-9ba1-54ed3e59da5c","Type":"ContainerDied","Data":"0139f1254c25e47ff0698b3f33336d095e00582bb7eb1857b5398dd1c39fcbd9"}
Nov 26 07:06:09 crc kubenswrapper[4492]: I1126 07:06:09.858627 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0"
Nov 26 07:06:10 crc kubenswrapper[4492]: I1126 07:06:10.837011 4492 generic.go:334] "Generic (PLEG): container finished" podID="ad2234e1-842b-4bba-bd21-9fb781403667" containerID="c574a168ff50590cf9930203b023690b39d0cb0a4648651d42d746723916a98b" exitCode=0
Nov 26 07:06:10 crc kubenswrapper[4492]: I1126 07:06:10.837227 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-7s6sk" event={"ID":"ad2234e1-842b-4bba-bd21-9fb781403667","Type":"ContainerDied","Data":"c574a168ff50590cf9930203b023690b39d0cb0a4648651d42d746723916a98b"}
Nov 26 07:06:11 crc kubenswrapper[4492]: I1126 07:06:11.226301 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-9249p"
Nov 26 07:06:11 crc kubenswrapper[4492]: I1126 07:06:11.304434 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e34f6949-eab2-4b97-9ba1-54ed3e59da5c-combined-ca-bundle\") pod \"e34f6949-eab2-4b97-9ba1-54ed3e59da5c\" (UID: \"e34f6949-eab2-4b97-9ba1-54ed3e59da5c\") "
Nov 26 07:06:11 crc kubenswrapper[4492]: I1126 07:06:11.304470 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bsjsf\" (UniqueName: \"kubernetes.io/projected/e34f6949-eab2-4b97-9ba1-54ed3e59da5c-kube-api-access-bsjsf\") pod \"e34f6949-eab2-4b97-9ba1-54ed3e59da5c\" (UID: \"e34f6949-eab2-4b97-9ba1-54ed3e59da5c\") "
Nov 26 07:06:11 crc kubenswrapper[4492]: I1126 07:06:11.304490 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/e34f6949-eab2-4b97-9ba1-54ed3e59da5c-db-sync-config-data\") pod \"e34f6949-eab2-4b97-9ba1-54ed3e59da5c\" (UID: \"e34f6949-eab2-4b97-9ba1-54ed3e59da5c\") "
Nov 26 07:06:11 crc kubenswrapper[4492]: I1126 07:06:11.325287 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e34f6949-eab2-4b97-9ba1-54ed3e59da5c-kube-api-access-bsjsf" (OuterVolumeSpecName: "kube-api-access-bsjsf") pod "e34f6949-eab2-4b97-9ba1-54ed3e59da5c" (UID: "e34f6949-eab2-4b97-9ba1-54ed3e59da5c"). InnerVolumeSpecName "kube-api-access-bsjsf". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 07:06:11 crc kubenswrapper[4492]: I1126 07:06:11.326840 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e34f6949-eab2-4b97-9ba1-54ed3e59da5c-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "e34f6949-eab2-4b97-9ba1-54ed3e59da5c" (UID: "e34f6949-eab2-4b97-9ba1-54ed3e59da5c"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 07:06:11 crc kubenswrapper[4492]: I1126 07:06:11.360245 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e34f6949-eab2-4b97-9ba1-54ed3e59da5c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e34f6949-eab2-4b97-9ba1-54ed3e59da5c" (UID: "e34f6949-eab2-4b97-9ba1-54ed3e59da5c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 07:06:11 crc kubenswrapper[4492]: I1126 07:06:11.407100 4492 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e34f6949-eab2-4b97-9ba1-54ed3e59da5c-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 26 07:06:11 crc kubenswrapper[4492]: I1126 07:06:11.407131 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bsjsf\" (UniqueName: \"kubernetes.io/projected/e34f6949-eab2-4b97-9ba1-54ed3e59da5c-kube-api-access-bsjsf\") on node \"crc\" DevicePath \"\""
Nov 26 07:06:11 crc kubenswrapper[4492]: I1126 07:06:11.407146 4492 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/e34f6949-eab2-4b97-9ba1-54ed3e59da5c-db-sync-config-data\") on node \"crc\" DevicePath \"\""
Nov 26 07:06:11 crc kubenswrapper[4492]: I1126 07:06:11.855112 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-9249p" event={"ID":"e34f6949-eab2-4b97-9ba1-54ed3e59da5c","Type":"ContainerDied","Data":"f4341e56969edce48aaf4d606b45d52a7bdd10cee1b288d590e3ce4343963540"}
Nov 26 07:06:11 crc kubenswrapper[4492]: I1126 07:06:11.855443 4492 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f4341e56969edce48aaf4d606b45d52a7bdd10cee1b288d590e3ce4343963540"
Nov 26 07:06:11 crc kubenswrapper[4492]: I1126 07:06:11.855557 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-9249p"
Nov 26 07:06:12 crc kubenswrapper[4492]: I1126 07:06:12.105553 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-keystone-listener-849fccf54-r7v7b"]
Nov 26 07:06:12 crc kubenswrapper[4492]: E1126 07:06:12.106475 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e34f6949-eab2-4b97-9ba1-54ed3e59da5c" containerName="barbican-db-sync"
Nov 26 07:06:12 crc kubenswrapper[4492]: I1126 07:06:12.106497 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="e34f6949-eab2-4b97-9ba1-54ed3e59da5c" containerName="barbican-db-sync"
Nov 26 07:06:12 crc kubenswrapper[4492]: I1126 07:06:12.106680 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="e34f6949-eab2-4b97-9ba1-54ed3e59da5c" containerName="barbican-db-sync"
Nov 26 07:06:12 crc kubenswrapper[4492]: I1126 07:06:12.107968 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-849fccf54-r7v7b"
Nov 26 07:06:12 crc kubenswrapper[4492]: I1126 07:06:12.117449 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-keystone-listener-config-data"
Nov 26 07:06:12 crc kubenswrapper[4492]: I1126 07:06:12.117763 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data"
Nov 26 07:06:12 crc kubenswrapper[4492]: I1126 07:06:12.117888 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-kgvv2"
Nov 26 07:06:12 crc kubenswrapper[4492]: I1126 07:06:12.146364 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-849fccf54-r7v7b"]
Nov 26 07:06:12 crc kubenswrapper[4492]: I1126 07:06:12.164820 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-worker-7f7fc7947-wl47t"]
Nov 26 07:06:12 crc kubenswrapper[4492]: I1126 07:06:12.167455 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-7f7fc7947-wl47t"
Nov 26 07:06:12 crc kubenswrapper[4492]: I1126 07:06:12.180300 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-worker-config-data"
Nov 26 07:06:12 crc kubenswrapper[4492]: I1126 07:06:12.229548 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c4a82e4f-758b-48bb-9dba-cd1b59afeab7-logs\") pod \"barbican-keystone-listener-849fccf54-r7v7b\" (UID: \"c4a82e4f-758b-48bb-9dba-cd1b59afeab7\") " pod="openstack/barbican-keystone-listener-849fccf54-r7v7b"
Nov 26 07:06:12 crc kubenswrapper[4492]: I1126 07:06:12.229590 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1818ac65-a224-4fce-93a7-ca59d1005deb-combined-ca-bundle\") pod \"barbican-worker-7f7fc7947-wl47t\" (UID: \"1818ac65-a224-4fce-93a7-ca59d1005deb\") " pod="openstack/barbican-worker-7f7fc7947-wl47t"
Nov 26 07:06:12 crc kubenswrapper[4492]: I1126 07:06:12.229645 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1818ac65-a224-4fce-93a7-ca59d1005deb-config-data\") pod \"barbican-worker-7f7fc7947-wl47t\" (UID: \"1818ac65-a224-4fce-93a7-ca59d1005deb\") " pod="openstack/barbican-worker-7f7fc7947-wl47t"
Nov 26 07:06:12 crc kubenswrapper[4492]: I1126 07:06:12.229672 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zdmlp\" (UniqueName: \"kubernetes.io/projected/c4a82e4f-758b-48bb-9dba-cd1b59afeab7-kube-api-access-zdmlp\") pod \"barbican-keystone-listener-849fccf54-r7v7b\" (UID: \"c4a82e4f-758b-48bb-9dba-cd1b59afeab7\") " pod="openstack/barbican-keystone-listener-849fccf54-r7v7b"
Nov 26 07:06:12 crc kubenswrapper[4492]: I1126 07:06:12.229712 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c4a82e4f-758b-48bb-9dba-cd1b59afeab7-combined-ca-bundle\") pod \"barbican-keystone-listener-849fccf54-r7v7b\" (UID: \"c4a82e4f-758b-48bb-9dba-cd1b59afeab7\") " pod="openstack/barbican-keystone-listener-849fccf54-r7v7b"
Nov 26 07:06:12 crc kubenswrapper[4492]: I1126 07:06:12.229734 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c4a82e4f-758b-48bb-9dba-cd1b59afeab7-config-data\") pod \"barbican-keystone-listener-849fccf54-r7v7b\" (UID: \"c4a82e4f-758b-48bb-9dba-cd1b59afeab7\") " pod="openstack/barbican-keystone-listener-849fccf54-r7v7b"
Nov 26 07:06:12 crc kubenswrapper[4492]: I1126 07:06:12.229762 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1818ac65-a224-4fce-93a7-ca59d1005deb-config-data-custom\") pod \"barbican-worker-7f7fc7947-wl47t\" (UID: \"1818ac65-a224-4fce-93a7-ca59d1005deb\") " pod="openstack/barbican-worker-7f7fc7947-wl47t"
Nov 26 07:06:12 crc kubenswrapper[4492]: I1126 07:06:12.229790 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c4a82e4f-758b-48bb-9dba-cd1b59afeab7-config-data-custom\") pod \"barbican-keystone-listener-849fccf54-r7v7b\" (UID: \"c4a82e4f-758b-48bb-9dba-cd1b59afeab7\") " pod="openstack/barbican-keystone-listener-849fccf54-r7v7b"
Nov 26 07:06:12 crc kubenswrapper[4492]: I1126 07:06:12.229812 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4tqmh\" (UniqueName: \"kubernetes.io/projected/1818ac65-a224-4fce-93a7-ca59d1005deb-kube-api-access-4tqmh\") pod \"barbican-worker-7f7fc7947-wl47t\" (UID: \"1818ac65-a224-4fce-93a7-ca59d1005deb\") " pod="openstack/barbican-worker-7f7fc7947-wl47t"
Nov 26 07:06:12 crc kubenswrapper[4492]: I1126 07:06:12.229852 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1818ac65-a224-4fce-93a7-ca59d1005deb-logs\") pod \"barbican-worker-7f7fc7947-wl47t\" (UID: \"1818ac65-a224-4fce-93a7-ca59d1005deb\") " pod="openstack/barbican-worker-7f7fc7947-wl47t"
Nov 26 07:06:12 crc kubenswrapper[4492]: I1126 07:06:12.288051 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-7f7fc7947-wl47t"]
Nov 26 07:06:12 crc kubenswrapper[4492]: I1126 07:06:12.317630 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7bbb944fd9-7chfg"]
Nov 26 07:06:12 crc kubenswrapper[4492]: I1126 07:06:12.319427 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7bbb944fd9-7chfg"
Nov 26 07:06:12 crc kubenswrapper[4492]: I1126 07:06:12.322519 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7bbb944fd9-7chfg"]
Nov 26 07:06:12 crc kubenswrapper[4492]: I1126 07:06:12.333452 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1818ac65-a224-4fce-93a7-ca59d1005deb-config-data\") pod \"barbican-worker-7f7fc7947-wl47t\" (UID: \"1818ac65-a224-4fce-93a7-ca59d1005deb\") " pod="openstack/barbican-worker-7f7fc7947-wl47t"
Nov 26 07:06:12 crc kubenswrapper[4492]: I1126 07:06:12.333494 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zdmlp\" (UniqueName: \"kubernetes.io/projected/c4a82e4f-758b-48bb-9dba-cd1b59afeab7-kube-api-access-zdmlp\") pod \"barbican-keystone-listener-849fccf54-r7v7b\" (UID: \"c4a82e4f-758b-48bb-9dba-cd1b59afeab7\") " pod="openstack/barbican-keystone-listener-849fccf54-r7v7b"
Nov 26 07:06:12 crc kubenswrapper[4492]: I1126 07:06:12.333539 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c4a82e4f-758b-48bb-9dba-cd1b59afeab7-combined-ca-bundle\") pod \"barbican-keystone-listener-849fccf54-r7v7b\" (UID: \"c4a82e4f-758b-48bb-9dba-cd1b59afeab7\") " pod="openstack/barbican-keystone-listener-849fccf54-r7v7b"
Nov 26 07:06:12 crc kubenswrapper[4492]: I1126 07:06:12.333561 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c4a82e4f-758b-48bb-9dba-cd1b59afeab7-config-data\") pod \"barbican-keystone-listener-849fccf54-r7v7b\" (UID: \"c4a82e4f-758b-48bb-9dba-cd1b59afeab7\") " pod="openstack/barbican-keystone-listener-849fccf54-r7v7b"
Nov 26 07:06:12 crc kubenswrapper[4492]: I1126 07:06:12.333593 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1818ac65-a224-4fce-93a7-ca59d1005deb-config-data-custom\") pod \"barbican-worker-7f7fc7947-wl47t\" (UID: \"1818ac65-a224-4fce-93a7-ca59d1005deb\") " pod="openstack/barbican-worker-7f7fc7947-wl47t"
Nov 26 07:06:12 crc kubenswrapper[4492]: I1126 07:06:12.333622 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c4a82e4f-758b-48bb-9dba-cd1b59afeab7-config-data-custom\") pod \"barbican-keystone-listener-849fccf54-r7v7b\" (UID: \"c4a82e4f-758b-48bb-9dba-cd1b59afeab7\") " pod="openstack/barbican-keystone-listener-849fccf54-r7v7b"
Nov 26 07:06:12 crc kubenswrapper[4492]: I1126 07:06:12.333643 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4tqmh\" (UniqueName: \"kubernetes.io/projected/1818ac65-a224-4fce-93a7-ca59d1005deb-kube-api-access-4tqmh\") pod \"barbican-worker-7f7fc7947-wl47t\" (UID: \"1818ac65-a224-4fce-93a7-ca59d1005deb\") " pod="openstack/barbican-worker-7f7fc7947-wl47t"
Nov 26 07:06:12 crc kubenswrapper[4492]: I1126 07:06:12.333668 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1818ac65-a224-4fce-93a7-ca59d1005deb-logs\") pod \"barbican-worker-7f7fc7947-wl47t\" (UID: \"1818ac65-a224-4fce-93a7-ca59d1005deb\") " pod="openstack/barbican-worker-7f7fc7947-wl47t"
Nov 26 07:06:12 crc kubenswrapper[4492]: I1126 07:06:12.333705 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c4a82e4f-758b-48bb-9dba-cd1b59afeab7-logs\") pod \"barbican-keystone-listener-849fccf54-r7v7b\" (UID: \"c4a82e4f-758b-48bb-9dba-cd1b59afeab7\") " pod="openstack/barbican-keystone-listener-849fccf54-r7v7b"
Nov 26 07:06:12 crc kubenswrapper[4492]: I1126 07:06:12.333725 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1818ac65-a224-4fce-93a7-ca59d1005deb-combined-ca-bundle\") pod \"barbican-worker-7f7fc7947-wl47t\" (UID: \"1818ac65-a224-4fce-93a7-ca59d1005deb\") " pod="openstack/barbican-worker-7f7fc7947-wl47t"
Nov 26 07:06:12 crc kubenswrapper[4492]: I1126 07:06:12.340862 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c4a82e4f-758b-48bb-9dba-cd1b59afeab7-config-data-custom\") pod \"barbican-keystone-listener-849fccf54-r7v7b\" (UID: \"c4a82e4f-758b-48bb-9dba-cd1b59afeab7\") " pod="openstack/barbican-keystone-listener-849fccf54-r7v7b"
Nov 26 07:06:12 crc kubenswrapper[4492]: I1126 07:06:12.347455 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1818ac65-a224-4fce-93a7-ca59d1005deb-logs\") pod \"barbican-worker-7f7fc7947-wl47t\" (UID: \"1818ac65-a224-4fce-93a7-ca59d1005deb\") " pod="openstack/barbican-worker-7f7fc7947-wl47t"
Nov 26 07:06:12 crc kubenswrapper[4492]: I1126 07:06:12.347731 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c4a82e4f-758b-48bb-9dba-cd1b59afeab7-logs\") pod \"barbican-keystone-listener-849fccf54-r7v7b\" (UID: \"c4a82e4f-758b-48bb-9dba-cd1b59afeab7\") " pod="openstack/barbican-keystone-listener-849fccf54-r7v7b"
Nov 26 07:06:12 crc kubenswrapper[4492]: I1126 07:06:12.347895 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1818ac65-a224-4fce-93a7-ca59d1005deb-combined-ca-bundle\") pod \"barbican-worker-7f7fc7947-wl47t\" (UID: \"1818ac65-a224-4fce-93a7-ca59d1005deb\") " pod="openstack/barbican-worker-7f7fc7947-wl47t"
Nov 26 07:06:12 crc kubenswrapper[4492]: I1126 07:06:12.350663 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1818ac65-a224-4fce-93a7-ca59d1005deb-config-data-custom\") pod \"barbican-worker-7f7fc7947-wl47t\" (UID: \"1818ac65-a224-4fce-93a7-ca59d1005deb\") " pod="openstack/barbican-worker-7f7fc7947-wl47t"
Nov 26 07:06:12 crc kubenswrapper[4492]: I1126 07:06:12.350950 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c4a82e4f-758b-48bb-9dba-cd1b59afeab7-combined-ca-bundle\") pod \"barbican-keystone-listener-849fccf54-r7v7b\" (UID: \"c4a82e4f-758b-48bb-9dba-cd1b59afeab7\") " pod="openstack/barbican-keystone-listener-849fccf54-r7v7b"
Nov 26 07:06:12 crc kubenswrapper[4492]: I1126 07:06:12.351230 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1818ac65-a224-4fce-93a7-ca59d1005deb-config-data\") pod \"barbican-worker-7f7fc7947-wl47t\" (UID: \"1818ac65-a224-4fce-93a7-ca59d1005deb\") " pod="openstack/barbican-worker-7f7fc7947-wl47t"
Nov 26 07:06:12 crc kubenswrapper[4492]: I1126 07:06:12.366201 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c4a82e4f-758b-48bb-9dba-cd1b59afeab7-config-data\") pod \"barbican-keystone-listener-849fccf54-r7v7b\" (UID: \"c4a82e4f-758b-48bb-9dba-cd1b59afeab7\") " pod="openstack/barbican-keystone-listener-849fccf54-r7v7b"
Nov 26 07:06:12 crc kubenswrapper[4492]: I1126 07:06:12.378919 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4tqmh\" (UniqueName: \"kubernetes.io/projected/1818ac65-a224-4fce-93a7-ca59d1005deb-kube-api-access-4tqmh\") pod \"barbican-worker-7f7fc7947-wl47t\" (UID: \"1818ac65-a224-4fce-93a7-ca59d1005deb\") " pod="openstack/barbican-worker-7f7fc7947-wl47t"
Nov 26 07:06:12 crc kubenswrapper[4492]: I1126 07:06:12.381461 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zdmlp\" (UniqueName: \"kubernetes.io/projected/c4a82e4f-758b-48bb-9dba-cd1b59afeab7-kube-api-access-zdmlp\") pod \"barbican-keystone-listener-849fccf54-r7v7b\" (UID: \"c4a82e4f-758b-48bb-9dba-cd1b59afeab7\") " pod="openstack/barbican-keystone-listener-849fccf54-r7v7b"
Nov 26 07:06:12 crc kubenswrapper[4492]: I1126 07:06:12.394670 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-6979b94f94-5ddp5"]
Nov 26 07:06:12 crc kubenswrapper[4492]: I1126 07:06:12.396137 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-6979b94f94-5ddp5"
Nov 26 07:06:12 crc kubenswrapper[4492]: I1126 07:06:12.399295 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-api-config-data"
Nov 26 07:06:12 crc kubenswrapper[4492]: I1126 07:06:12.420019 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-6979b94f94-5ddp5"]
Nov 26 07:06:12 crc kubenswrapper[4492]: I1126 07:06:12.439264 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a6a20d7b-0eda-4124-97f8-0882c8fb0d3e-ovsdbserver-nb\") pod \"dnsmasq-dns-7bbb944fd9-7chfg\" (UID: \"a6a20d7b-0eda-4124-97f8-0882c8fb0d3e\") " pod="openstack/dnsmasq-dns-7bbb944fd9-7chfg"
Nov 26 07:06:12 crc kubenswrapper[4492]: I1126 07:06:12.439382 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a6a20d7b-0eda-4124-97f8-0882c8fb0d3e-ovsdbserver-sb\") pod \"dnsmasq-dns-7bbb944fd9-7chfg\" (UID: \"a6a20d7b-0eda-4124-97f8-0882c8fb0d3e\") " pod="openstack/dnsmasq-dns-7bbb944fd9-7chfg"
Nov 26 07:06:12 crc kubenswrapper[4492]: I1126 07:06:12.439497 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6h9b2\" (UniqueName: \"kubernetes.io/projected/a6a20d7b-0eda-4124-97f8-0882c8fb0d3e-kube-api-access-6h9b2\") pod \"dnsmasq-dns-7bbb944fd9-7chfg\" (UID: \"a6a20d7b-0eda-4124-97f8-0882c8fb0d3e\") " pod="openstack/dnsmasq-dns-7bbb944fd9-7chfg"
Nov 26 07:06:12 crc kubenswrapper[4492]: I1126 07:06:12.439539 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a6a20d7b-0eda-4124-97f8-0882c8fb0d3e-config\") pod \"dnsmasq-dns-7bbb944fd9-7chfg\" (UID: \"a6a20d7b-0eda-4124-97f8-0882c8fb0d3e\") " pod="openstack/dnsmasq-dns-7bbb944fd9-7chfg"
Nov 26 07:06:12 crc kubenswrapper[4492]: I1126 07:06:12.439601 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a6a20d7b-0eda-4124-97f8-0882c8fb0d3e-dns-svc\") pod \"dnsmasq-dns-7bbb944fd9-7chfg\" (UID: \"a6a20d7b-0eda-4124-97f8-0882c8fb0d3e\") " pod="openstack/dnsmasq-dns-7bbb944fd9-7chfg"
Nov 26 07:06:12 crc kubenswrapper[4492]: I1126 07:06:12.439628 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a6a20d7b-0eda-4124-97f8-0882c8fb0d3e-dns-swift-storage-0\") pod \"dnsmasq-dns-7bbb944fd9-7chfg\" (UID: \"a6a20d7b-0eda-4124-97f8-0882c8fb0d3e\") " pod="openstack/dnsmasq-dns-7bbb944fd9-7chfg"
Nov 26 07:06:12 crc kubenswrapper[4492]: I1126 07:06:12.452525 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-849fccf54-r7v7b"
Nov 26 07:06:12 crc kubenswrapper[4492]: I1126 07:06:12.482090 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-7s6sk"
Nov 26 07:06:12 crc kubenswrapper[4492]: I1126 07:06:12.482766 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-7f7fc7947-wl47t"
Nov 26 07:06:12 crc kubenswrapper[4492]: I1126 07:06:12.540509 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ad2234e1-842b-4bba-bd21-9fb781403667-config-data\") pod \"ad2234e1-842b-4bba-bd21-9fb781403667\" (UID: \"ad2234e1-842b-4bba-bd21-9fb781403667\") "
Nov 26 07:06:12 crc kubenswrapper[4492]: I1126 07:06:12.540555 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6hb5w\" (UniqueName: \"kubernetes.io/projected/ad2234e1-842b-4bba-bd21-9fb781403667-kube-api-access-6hb5w\") pod \"ad2234e1-842b-4bba-bd21-9fb781403667\" (UID: \"ad2234e1-842b-4bba-bd21-9fb781403667\") "
Nov 26 07:06:12 crc kubenswrapper[4492]: I1126 07:06:12.540781 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ad2234e1-842b-4bba-bd21-9fb781403667-combined-ca-bundle\") pod \"ad2234e1-842b-4bba-bd21-9fb781403667\" (UID: \"ad2234e1-842b-4bba-bd21-9fb781403667\") "
Nov 26 07:06:12 crc kubenswrapper[4492]: I1126 07:06:12.541117 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0d14378e-a9ce-4c01-854f-a5f9ca277f73-logs\") pod \"barbican-api-6979b94f94-5ddp5\" (UID: \"0d14378e-a9ce-4c01-854f-a5f9ca277f73\") " pod="openstack/barbican-api-6979b94f94-5ddp5"
Nov 26 07:06:12 crc kubenswrapper[4492]: I1126 07:06:12.541141 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0d14378e-a9ce-4c01-854f-a5f9ca277f73-combined-ca-bundle\") pod \"barbican-api-6979b94f94-5ddp5\" (UID: \"0d14378e-a9ce-4c01-854f-a5f9ca277f73\") " pod="openstack/barbican-api-6979b94f94-5ddp5"
Nov 26 07:06:12 crc kubenswrapper[4492]: I1126 07:06:12.541163 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f9jfx\" (UniqueName: \"kubernetes.io/projected/0d14378e-a9ce-4c01-854f-a5f9ca277f73-kube-api-access-f9jfx\") pod \"barbican-api-6979b94f94-5ddp5\" (UID: \"0d14378e-a9ce-4c01-854f-a5f9ca277f73\") " pod="openstack/barbican-api-6979b94f94-5ddp5"
Nov 26 07:06:12 crc kubenswrapper[4492]: I1126 07:06:12.541209 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a6a20d7b-0eda-4124-97f8-0882c8fb0d3e-dns-svc\") pod \"dnsmasq-dns-7bbb944fd9-7chfg\" (UID: \"a6a20d7b-0eda-4124-97f8-0882c8fb0d3e\") " pod="openstack/dnsmasq-dns-7bbb944fd9-7chfg"
Nov 26 07:06:12 crc kubenswrapper[4492]: I1126 07:06:12.541233 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a6a20d7b-0eda-4124-97f8-0882c8fb0d3e-dns-swift-storage-0\") pod \"dnsmasq-dns-7bbb944fd9-7chfg\" (UID: \"a6a20d7b-0eda-4124-97f8-0882c8fb0d3e\") " pod="openstack/dnsmasq-dns-7bbb944fd9-7chfg"
Nov 26 07:06:12 crc kubenswrapper[4492]: I1126 07:06:12.541294 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0d14378e-a9ce-4c01-854f-a5f9ca277f73-config-data\") pod \"barbican-api-6979b94f94-5ddp5\" (UID: \"0d14378e-a9ce-4c01-854f-a5f9ca277f73\") " pod="openstack/barbican-api-6979b94f94-5ddp5"
Nov
26 07:06:12 crc kubenswrapper[4492]: I1126 07:06:12.541316 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a6a20d7b-0eda-4124-97f8-0882c8fb0d3e-ovsdbserver-nb\") pod \"dnsmasq-dns-7bbb944fd9-7chfg\" (UID: \"a6a20d7b-0eda-4124-97f8-0882c8fb0d3e\") " pod="openstack/dnsmasq-dns-7bbb944fd9-7chfg" Nov 26 07:06:12 crc kubenswrapper[4492]: I1126 07:06:12.541355 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a6a20d7b-0eda-4124-97f8-0882c8fb0d3e-ovsdbserver-sb\") pod \"dnsmasq-dns-7bbb944fd9-7chfg\" (UID: \"a6a20d7b-0eda-4124-97f8-0882c8fb0d3e\") " pod="openstack/dnsmasq-dns-7bbb944fd9-7chfg" Nov 26 07:06:12 crc kubenswrapper[4492]: I1126 07:06:12.541401 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6h9b2\" (UniqueName: \"kubernetes.io/projected/a6a20d7b-0eda-4124-97f8-0882c8fb0d3e-kube-api-access-6h9b2\") pod \"dnsmasq-dns-7bbb944fd9-7chfg\" (UID: \"a6a20d7b-0eda-4124-97f8-0882c8fb0d3e\") " pod="openstack/dnsmasq-dns-7bbb944fd9-7chfg" Nov 26 07:06:12 crc kubenswrapper[4492]: I1126 07:06:12.541422 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0d14378e-a9ce-4c01-854f-a5f9ca277f73-config-data-custom\") pod \"barbican-api-6979b94f94-5ddp5\" (UID: \"0d14378e-a9ce-4c01-854f-a5f9ca277f73\") " pod="openstack/barbican-api-6979b94f94-5ddp5" Nov 26 07:06:12 crc kubenswrapper[4492]: I1126 07:06:12.541451 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a6a20d7b-0eda-4124-97f8-0882c8fb0d3e-config\") pod \"dnsmasq-dns-7bbb944fd9-7chfg\" (UID: \"a6a20d7b-0eda-4124-97f8-0882c8fb0d3e\") " pod="openstack/dnsmasq-dns-7bbb944fd9-7chfg" Nov 26 07:06:12 crc kubenswrapper[4492]: I1126 07:06:12.542435 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a6a20d7b-0eda-4124-97f8-0882c8fb0d3e-config\") pod \"dnsmasq-dns-7bbb944fd9-7chfg\" (UID: \"a6a20d7b-0eda-4124-97f8-0882c8fb0d3e\") " pod="openstack/dnsmasq-dns-7bbb944fd9-7chfg" Nov 26 07:06:12 crc kubenswrapper[4492]: I1126 07:06:12.542955 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a6a20d7b-0eda-4124-97f8-0882c8fb0d3e-dns-svc\") pod \"dnsmasq-dns-7bbb944fd9-7chfg\" (UID: \"a6a20d7b-0eda-4124-97f8-0882c8fb0d3e\") " pod="openstack/dnsmasq-dns-7bbb944fd9-7chfg" Nov 26 07:06:12 crc kubenswrapper[4492]: I1126 07:06:12.544328 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a6a20d7b-0eda-4124-97f8-0882c8fb0d3e-ovsdbserver-sb\") pod \"dnsmasq-dns-7bbb944fd9-7chfg\" (UID: \"a6a20d7b-0eda-4124-97f8-0882c8fb0d3e\") " pod="openstack/dnsmasq-dns-7bbb944fd9-7chfg" Nov 26 07:06:12 crc kubenswrapper[4492]: I1126 07:06:12.544895 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a6a20d7b-0eda-4124-97f8-0882c8fb0d3e-ovsdbserver-nb\") pod \"dnsmasq-dns-7bbb944fd9-7chfg\" (UID: \"a6a20d7b-0eda-4124-97f8-0882c8fb0d3e\") " pod="openstack/dnsmasq-dns-7bbb944fd9-7chfg" Nov 26 07:06:12 crc kubenswrapper[4492]: I1126 07:06:12.547837 4492 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a6a20d7b-0eda-4124-97f8-0882c8fb0d3e-dns-swift-storage-0\") pod \"dnsmasq-dns-7bbb944fd9-7chfg\" (UID: \"a6a20d7b-0eda-4124-97f8-0882c8fb0d3e\") " pod="openstack/dnsmasq-dns-7bbb944fd9-7chfg" Nov 26 07:06:12 crc kubenswrapper[4492]: I1126 07:06:12.550615 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ad2234e1-842b-4bba-bd21-9fb781403667-kube-api-access-6hb5w" (OuterVolumeSpecName: "kube-api-access-6hb5w") pod "ad2234e1-842b-4bba-bd21-9fb781403667" (UID: "ad2234e1-842b-4bba-bd21-9fb781403667"). InnerVolumeSpecName "kube-api-access-6hb5w". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:06:12 crc kubenswrapper[4492]: I1126 07:06:12.577053 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6h9b2\" (UniqueName: \"kubernetes.io/projected/a6a20d7b-0eda-4124-97f8-0882c8fb0d3e-kube-api-access-6h9b2\") pod \"dnsmasq-dns-7bbb944fd9-7chfg\" (UID: \"a6a20d7b-0eda-4124-97f8-0882c8fb0d3e\") " pod="openstack/dnsmasq-dns-7bbb944fd9-7chfg" Nov 26 07:06:12 crc kubenswrapper[4492]: I1126 07:06:12.604636 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ad2234e1-842b-4bba-bd21-9fb781403667-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ad2234e1-842b-4bba-bd21-9fb781403667" (UID: "ad2234e1-842b-4bba-bd21-9fb781403667"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:06:12 crc kubenswrapper[4492]: I1126 07:06:12.622895 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ad2234e1-842b-4bba-bd21-9fb781403667-config-data" (OuterVolumeSpecName: "config-data") pod "ad2234e1-842b-4bba-bd21-9fb781403667" (UID: "ad2234e1-842b-4bba-bd21-9fb781403667"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:06:12 crc kubenswrapper[4492]: I1126 07:06:12.645645 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0d14378e-a9ce-4c01-854f-a5f9ca277f73-config-data-custom\") pod \"barbican-api-6979b94f94-5ddp5\" (UID: \"0d14378e-a9ce-4c01-854f-a5f9ca277f73\") " pod="openstack/barbican-api-6979b94f94-5ddp5" Nov 26 07:06:12 crc kubenswrapper[4492]: I1126 07:06:12.645720 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0d14378e-a9ce-4c01-854f-a5f9ca277f73-logs\") pod \"barbican-api-6979b94f94-5ddp5\" (UID: \"0d14378e-a9ce-4c01-854f-a5f9ca277f73\") " pod="openstack/barbican-api-6979b94f94-5ddp5" Nov 26 07:06:12 crc kubenswrapper[4492]: I1126 07:06:12.645743 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0d14378e-a9ce-4c01-854f-a5f9ca277f73-combined-ca-bundle\") pod \"barbican-api-6979b94f94-5ddp5\" (UID: \"0d14378e-a9ce-4c01-854f-a5f9ca277f73\") " pod="openstack/barbican-api-6979b94f94-5ddp5" Nov 26 07:06:12 crc kubenswrapper[4492]: I1126 07:06:12.645768 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f9jfx\" (UniqueName: \"kubernetes.io/projected/0d14378e-a9ce-4c01-854f-a5f9ca277f73-kube-api-access-f9jfx\") pod \"barbican-api-6979b94f94-5ddp5\" (UID: \"0d14378e-a9ce-4c01-854f-a5f9ca277f73\") " pod="openstack/barbican-api-6979b94f94-5ddp5" Nov 26 07:06:12 crc kubenswrapper[4492]: I1126 07:06:12.645872 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0d14378e-a9ce-4c01-854f-a5f9ca277f73-config-data\") pod \"barbican-api-6979b94f94-5ddp5\" (UID: \"0d14378e-a9ce-4c01-854f-a5f9ca277f73\") " pod="openstack/barbican-api-6979b94f94-5ddp5" Nov 26 07:06:12 crc kubenswrapper[4492]: I1126 07:06:12.645990 4492 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ad2234e1-842b-4bba-bd21-9fb781403667-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 07:06:12 crc kubenswrapper[4492]: I1126 07:06:12.646002 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6hb5w\" (UniqueName: \"kubernetes.io/projected/ad2234e1-842b-4bba-bd21-9fb781403667-kube-api-access-6hb5w\") on node \"crc\" DevicePath \"\"" Nov 26 07:06:12 crc kubenswrapper[4492]: I1126 07:06:12.646336 4492 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ad2234e1-842b-4bba-bd21-9fb781403667-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:06:12 crc kubenswrapper[4492]: I1126 07:06:12.654845 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0d14378e-a9ce-4c01-854f-a5f9ca277f73-logs\") pod \"barbican-api-6979b94f94-5ddp5\" (UID: \"0d14378e-a9ce-4c01-854f-a5f9ca277f73\") " pod="openstack/barbican-api-6979b94f94-5ddp5" Nov 26 07:06:12 crc kubenswrapper[4492]: I1126 07:06:12.655043 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0d14378e-a9ce-4c01-854f-a5f9ca277f73-combined-ca-bundle\") pod \"barbican-api-6979b94f94-5ddp5\" (UID: \"0d14378e-a9ce-4c01-854f-a5f9ca277f73\") " 
pod="openstack/barbican-api-6979b94f94-5ddp5" Nov 26 07:06:12 crc kubenswrapper[4492]: I1126 07:06:12.663233 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0d14378e-a9ce-4c01-854f-a5f9ca277f73-config-data\") pod \"barbican-api-6979b94f94-5ddp5\" (UID: \"0d14378e-a9ce-4c01-854f-a5f9ca277f73\") " pod="openstack/barbican-api-6979b94f94-5ddp5" Nov 26 07:06:12 crc kubenswrapper[4492]: I1126 07:06:12.663648 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0d14378e-a9ce-4c01-854f-a5f9ca277f73-config-data-custom\") pod \"barbican-api-6979b94f94-5ddp5\" (UID: \"0d14378e-a9ce-4c01-854f-a5f9ca277f73\") " pod="openstack/barbican-api-6979b94f94-5ddp5" Nov 26 07:06:12 crc kubenswrapper[4492]: I1126 07:06:12.665289 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f9jfx\" (UniqueName: \"kubernetes.io/projected/0d14378e-a9ce-4c01-854f-a5f9ca277f73-kube-api-access-f9jfx\") pod \"barbican-api-6979b94f94-5ddp5\" (UID: \"0d14378e-a9ce-4c01-854f-a5f9ca277f73\") " pod="openstack/barbican-api-6979b94f94-5ddp5" Nov 26 07:06:12 crc kubenswrapper[4492]: I1126 07:06:12.842131 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7bbb944fd9-7chfg" Nov 26 07:06:12 crc kubenswrapper[4492]: I1126 07:06:12.848695 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-6979b94f94-5ddp5" Nov 26 07:06:12 crc kubenswrapper[4492]: I1126 07:06:12.877855 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-7s6sk" event={"ID":"ad2234e1-842b-4bba-bd21-9fb781403667","Type":"ContainerDied","Data":"9d4c526a656e05501c5bc26158393c7caa6649bcc4ba40bf9d4ea6f5ef35edf4"} Nov 26 07:06:12 crc kubenswrapper[4492]: I1126 07:06:12.883202 4492 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9d4c526a656e05501c5bc26158393c7caa6649bcc4ba40bf9d4ea6f5ef35edf4" Nov 26 07:06:12 crc kubenswrapper[4492]: I1126 07:06:12.881047 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-db-sync-7s6sk" Nov 26 07:06:13 crc kubenswrapper[4492]: I1126 07:06:13.075848 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-849fccf54-r7v7b"] Nov 26 07:06:13 crc kubenswrapper[4492]: I1126 07:06:13.292094 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-7f7fc7947-wl47t"] Nov 26 07:06:13 crc kubenswrapper[4492]: I1126 07:06:13.403042 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-6979b94f94-5ddp5"] Nov 26 07:06:13 crc kubenswrapper[4492]: W1126 07:06:13.408450 4492 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0d14378e_a9ce_4c01_854f_a5f9ca277f73.slice/crio-485c287263dc31b1985efbfbd7c1195ed2984b98355287da0cacd958ae8bbf7f WatchSource:0}: Error finding container 485c287263dc31b1985efbfbd7c1195ed2984b98355287da0cacd958ae8bbf7f: Status 404 returned error can't find the container with id 485c287263dc31b1985efbfbd7c1195ed2984b98355287da0cacd958ae8bbf7f Nov 26 07:06:13 crc kubenswrapper[4492]: I1126 07:06:13.721479 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7bbb944fd9-7chfg"] Nov 26 07:06:13 crc kubenswrapper[4492]: W1126 07:06:13.746126 4492 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda6a20d7b_0eda_4124_97f8_0882c8fb0d3e.slice/crio-31cfbe79773431f6d1150428cf52f4879ba7ce8ac6a8328f0036de011c56f7e0 WatchSource:0}: Error finding container 31cfbe79773431f6d1150428cf52f4879ba7ce8ac6a8328f0036de011c56f7e0: Status 404 returned error can't find the container with id 31cfbe79773431f6d1150428cf52f4879ba7ce8ac6a8328f0036de011c56f7e0 Nov 26 07:06:13 crc kubenswrapper[4492]: I1126 07:06:13.905358 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7bbb944fd9-7chfg" event={"ID":"a6a20d7b-0eda-4124-97f8-0882c8fb0d3e","Type":"ContainerStarted","Data":"31cfbe79773431f6d1150428cf52f4879ba7ce8ac6a8328f0036de011c56f7e0"} Nov 26 07:06:13 crc kubenswrapper[4492]: I1126 07:06:13.906844 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-849fccf54-r7v7b" event={"ID":"c4a82e4f-758b-48bb-9dba-cd1b59afeab7","Type":"ContainerStarted","Data":"ea8951bbfceb0e412cdc222fafc9cb0fe18091f276d11368246c44a7ee25ad16"} Nov 26 07:06:13 crc kubenswrapper[4492]: I1126 07:06:13.915140 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-6979b94f94-5ddp5" event={"ID":"0d14378e-a9ce-4c01-854f-a5f9ca277f73","Type":"ContainerStarted","Data":"ce4142b9167acdddbb6b01998e851089dd94701f8afcba4a1e2db70d070f5066"} Nov 26 07:06:13 crc kubenswrapper[4492]: I1126 07:06:13.915248 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-6979b94f94-5ddp5" event={"ID":"0d14378e-a9ce-4c01-854f-a5f9ca277f73","Type":"ContainerStarted","Data":"485c287263dc31b1985efbfbd7c1195ed2984b98355287da0cacd958ae8bbf7f"} Nov 26 07:06:13 crc kubenswrapper[4492]: I1126 07:06:13.919713 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-7f7fc7947-wl47t" event={"ID":"1818ac65-a224-4fce-93a7-ca59d1005deb","Type":"ContainerStarted","Data":"6c4b344fadf55504b3704a17e56bfc845d907fc68c8d1925fb7177ecd4332074"} Nov 26 07:06:14 crc kubenswrapper[4492]: I1126 07:06:14.369717 4492 prober.go:107] "Probe failed" probeType="Startup" 
pod="openstack/horizon-7bb6557f96-rgc7g" podUID="a15ec528-9195-4dfe-95b7-e30a44f74b44" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.153:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.153:8443: connect: connection refused" Nov 26 07:06:14 crc kubenswrapper[4492]: I1126 07:06:14.498190 4492 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-6b48756c9b-4pxln" podUID="66834ee2-a38b-4d8d-9195-c4af38dc8a9b" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.154:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.154:8443: connect: connection refused" Nov 26 07:06:14 crc kubenswrapper[4492]: I1126 07:06:14.948328 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-6979b94f94-5ddp5" event={"ID":"0d14378e-a9ce-4c01-854f-a5f9ca277f73","Type":"ContainerStarted","Data":"03360ce9a4b3df2cc2b1f7000a1ee59185b92440cf62ab59fca8ac04e6c412ef"} Nov 26 07:06:14 crc kubenswrapper[4492]: I1126 07:06:14.950468 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-6979b94f94-5ddp5" Nov 26 07:06:14 crc kubenswrapper[4492]: I1126 07:06:14.950612 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-6979b94f94-5ddp5" Nov 26 07:06:14 crc kubenswrapper[4492]: I1126 07:06:14.961265 4492 generic.go:334] "Generic (PLEG): container finished" podID="a6a20d7b-0eda-4124-97f8-0882c8fb0d3e" containerID="85bb9f625757a7712a5c63f1cb66108e75b9125f1b320419e3fbe9e23496960f" exitCode=0 Nov 26 07:06:14 crc kubenswrapper[4492]: I1126 07:06:14.961331 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7bbb944fd9-7chfg" event={"ID":"a6a20d7b-0eda-4124-97f8-0882c8fb0d3e","Type":"ContainerDied","Data":"85bb9f625757a7712a5c63f1cb66108e75b9125f1b320419e3fbe9e23496960f"} Nov 26 07:06:14 crc kubenswrapper[4492]: I1126 07:06:14.976948 4492 generic.go:334] "Generic (PLEG): container finished" podID="a497bcf3-f8db-4b08-b5e3-33d050f9901a" containerID="67997b8189653f22b1f851f2eb4fbc6ebd61e5a924b9dc1ab9f1bb3eb873129d" exitCode=0 Nov 26 07:06:14 crc kubenswrapper[4492]: I1126 07:06:14.976998 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-8w6bv" event={"ID":"a497bcf3-f8db-4b08-b5e3-33d050f9901a","Type":"ContainerDied","Data":"67997b8189653f22b1f851f2eb4fbc6ebd61e5a924b9dc1ab9f1bb3eb873129d"} Nov 26 07:06:14 crc kubenswrapper[4492]: I1126 07:06:14.977917 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-6979b94f94-5ddp5" podStartSLOduration=2.977891604 podStartE2EDuration="2.977891604s" podCreationTimestamp="2025-11-26 07:06:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:06:14.969584966 +0000 UTC m=+1070.853473264" watchObservedRunningTime="2025-11-26 07:06:14.977891604 +0000 UTC m=+1070.861779903" Nov 26 07:06:15 crc kubenswrapper[4492]: I1126 07:06:15.863694 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-bf7664774-jvgvf"] Nov 26 07:06:15 crc kubenswrapper[4492]: E1126 07:06:15.864277 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ad2234e1-842b-4bba-bd21-9fb781403667" containerName="heat-db-sync" Nov 26 07:06:15 crc kubenswrapper[4492]: I1126 07:06:15.864296 4492 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="ad2234e1-842b-4bba-bd21-9fb781403667" containerName="heat-db-sync" Nov 26 07:06:15 crc kubenswrapper[4492]: I1126 07:06:15.864454 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="ad2234e1-842b-4bba-bd21-9fb781403667" containerName="heat-db-sync" Nov 26 07:06:15 crc kubenswrapper[4492]: I1126 07:06:15.865549 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-bf7664774-jvgvf" Nov 26 07:06:15 crc kubenswrapper[4492]: I1126 07:06:15.867913 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-public-svc" Nov 26 07:06:15 crc kubenswrapper[4492]: I1126 07:06:15.868185 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-internal-svc" Nov 26 07:06:15 crc kubenswrapper[4492]: I1126 07:06:15.896132 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-bf7664774-jvgvf"] Nov 26 07:06:15 crc kubenswrapper[4492]: I1126 07:06:15.952623 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t48vq\" (UniqueName: \"kubernetes.io/projected/dcf6c9d0-b966-4d3c-9e14-3db7dccdc916-kube-api-access-t48vq\") pod \"barbican-api-bf7664774-jvgvf\" (UID: \"dcf6c9d0-b966-4d3c-9e14-3db7dccdc916\") " pod="openstack/barbican-api-bf7664774-jvgvf" Nov 26 07:06:15 crc kubenswrapper[4492]: I1126 07:06:15.952675 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dcf6c9d0-b966-4d3c-9e14-3db7dccdc916-combined-ca-bundle\") pod \"barbican-api-bf7664774-jvgvf\" (UID: \"dcf6c9d0-b966-4d3c-9e14-3db7dccdc916\") " pod="openstack/barbican-api-bf7664774-jvgvf" Nov 26 07:06:15 crc kubenswrapper[4492]: I1126 07:06:15.952721 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/dcf6c9d0-b966-4d3c-9e14-3db7dccdc916-internal-tls-certs\") pod \"barbican-api-bf7664774-jvgvf\" (UID: \"dcf6c9d0-b966-4d3c-9e14-3db7dccdc916\") " pod="openstack/barbican-api-bf7664774-jvgvf" Nov 26 07:06:15 crc kubenswrapper[4492]: I1126 07:06:15.952741 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/dcf6c9d0-b966-4d3c-9e14-3db7dccdc916-config-data-custom\") pod \"barbican-api-bf7664774-jvgvf\" (UID: \"dcf6c9d0-b966-4d3c-9e14-3db7dccdc916\") " pod="openstack/barbican-api-bf7664774-jvgvf" Nov 26 07:06:15 crc kubenswrapper[4492]: I1126 07:06:15.952766 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/dcf6c9d0-b966-4d3c-9e14-3db7dccdc916-public-tls-certs\") pod \"barbican-api-bf7664774-jvgvf\" (UID: \"dcf6c9d0-b966-4d3c-9e14-3db7dccdc916\") " pod="openstack/barbican-api-bf7664774-jvgvf" Nov 26 07:06:15 crc kubenswrapper[4492]: I1126 07:06:15.952801 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dcf6c9d0-b966-4d3c-9e14-3db7dccdc916-config-data\") pod \"barbican-api-bf7664774-jvgvf\" (UID: \"dcf6c9d0-b966-4d3c-9e14-3db7dccdc916\") " pod="openstack/barbican-api-bf7664774-jvgvf" Nov 26 07:06:15 crc kubenswrapper[4492]: I1126 07:06:15.952883 4492 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/dcf6c9d0-b966-4d3c-9e14-3db7dccdc916-logs\") pod \"barbican-api-bf7664774-jvgvf\" (UID: \"dcf6c9d0-b966-4d3c-9e14-3db7dccdc916\") " pod="openstack/barbican-api-bf7664774-jvgvf" Nov 26 07:06:15 crc kubenswrapper[4492]: I1126 07:06:15.992465 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7bbb944fd9-7chfg" event={"ID":"a6a20d7b-0eda-4124-97f8-0882c8fb0d3e","Type":"ContainerStarted","Data":"b1e041f81b9df4fe926009043b35670f73b5f115d1907ce9066aa94cb1ad3839"} Nov 26 07:06:16 crc kubenswrapper[4492]: I1126 07:06:16.054604 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/dcf6c9d0-b966-4d3c-9e14-3db7dccdc916-public-tls-certs\") pod \"barbican-api-bf7664774-jvgvf\" (UID: \"dcf6c9d0-b966-4d3c-9e14-3db7dccdc916\") " pod="openstack/barbican-api-bf7664774-jvgvf" Nov 26 07:06:16 crc kubenswrapper[4492]: I1126 07:06:16.054669 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dcf6c9d0-b966-4d3c-9e14-3db7dccdc916-config-data\") pod \"barbican-api-bf7664774-jvgvf\" (UID: \"dcf6c9d0-b966-4d3c-9e14-3db7dccdc916\") " pod="openstack/barbican-api-bf7664774-jvgvf" Nov 26 07:06:16 crc kubenswrapper[4492]: I1126 07:06:16.054758 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/dcf6c9d0-b966-4d3c-9e14-3db7dccdc916-logs\") pod \"barbican-api-bf7664774-jvgvf\" (UID: \"dcf6c9d0-b966-4d3c-9e14-3db7dccdc916\") " pod="openstack/barbican-api-bf7664774-jvgvf" Nov 26 07:06:16 crc kubenswrapper[4492]: I1126 07:06:16.054860 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t48vq\" (UniqueName: \"kubernetes.io/projected/dcf6c9d0-b966-4d3c-9e14-3db7dccdc916-kube-api-access-t48vq\") pod \"barbican-api-bf7664774-jvgvf\" (UID: \"dcf6c9d0-b966-4d3c-9e14-3db7dccdc916\") " pod="openstack/barbican-api-bf7664774-jvgvf" Nov 26 07:06:16 crc kubenswrapper[4492]: I1126 07:06:16.054889 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dcf6c9d0-b966-4d3c-9e14-3db7dccdc916-combined-ca-bundle\") pod \"barbican-api-bf7664774-jvgvf\" (UID: \"dcf6c9d0-b966-4d3c-9e14-3db7dccdc916\") " pod="openstack/barbican-api-bf7664774-jvgvf" Nov 26 07:06:16 crc kubenswrapper[4492]: I1126 07:06:16.054947 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/dcf6c9d0-b966-4d3c-9e14-3db7dccdc916-internal-tls-certs\") pod \"barbican-api-bf7664774-jvgvf\" (UID: \"dcf6c9d0-b966-4d3c-9e14-3db7dccdc916\") " pod="openstack/barbican-api-bf7664774-jvgvf" Nov 26 07:06:16 crc kubenswrapper[4492]: I1126 07:06:16.054971 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/dcf6c9d0-b966-4d3c-9e14-3db7dccdc916-config-data-custom\") pod \"barbican-api-bf7664774-jvgvf\" (UID: \"dcf6c9d0-b966-4d3c-9e14-3db7dccdc916\") " pod="openstack/barbican-api-bf7664774-jvgvf" Nov 26 07:06:16 crc kubenswrapper[4492]: I1126 07:06:16.059891 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: 
\"kubernetes.io/empty-dir/dcf6c9d0-b966-4d3c-9e14-3db7dccdc916-logs\") pod \"barbican-api-bf7664774-jvgvf\" (UID: \"dcf6c9d0-b966-4d3c-9e14-3db7dccdc916\") " pod="openstack/barbican-api-bf7664774-jvgvf" Nov 26 07:06:16 crc kubenswrapper[4492]: I1126 07:06:16.067827 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dcf6c9d0-b966-4d3c-9e14-3db7dccdc916-config-data\") pod \"barbican-api-bf7664774-jvgvf\" (UID: \"dcf6c9d0-b966-4d3c-9e14-3db7dccdc916\") " pod="openstack/barbican-api-bf7664774-jvgvf" Nov 26 07:06:16 crc kubenswrapper[4492]: I1126 07:06:16.072808 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dcf6c9d0-b966-4d3c-9e14-3db7dccdc916-combined-ca-bundle\") pod \"barbican-api-bf7664774-jvgvf\" (UID: \"dcf6c9d0-b966-4d3c-9e14-3db7dccdc916\") " pod="openstack/barbican-api-bf7664774-jvgvf" Nov 26 07:06:16 crc kubenswrapper[4492]: I1126 07:06:16.073959 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/dcf6c9d0-b966-4d3c-9e14-3db7dccdc916-config-data-custom\") pod \"barbican-api-bf7664774-jvgvf\" (UID: \"dcf6c9d0-b966-4d3c-9e14-3db7dccdc916\") " pod="openstack/barbican-api-bf7664774-jvgvf" Nov 26 07:06:16 crc kubenswrapper[4492]: I1126 07:06:16.074566 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/dcf6c9d0-b966-4d3c-9e14-3db7dccdc916-internal-tls-certs\") pod \"barbican-api-bf7664774-jvgvf\" (UID: \"dcf6c9d0-b966-4d3c-9e14-3db7dccdc916\") " pod="openstack/barbican-api-bf7664774-jvgvf" Nov 26 07:06:16 crc kubenswrapper[4492]: I1126 07:06:16.096521 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/dcf6c9d0-b966-4d3c-9e14-3db7dccdc916-public-tls-certs\") pod \"barbican-api-bf7664774-jvgvf\" (UID: \"dcf6c9d0-b966-4d3c-9e14-3db7dccdc916\") " pod="openstack/barbican-api-bf7664774-jvgvf" Nov 26 07:06:16 crc kubenswrapper[4492]: I1126 07:06:16.108726 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t48vq\" (UniqueName: \"kubernetes.io/projected/dcf6c9d0-b966-4d3c-9e14-3db7dccdc916-kube-api-access-t48vq\") pod \"barbican-api-bf7664774-jvgvf\" (UID: \"dcf6c9d0-b966-4d3c-9e14-3db7dccdc916\") " pod="openstack/barbican-api-bf7664774-jvgvf" Nov 26 07:06:16 crc kubenswrapper[4492]: I1126 07:06:16.187605 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-bf7664774-jvgvf" Nov 26 07:06:16 crc kubenswrapper[4492]: I1126 07:06:16.910350 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-8w6bv" Nov 26 07:06:16 crc kubenswrapper[4492]: I1126 07:06:16.940836 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-7bbb944fd9-7chfg" podStartSLOduration=4.940812555 podStartE2EDuration="4.940812555s" podCreationTimestamp="2025-11-26 07:06:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:06:16.011728823 +0000 UTC m=+1071.895617121" watchObservedRunningTime="2025-11-26 07:06:16.940812555 +0000 UTC m=+1072.824700852" Nov 26 07:06:16 crc kubenswrapper[4492]: I1126 07:06:16.984271 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/a497bcf3-f8db-4b08-b5e3-33d050f9901a-db-sync-config-data\") pod \"a497bcf3-f8db-4b08-b5e3-33d050f9901a\" (UID: \"a497bcf3-f8db-4b08-b5e3-33d050f9901a\") " Nov 26 07:06:16 crc kubenswrapper[4492]: I1126 07:06:16.984360 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b5744\" (UniqueName: \"kubernetes.io/projected/a497bcf3-f8db-4b08-b5e3-33d050f9901a-kube-api-access-b5744\") pod \"a497bcf3-f8db-4b08-b5e3-33d050f9901a\" (UID: \"a497bcf3-f8db-4b08-b5e3-33d050f9901a\") " Nov 26 07:06:16 crc kubenswrapper[4492]: I1126 07:06:16.984404 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/a497bcf3-f8db-4b08-b5e3-33d050f9901a-etc-machine-id\") pod \"a497bcf3-f8db-4b08-b5e3-33d050f9901a\" (UID: \"a497bcf3-f8db-4b08-b5e3-33d050f9901a\") " Nov 26 07:06:16 crc kubenswrapper[4492]: I1126 07:06:16.984433 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a497bcf3-f8db-4b08-b5e3-33d050f9901a-scripts\") pod \"a497bcf3-f8db-4b08-b5e3-33d050f9901a\" (UID: \"a497bcf3-f8db-4b08-b5e3-33d050f9901a\") " Nov 26 07:06:16 crc kubenswrapper[4492]: I1126 07:06:16.984501 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a497bcf3-f8db-4b08-b5e3-33d050f9901a-combined-ca-bundle\") pod \"a497bcf3-f8db-4b08-b5e3-33d050f9901a\" (UID: \"a497bcf3-f8db-4b08-b5e3-33d050f9901a\") " Nov 26 07:06:16 crc kubenswrapper[4492]: I1126 07:06:16.984551 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a497bcf3-f8db-4b08-b5e3-33d050f9901a-config-data\") pod \"a497bcf3-f8db-4b08-b5e3-33d050f9901a\" (UID: \"a497bcf3-f8db-4b08-b5e3-33d050f9901a\") " Nov 26 07:06:16 crc kubenswrapper[4492]: I1126 07:06:16.985950 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a497bcf3-f8db-4b08-b5e3-33d050f9901a-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "a497bcf3-f8db-4b08-b5e3-33d050f9901a" (UID: "a497bcf3-f8db-4b08-b5e3-33d050f9901a"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 07:06:17 crc kubenswrapper[4492]: I1126 07:06:17.008329 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a497bcf3-f8db-4b08-b5e3-33d050f9901a-scripts" (OuterVolumeSpecName: "scripts") pod "a497bcf3-f8db-4b08-b5e3-33d050f9901a" (UID: "a497bcf3-f8db-4b08-b5e3-33d050f9901a"). 
InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:06:17 crc kubenswrapper[4492]: I1126 07:06:17.013666 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a497bcf3-f8db-4b08-b5e3-33d050f9901a-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "a497bcf3-f8db-4b08-b5e3-33d050f9901a" (UID: "a497bcf3-f8db-4b08-b5e3-33d050f9901a"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:06:17 crc kubenswrapper[4492]: I1126 07:06:17.014602 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a497bcf3-f8db-4b08-b5e3-33d050f9901a-kube-api-access-b5744" (OuterVolumeSpecName: "kube-api-access-b5744") pod "a497bcf3-f8db-4b08-b5e3-33d050f9901a" (UID: "a497bcf3-f8db-4b08-b5e3-33d050f9901a"). InnerVolumeSpecName "kube-api-access-b5744". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:06:17 crc kubenswrapper[4492]: I1126 07:06:17.023563 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a497bcf3-f8db-4b08-b5e3-33d050f9901a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a497bcf3-f8db-4b08-b5e3-33d050f9901a" (UID: "a497bcf3-f8db-4b08-b5e3-33d050f9901a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:06:17 crc kubenswrapper[4492]: I1126 07:06:17.038977 4492 generic.go:334] "Generic (PLEG): container finished" podID="38194db3-f048-45e5-80d6-7dfa8f1f7420" containerID="4a7744552769063790091905df7e367ce4227767f52b93cb69f2cf6f31dd1597" exitCode=0 Nov 26 07:06:17 crc kubenswrapper[4492]: I1126 07:06:17.039051 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-86bdc94cc6-ws8xc" event={"ID":"38194db3-f048-45e5-80d6-7dfa8f1f7420","Type":"ContainerDied","Data":"4a7744552769063790091905df7e367ce4227767f52b93cb69f2cf6f31dd1597"} Nov 26 07:06:17 crc kubenswrapper[4492]: I1126 07:06:17.042262 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-8w6bv" event={"ID":"a497bcf3-f8db-4b08-b5e3-33d050f9901a","Type":"ContainerDied","Data":"ec11777edaeb307bb33ebe64ba4cf403076ebd787c82dea426689828c7cf0589"} Nov 26 07:06:17 crc kubenswrapper[4492]: I1126 07:06:17.042299 4492 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ec11777edaeb307bb33ebe64ba4cf403076ebd787c82dea426689828c7cf0589" Nov 26 07:06:17 crc kubenswrapper[4492]: I1126 07:06:17.042415 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-7bbb944fd9-7chfg" Nov 26 07:06:17 crc kubenswrapper[4492]: I1126 07:06:17.044373 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-8w6bv" Nov 26 07:06:17 crc kubenswrapper[4492]: I1126 07:06:17.086725 4492 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a497bcf3-f8db-4b08-b5e3-33d050f9901a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:06:17 crc kubenswrapper[4492]: I1126 07:06:17.086752 4492 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/a497bcf3-f8db-4b08-b5e3-33d050f9901a-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 07:06:17 crc kubenswrapper[4492]: I1126 07:06:17.086763 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b5744\" (UniqueName: \"kubernetes.io/projected/a497bcf3-f8db-4b08-b5e3-33d050f9901a-kube-api-access-b5744\") on node \"crc\" DevicePath \"\"" Nov 26 07:06:17 crc kubenswrapper[4492]: I1126 07:06:17.086773 4492 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/a497bcf3-f8db-4b08-b5e3-33d050f9901a-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 26 07:06:17 crc kubenswrapper[4492]: I1126 07:06:17.086781 4492 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a497bcf3-f8db-4b08-b5e3-33d050f9901a-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 07:06:17 crc kubenswrapper[4492]: I1126 07:06:17.115576 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a497bcf3-f8db-4b08-b5e3-33d050f9901a-config-data" (OuterVolumeSpecName: "config-data") pod "a497bcf3-f8db-4b08-b5e3-33d050f9901a" (UID: "a497bcf3-f8db-4b08-b5e3-33d050f9901a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:06:17 crc kubenswrapper[4492]: I1126 07:06:17.188528 4492 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a497bcf3-f8db-4b08-b5e3-33d050f9901a-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 07:06:17 crc kubenswrapper[4492]: I1126 07:06:17.330664 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Nov 26 07:06:17 crc kubenswrapper[4492]: E1126 07:06:17.331884 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a497bcf3-f8db-4b08-b5e3-33d050f9901a" containerName="cinder-db-sync" Nov 26 07:06:17 crc kubenswrapper[4492]: I1126 07:06:17.331906 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="a497bcf3-f8db-4b08-b5e3-33d050f9901a" containerName="cinder-db-sync" Nov 26 07:06:17 crc kubenswrapper[4492]: I1126 07:06:17.332334 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="a497bcf3-f8db-4b08-b5e3-33d050f9901a" containerName="cinder-db-sync" Nov 26 07:06:17 crc kubenswrapper[4492]: I1126 07:06:17.334328 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 26 07:06:17 crc kubenswrapper[4492]: I1126 07:06:17.352590 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Nov 26 07:06:17 crc kubenswrapper[4492]: I1126 07:06:17.366286 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 26 07:06:17 crc kubenswrapper[4492]: I1126 07:06:17.399361 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c6c7b43-f00e-4013-ab11-b0471d7d1f68-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"6c6c7b43-f00e-4013-ab11-b0471d7d1f68\") " pod="openstack/cinder-scheduler-0" Nov 26 07:06:17 crc kubenswrapper[4492]: I1126 07:06:17.399452 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6c6c7b43-f00e-4013-ab11-b0471d7d1f68-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"6c6c7b43-f00e-4013-ab11-b0471d7d1f68\") " pod="openstack/cinder-scheduler-0" Nov 26 07:06:17 crc kubenswrapper[4492]: I1126 07:06:17.399480 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2q7gr\" (UniqueName: \"kubernetes.io/projected/6c6c7b43-f00e-4013-ab11-b0471d7d1f68-kube-api-access-2q7gr\") pod \"cinder-scheduler-0\" (UID: \"6c6c7b43-f00e-4013-ab11-b0471d7d1f68\") " pod="openstack/cinder-scheduler-0" Nov 26 07:06:17 crc kubenswrapper[4492]: I1126 07:06:17.399540 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6c6c7b43-f00e-4013-ab11-b0471d7d1f68-scripts\") pod \"cinder-scheduler-0\" (UID: \"6c6c7b43-f00e-4013-ab11-b0471d7d1f68\") " pod="openstack/cinder-scheduler-0" Nov 26 07:06:17 crc kubenswrapper[4492]: I1126 07:06:17.399563 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/6c6c7b43-f00e-4013-ab11-b0471d7d1f68-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"6c6c7b43-f00e-4013-ab11-b0471d7d1f68\") " pod="openstack/cinder-scheduler-0" Nov 26 07:06:17 crc kubenswrapper[4492]: I1126 07:06:17.399627 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c6c7b43-f00e-4013-ab11-b0471d7d1f68-config-data\") pod \"cinder-scheduler-0\" (UID: \"6c6c7b43-f00e-4013-ab11-b0471d7d1f68\") " pod="openstack/cinder-scheduler-0" Nov 26 07:06:17 crc kubenswrapper[4492]: I1126 07:06:17.434230 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7bbb944fd9-7chfg"] Nov 26 07:06:17 crc kubenswrapper[4492]: I1126 07:06:17.488264 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-79cdbd64cc-bkvb2"] Nov 26 07:06:17 crc kubenswrapper[4492]: I1126 07:06:17.489941 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-79cdbd64cc-bkvb2" Nov 26 07:06:17 crc kubenswrapper[4492]: I1126 07:06:17.501970 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d2ee530e-90d8-4bc3-874a-d42b55783ee8-ovsdbserver-nb\") pod \"dnsmasq-dns-79cdbd64cc-bkvb2\" (UID: \"d2ee530e-90d8-4bc3-874a-d42b55783ee8\") " pod="openstack/dnsmasq-dns-79cdbd64cc-bkvb2" Nov 26 07:06:17 crc kubenswrapper[4492]: I1126 07:06:17.502051 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p29bw\" (UniqueName: \"kubernetes.io/projected/d2ee530e-90d8-4bc3-874a-d42b55783ee8-kube-api-access-p29bw\") pod \"dnsmasq-dns-79cdbd64cc-bkvb2\" (UID: \"d2ee530e-90d8-4bc3-874a-d42b55783ee8\") " pod="openstack/dnsmasq-dns-79cdbd64cc-bkvb2" Nov 26 07:06:17 crc kubenswrapper[4492]: I1126 07:06:17.502078 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d2ee530e-90d8-4bc3-874a-d42b55783ee8-ovsdbserver-sb\") pod \"dnsmasq-dns-79cdbd64cc-bkvb2\" (UID: \"d2ee530e-90d8-4bc3-874a-d42b55783ee8\") " pod="openstack/dnsmasq-dns-79cdbd64cc-bkvb2" Nov 26 07:06:17 crc kubenswrapper[4492]: I1126 07:06:17.502102 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c6c7b43-f00e-4013-ab11-b0471d7d1f68-config-data\") pod \"cinder-scheduler-0\" (UID: \"6c6c7b43-f00e-4013-ab11-b0471d7d1f68\") " pod="openstack/cinder-scheduler-0" Nov 26 07:06:17 crc kubenswrapper[4492]: I1126 07:06:17.502148 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c6c7b43-f00e-4013-ab11-b0471d7d1f68-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"6c6c7b43-f00e-4013-ab11-b0471d7d1f68\") " pod="openstack/cinder-scheduler-0" Nov 26 07:06:17 crc kubenswrapper[4492]: I1126 07:06:17.502192 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d2ee530e-90d8-4bc3-874a-d42b55783ee8-dns-swift-storage-0\") pod \"dnsmasq-dns-79cdbd64cc-bkvb2\" (UID: \"d2ee530e-90d8-4bc3-874a-d42b55783ee8\") " pod="openstack/dnsmasq-dns-79cdbd64cc-bkvb2" Nov 26 07:06:17 crc kubenswrapper[4492]: I1126 07:06:17.502210 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d2ee530e-90d8-4bc3-874a-d42b55783ee8-dns-svc\") pod \"dnsmasq-dns-79cdbd64cc-bkvb2\" (UID: \"d2ee530e-90d8-4bc3-874a-d42b55783ee8\") " pod="openstack/dnsmasq-dns-79cdbd64cc-bkvb2" Nov 26 07:06:17 crc kubenswrapper[4492]: I1126 07:06:17.502249 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6c6c7b43-f00e-4013-ab11-b0471d7d1f68-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"6c6c7b43-f00e-4013-ab11-b0471d7d1f68\") " pod="openstack/cinder-scheduler-0" Nov 26 07:06:17 crc kubenswrapper[4492]: I1126 07:06:17.502267 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2q7gr\" (UniqueName: \"kubernetes.io/projected/6c6c7b43-f00e-4013-ab11-b0471d7d1f68-kube-api-access-2q7gr\") pod \"cinder-scheduler-0\" (UID: 
\"6c6c7b43-f00e-4013-ab11-b0471d7d1f68\") " pod="openstack/cinder-scheduler-0" Nov 26 07:06:17 crc kubenswrapper[4492]: I1126 07:06:17.502343 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6c6c7b43-f00e-4013-ab11-b0471d7d1f68-scripts\") pod \"cinder-scheduler-0\" (UID: \"6c6c7b43-f00e-4013-ab11-b0471d7d1f68\") " pod="openstack/cinder-scheduler-0" Nov 26 07:06:17 crc kubenswrapper[4492]: I1126 07:06:17.502363 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d2ee530e-90d8-4bc3-874a-d42b55783ee8-config\") pod \"dnsmasq-dns-79cdbd64cc-bkvb2\" (UID: \"d2ee530e-90d8-4bc3-874a-d42b55783ee8\") " pod="openstack/dnsmasq-dns-79cdbd64cc-bkvb2" Nov 26 07:06:17 crc kubenswrapper[4492]: I1126 07:06:17.502384 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/6c6c7b43-f00e-4013-ab11-b0471d7d1f68-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"6c6c7b43-f00e-4013-ab11-b0471d7d1f68\") " pod="openstack/cinder-scheduler-0" Nov 26 07:06:17 crc kubenswrapper[4492]: I1126 07:06:17.502456 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/6c6c7b43-f00e-4013-ab11-b0471d7d1f68-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"6c6c7b43-f00e-4013-ab11-b0471d7d1f68\") " pod="openstack/cinder-scheduler-0" Nov 26 07:06:17 crc kubenswrapper[4492]: I1126 07:06:17.509520 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6c6c7b43-f00e-4013-ab11-b0471d7d1f68-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"6c6c7b43-f00e-4013-ab11-b0471d7d1f68\") " pod="openstack/cinder-scheduler-0" Nov 26 07:06:17 crc kubenswrapper[4492]: I1126 07:06:17.515669 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c6c7b43-f00e-4013-ab11-b0471d7d1f68-config-data\") pod \"cinder-scheduler-0\" (UID: \"6c6c7b43-f00e-4013-ab11-b0471d7d1f68\") " pod="openstack/cinder-scheduler-0" Nov 26 07:06:17 crc kubenswrapper[4492]: I1126 07:06:17.519292 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-79cdbd64cc-bkvb2"] Nov 26 07:06:17 crc kubenswrapper[4492]: I1126 07:06:17.520499 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6c6c7b43-f00e-4013-ab11-b0471d7d1f68-scripts\") pod \"cinder-scheduler-0\" (UID: \"6c6c7b43-f00e-4013-ab11-b0471d7d1f68\") " pod="openstack/cinder-scheduler-0" Nov 26 07:06:17 crc kubenswrapper[4492]: I1126 07:06:17.524964 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c6c7b43-f00e-4013-ab11-b0471d7d1f68-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"6c6c7b43-f00e-4013-ab11-b0471d7d1f68\") " pod="openstack/cinder-scheduler-0" Nov 26 07:06:17 crc kubenswrapper[4492]: I1126 07:06:17.542234 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2q7gr\" (UniqueName: \"kubernetes.io/projected/6c6c7b43-f00e-4013-ab11-b0471d7d1f68-kube-api-access-2q7gr\") pod \"cinder-scheduler-0\" (UID: \"6c6c7b43-f00e-4013-ab11-b0471d7d1f68\") " pod="openstack/cinder-scheduler-0" Nov 26 07:06:17 crc 
kubenswrapper[4492]: I1126 07:06:17.603964 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d2ee530e-90d8-4bc3-874a-d42b55783ee8-ovsdbserver-nb\") pod \"dnsmasq-dns-79cdbd64cc-bkvb2\" (UID: \"d2ee530e-90d8-4bc3-874a-d42b55783ee8\") " pod="openstack/dnsmasq-dns-79cdbd64cc-bkvb2" Nov 26 07:06:17 crc kubenswrapper[4492]: I1126 07:06:17.604356 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p29bw\" (UniqueName: \"kubernetes.io/projected/d2ee530e-90d8-4bc3-874a-d42b55783ee8-kube-api-access-p29bw\") pod \"dnsmasq-dns-79cdbd64cc-bkvb2\" (UID: \"d2ee530e-90d8-4bc3-874a-d42b55783ee8\") " pod="openstack/dnsmasq-dns-79cdbd64cc-bkvb2" Nov 26 07:06:17 crc kubenswrapper[4492]: I1126 07:06:17.604396 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d2ee530e-90d8-4bc3-874a-d42b55783ee8-ovsdbserver-sb\") pod \"dnsmasq-dns-79cdbd64cc-bkvb2\" (UID: \"d2ee530e-90d8-4bc3-874a-d42b55783ee8\") " pod="openstack/dnsmasq-dns-79cdbd64cc-bkvb2" Nov 26 07:06:17 crc kubenswrapper[4492]: I1126 07:06:17.604483 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d2ee530e-90d8-4bc3-874a-d42b55783ee8-dns-swift-storage-0\") pod \"dnsmasq-dns-79cdbd64cc-bkvb2\" (UID: \"d2ee530e-90d8-4bc3-874a-d42b55783ee8\") " pod="openstack/dnsmasq-dns-79cdbd64cc-bkvb2" Nov 26 07:06:17 crc kubenswrapper[4492]: I1126 07:06:17.604505 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d2ee530e-90d8-4bc3-874a-d42b55783ee8-dns-svc\") pod \"dnsmasq-dns-79cdbd64cc-bkvb2\" (UID: \"d2ee530e-90d8-4bc3-874a-d42b55783ee8\") " pod="openstack/dnsmasq-dns-79cdbd64cc-bkvb2" Nov 26 07:06:17 crc kubenswrapper[4492]: I1126 07:06:17.604644 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d2ee530e-90d8-4bc3-874a-d42b55783ee8-config\") pod \"dnsmasq-dns-79cdbd64cc-bkvb2\" (UID: \"d2ee530e-90d8-4bc3-874a-d42b55783ee8\") " pod="openstack/dnsmasq-dns-79cdbd64cc-bkvb2" Nov 26 07:06:17 crc kubenswrapper[4492]: I1126 07:06:17.605356 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d2ee530e-90d8-4bc3-874a-d42b55783ee8-ovsdbserver-sb\") pod \"dnsmasq-dns-79cdbd64cc-bkvb2\" (UID: \"d2ee530e-90d8-4bc3-874a-d42b55783ee8\") " pod="openstack/dnsmasq-dns-79cdbd64cc-bkvb2" Nov 26 07:06:17 crc kubenswrapper[4492]: I1126 07:06:17.605751 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d2ee530e-90d8-4bc3-874a-d42b55783ee8-config\") pod \"dnsmasq-dns-79cdbd64cc-bkvb2\" (UID: \"d2ee530e-90d8-4bc3-874a-d42b55783ee8\") " pod="openstack/dnsmasq-dns-79cdbd64cc-bkvb2" Nov 26 07:06:17 crc kubenswrapper[4492]: I1126 07:06:17.606214 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d2ee530e-90d8-4bc3-874a-d42b55783ee8-dns-svc\") pod \"dnsmasq-dns-79cdbd64cc-bkvb2\" (UID: \"d2ee530e-90d8-4bc3-874a-d42b55783ee8\") " pod="openstack/dnsmasq-dns-79cdbd64cc-bkvb2" Nov 26 07:06:17 crc kubenswrapper[4492]: I1126 07:06:17.605157 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d2ee530e-90d8-4bc3-874a-d42b55783ee8-ovsdbserver-nb\") pod \"dnsmasq-dns-79cdbd64cc-bkvb2\" (UID: \"d2ee530e-90d8-4bc3-874a-d42b55783ee8\") " pod="openstack/dnsmasq-dns-79cdbd64cc-bkvb2" Nov 26 07:06:17 crc kubenswrapper[4492]: I1126 07:06:17.606493 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d2ee530e-90d8-4bc3-874a-d42b55783ee8-dns-swift-storage-0\") pod \"dnsmasq-dns-79cdbd64cc-bkvb2\" (UID: \"d2ee530e-90d8-4bc3-874a-d42b55783ee8\") " pod="openstack/dnsmasq-dns-79cdbd64cc-bkvb2" Nov 26 07:06:17 crc kubenswrapper[4492]: I1126 07:06:17.626020 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p29bw\" (UniqueName: \"kubernetes.io/projected/d2ee530e-90d8-4bc3-874a-d42b55783ee8-kube-api-access-p29bw\") pod \"dnsmasq-dns-79cdbd64cc-bkvb2\" (UID: \"d2ee530e-90d8-4bc3-874a-d42b55783ee8\") " pod="openstack/dnsmasq-dns-79cdbd64cc-bkvb2" Nov 26 07:06:17 crc kubenswrapper[4492]: I1126 07:06:17.673624 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Nov 26 07:06:17 crc kubenswrapper[4492]: I1126 07:06:17.674956 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 26 07:06:17 crc kubenswrapper[4492]: I1126 07:06:17.680390 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 26 07:06:17 crc kubenswrapper[4492]: I1126 07:06:17.690361 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Nov 26 07:06:17 crc kubenswrapper[4492]: I1126 07:06:17.733056 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 26 07:06:17 crc kubenswrapper[4492]: I1126 07:06:17.811240 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a3d0b640-5e5c-467e-b1d4-8fb05d86d7c9-config-data\") pod \"cinder-api-0\" (UID: \"a3d0b640-5e5c-467e-b1d4-8fb05d86d7c9\") " pod="openstack/cinder-api-0" Nov 26 07:06:17 crc kubenswrapper[4492]: I1126 07:06:17.811344 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a3d0b640-5e5c-467e-b1d4-8fb05d86d7c9-logs\") pod \"cinder-api-0\" (UID: \"a3d0b640-5e5c-467e-b1d4-8fb05d86d7c9\") " pod="openstack/cinder-api-0" Nov 26 07:06:17 crc kubenswrapper[4492]: I1126 07:06:17.811380 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a3d0b640-5e5c-467e-b1d4-8fb05d86d7c9-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"a3d0b640-5e5c-467e-b1d4-8fb05d86d7c9\") " pod="openstack/cinder-api-0" Nov 26 07:06:17 crc kubenswrapper[4492]: I1126 07:06:17.811426 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a3d0b640-5e5c-467e-b1d4-8fb05d86d7c9-config-data-custom\") pod \"cinder-api-0\" (UID: \"a3d0b640-5e5c-467e-b1d4-8fb05d86d7c9\") " pod="openstack/cinder-api-0" Nov 26 07:06:17 crc kubenswrapper[4492]: I1126 07:06:17.811472 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a3d0b640-5e5c-467e-b1d4-8fb05d86d7c9-scripts\") pod \"cinder-api-0\" (UID: \"a3d0b640-5e5c-467e-b1d4-8fb05d86d7c9\") " pod="openstack/cinder-api-0" Nov 26 07:06:17 crc kubenswrapper[4492]: I1126 07:06:17.811940 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/a3d0b640-5e5c-467e-b1d4-8fb05d86d7c9-etc-machine-id\") pod \"cinder-api-0\" (UID: \"a3d0b640-5e5c-467e-b1d4-8fb05d86d7c9\") " pod="openstack/cinder-api-0" Nov 26 07:06:17 crc kubenswrapper[4492]: I1126 07:06:17.812043 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w6c5t\" (UniqueName: \"kubernetes.io/projected/a3d0b640-5e5c-467e-b1d4-8fb05d86d7c9-kube-api-access-w6c5t\") pod \"cinder-api-0\" (UID: \"a3d0b640-5e5c-467e-b1d4-8fb05d86d7c9\") " pod="openstack/cinder-api-0" Nov 26 07:06:17 crc kubenswrapper[4492]: I1126 07:06:17.900539 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-79cdbd64cc-bkvb2" Nov 26 07:06:17 crc kubenswrapper[4492]: I1126 07:06:17.913717 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a3d0b640-5e5c-467e-b1d4-8fb05d86d7c9-scripts\") pod \"cinder-api-0\" (UID: \"a3d0b640-5e5c-467e-b1d4-8fb05d86d7c9\") " pod="openstack/cinder-api-0" Nov 26 07:06:17 crc kubenswrapper[4492]: I1126 07:06:17.913786 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/a3d0b640-5e5c-467e-b1d4-8fb05d86d7c9-etc-machine-id\") pod \"cinder-api-0\" (UID: \"a3d0b640-5e5c-467e-b1d4-8fb05d86d7c9\") " pod="openstack/cinder-api-0" Nov 26 07:06:17 crc kubenswrapper[4492]: I1126 07:06:17.913832 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w6c5t\" (UniqueName: \"kubernetes.io/projected/a3d0b640-5e5c-467e-b1d4-8fb05d86d7c9-kube-api-access-w6c5t\") pod \"cinder-api-0\" (UID: \"a3d0b640-5e5c-467e-b1d4-8fb05d86d7c9\") " pod="openstack/cinder-api-0" Nov 26 07:06:17 crc kubenswrapper[4492]: I1126 07:06:17.913880 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a3d0b640-5e5c-467e-b1d4-8fb05d86d7c9-config-data\") pod \"cinder-api-0\" (UID: \"a3d0b640-5e5c-467e-b1d4-8fb05d86d7c9\") " pod="openstack/cinder-api-0" Nov 26 07:06:17 crc kubenswrapper[4492]: I1126 07:06:17.913901 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a3d0b640-5e5c-467e-b1d4-8fb05d86d7c9-logs\") pod \"cinder-api-0\" (UID: \"a3d0b640-5e5c-467e-b1d4-8fb05d86d7c9\") " pod="openstack/cinder-api-0" Nov 26 07:06:17 crc kubenswrapper[4492]: I1126 07:06:17.913922 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a3d0b640-5e5c-467e-b1d4-8fb05d86d7c9-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"a3d0b640-5e5c-467e-b1d4-8fb05d86d7c9\") " pod="openstack/cinder-api-0" Nov 26 07:06:17 crc kubenswrapper[4492]: I1126 07:06:17.913968 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a3d0b640-5e5c-467e-b1d4-8fb05d86d7c9-config-data-custom\") pod \"cinder-api-0\" (UID: \"a3d0b640-5e5c-467e-b1d4-8fb05d86d7c9\") " pod="openstack/cinder-api-0" Nov 26 07:06:17 crc kubenswrapper[4492]: I1126 07:06:17.915427 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a3d0b640-5e5c-467e-b1d4-8fb05d86d7c9-logs\") pod \"cinder-api-0\" (UID: \"a3d0b640-5e5c-467e-b1d4-8fb05d86d7c9\") " pod="openstack/cinder-api-0" Nov 26 07:06:17 crc kubenswrapper[4492]: I1126 07:06:17.915740 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/a3d0b640-5e5c-467e-b1d4-8fb05d86d7c9-etc-machine-id\") pod \"cinder-api-0\" (UID: \"a3d0b640-5e5c-467e-b1d4-8fb05d86d7c9\") " pod="openstack/cinder-api-0" Nov 26 07:06:17 crc kubenswrapper[4492]: I1126 07:06:17.920785 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a3d0b640-5e5c-467e-b1d4-8fb05d86d7c9-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"a3d0b640-5e5c-467e-b1d4-8fb05d86d7c9\") " 
pod="openstack/cinder-api-0" Nov 26 07:06:17 crc kubenswrapper[4492]: I1126 07:06:17.922759 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a3d0b640-5e5c-467e-b1d4-8fb05d86d7c9-config-data-custom\") pod \"cinder-api-0\" (UID: \"a3d0b640-5e5c-467e-b1d4-8fb05d86d7c9\") " pod="openstack/cinder-api-0" Nov 26 07:06:17 crc kubenswrapper[4492]: I1126 07:06:17.922851 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a3d0b640-5e5c-467e-b1d4-8fb05d86d7c9-scripts\") pod \"cinder-api-0\" (UID: \"a3d0b640-5e5c-467e-b1d4-8fb05d86d7c9\") " pod="openstack/cinder-api-0" Nov 26 07:06:17 crc kubenswrapper[4492]: I1126 07:06:17.923122 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a3d0b640-5e5c-467e-b1d4-8fb05d86d7c9-config-data\") pod \"cinder-api-0\" (UID: \"a3d0b640-5e5c-467e-b1d4-8fb05d86d7c9\") " pod="openstack/cinder-api-0" Nov 26 07:06:17 crc kubenswrapper[4492]: I1126 07:06:17.933672 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w6c5t\" (UniqueName: \"kubernetes.io/projected/a3d0b640-5e5c-467e-b1d4-8fb05d86d7c9-kube-api-access-w6c5t\") pod \"cinder-api-0\" (UID: \"a3d0b640-5e5c-467e-b1d4-8fb05d86d7c9\") " pod="openstack/cinder-api-0" Nov 26 07:06:17 crc kubenswrapper[4492]: I1126 07:06:17.997686 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 26 07:06:19 crc kubenswrapper[4492]: I1126 07:06:19.062420 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-7bbb944fd9-7chfg" podUID="a6a20d7b-0eda-4124-97f8-0882c8fb0d3e" containerName="dnsmasq-dns" containerID="cri-o://b1e041f81b9df4fe926009043b35670f73b5f115d1907ce9066aa94cb1ad3839" gracePeriod=10 Nov 26 07:06:19 crc kubenswrapper[4492]: I1126 07:06:19.441817 4492 patch_prober.go:28] interesting pod/machine-config-daemon-6blv7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 07:06:19 crc kubenswrapper[4492]: I1126 07:06:19.441890 4492 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 07:06:19 crc kubenswrapper[4492]: I1126 07:06:19.562289 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Nov 26 07:06:20 crc kubenswrapper[4492]: I1126 07:06:20.081904 4492 generic.go:334] "Generic (PLEG): container finished" podID="a6a20d7b-0eda-4124-97f8-0882c8fb0d3e" containerID="b1e041f81b9df4fe926009043b35670f73b5f115d1907ce9066aa94cb1ad3839" exitCode=0 Nov 26 07:06:20 crc kubenswrapper[4492]: I1126 07:06:20.082132 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7bbb944fd9-7chfg" event={"ID":"a6a20d7b-0eda-4124-97f8-0882c8fb0d3e","Type":"ContainerDied","Data":"b1e041f81b9df4fe926009043b35670f73b5f115d1907ce9066aa94cb1ad3839"} Nov 26 07:06:21 crc kubenswrapper[4492]: W1126 07:06:21.121336 4492 container.go:586] Failed to update stats for container 
"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod689a4cb6_8c65_4db6_b3d2_b825ca6ddd7e.slice/crio-446202fa41cc55dab0a5a49c4fbdd025274941ac8d22ff4c581e59596786030f": error while statting cgroup v2: [unable to parse /sys/fs/cgroup/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod689a4cb6_8c65_4db6_b3d2_b825ca6ddd7e.slice/crio-446202fa41cc55dab0a5a49c4fbdd025274941ac8d22ff4c581e59596786030f/memory.stat: read /sys/fs/cgroup/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod689a4cb6_8c65_4db6_b3d2_b825ca6ddd7e.slice/crio-446202fa41cc55dab0a5a49c4fbdd025274941ac8d22ff4c581e59596786030f/memory.stat: no such device], continuing to push stats Nov 26 07:06:22 crc kubenswrapper[4492]: I1126 07:06:22.104838 4492 generic.go:334] "Generic (PLEG): container finished" podID="689a4cb6-8c65-4db6-b3d2-b825ca6ddd7e" containerID="ee2e730f36b7b0b2ba530aaaba90beffab19bcde1442721beb67c96945a7f4c6" exitCode=137 Nov 26 07:06:22 crc kubenswrapper[4492]: I1126 07:06:22.104882 4492 generic.go:334] "Generic (PLEG): container finished" podID="689a4cb6-8c65-4db6-b3d2-b825ca6ddd7e" containerID="c0343fac632313410975f87ee4e707640869581c34278f0a595e9c9f79e1b7ee" exitCode=137 Nov 26 07:06:22 crc kubenswrapper[4492]: I1126 07:06:22.104897 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6688b9cf7f-qsr5d" event={"ID":"689a4cb6-8c65-4db6-b3d2-b825ca6ddd7e","Type":"ContainerDied","Data":"ee2e730f36b7b0b2ba530aaaba90beffab19bcde1442721beb67c96945a7f4c6"} Nov 26 07:06:22 crc kubenswrapper[4492]: I1126 07:06:22.104983 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6688b9cf7f-qsr5d" event={"ID":"689a4cb6-8c65-4db6-b3d2-b825ca6ddd7e","Type":"ContainerDied","Data":"c0343fac632313410975f87ee4e707640869581c34278f0a595e9c9f79e1b7ee"} Nov 26 07:06:22 crc kubenswrapper[4492]: I1126 07:06:22.107624 4492 generic.go:334] "Generic (PLEG): container finished" podID="c4316787-8af1-40ef-995a-0f8aabd1bf11" containerID="d79f7f9b2ee5ec6e625bea73ac1fee9161ae5491dcd3ca6ae78813c618c41561" exitCode=137 Nov 26 07:06:22 crc kubenswrapper[4492]: I1126 07:06:22.107671 4492 generic.go:334] "Generic (PLEG): container finished" podID="c4316787-8af1-40ef-995a-0f8aabd1bf11" containerID="ed025a70c3520f1170320f171572560c3fe7c72ad4df5765db7df0c2d5de6892" exitCode=137 Nov 26 07:06:22 crc kubenswrapper[4492]: I1126 07:06:22.107689 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-644687db57-zx74g" event={"ID":"c4316787-8af1-40ef-995a-0f8aabd1bf11","Type":"ContainerDied","Data":"d79f7f9b2ee5ec6e625bea73ac1fee9161ae5491dcd3ca6ae78813c618c41561"} Nov 26 07:06:22 crc kubenswrapper[4492]: I1126 07:06:22.107708 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-644687db57-zx74g" event={"ID":"c4316787-8af1-40ef-995a-0f8aabd1bf11","Type":"ContainerDied","Data":"ed025a70c3520f1170320f171572560c3fe7c72ad4df5765db7df0c2d5de6892"} Nov 26 07:06:22 crc kubenswrapper[4492]: I1126 07:06:22.406075 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-86bdc94cc6-ws8xc" Nov 26 07:06:22 crc kubenswrapper[4492]: I1126 07:06:22.513654 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/38194db3-f048-45e5-80d6-7dfa8f1f7420-config\") pod \"38194db3-f048-45e5-80d6-7dfa8f1f7420\" (UID: \"38194db3-f048-45e5-80d6-7dfa8f1f7420\") " Nov 26 07:06:22 crc kubenswrapper[4492]: I1126 07:06:22.513742 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-458s4\" (UniqueName: \"kubernetes.io/projected/38194db3-f048-45e5-80d6-7dfa8f1f7420-kube-api-access-458s4\") pod \"38194db3-f048-45e5-80d6-7dfa8f1f7420\" (UID: \"38194db3-f048-45e5-80d6-7dfa8f1f7420\") " Nov 26 07:06:22 crc kubenswrapper[4492]: I1126 07:06:22.513939 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/38194db3-f048-45e5-80d6-7dfa8f1f7420-ovndb-tls-certs\") pod \"38194db3-f048-45e5-80d6-7dfa8f1f7420\" (UID: \"38194db3-f048-45e5-80d6-7dfa8f1f7420\") " Nov 26 07:06:22 crc kubenswrapper[4492]: I1126 07:06:22.513976 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/38194db3-f048-45e5-80d6-7dfa8f1f7420-combined-ca-bundle\") pod \"38194db3-f048-45e5-80d6-7dfa8f1f7420\" (UID: \"38194db3-f048-45e5-80d6-7dfa8f1f7420\") " Nov 26 07:06:22 crc kubenswrapper[4492]: I1126 07:06:22.514044 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/38194db3-f048-45e5-80d6-7dfa8f1f7420-httpd-config\") pod \"38194db3-f048-45e5-80d6-7dfa8f1f7420\" (UID: \"38194db3-f048-45e5-80d6-7dfa8f1f7420\") " Nov 26 07:06:22 crc kubenswrapper[4492]: I1126 07:06:22.520598 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/38194db3-f048-45e5-80d6-7dfa8f1f7420-kube-api-access-458s4" (OuterVolumeSpecName: "kube-api-access-458s4") pod "38194db3-f048-45e5-80d6-7dfa8f1f7420" (UID: "38194db3-f048-45e5-80d6-7dfa8f1f7420"). InnerVolumeSpecName "kube-api-access-458s4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:06:22 crc kubenswrapper[4492]: I1126 07:06:22.551477 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/38194db3-f048-45e5-80d6-7dfa8f1f7420-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "38194db3-f048-45e5-80d6-7dfa8f1f7420" (UID: "38194db3-f048-45e5-80d6-7dfa8f1f7420"). InnerVolumeSpecName "httpd-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:06:22 crc kubenswrapper[4492]: I1126 07:06:22.613204 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/38194db3-f048-45e5-80d6-7dfa8f1f7420-config" (OuterVolumeSpecName: "config") pod "38194db3-f048-45e5-80d6-7dfa8f1f7420" (UID: "38194db3-f048-45e5-80d6-7dfa8f1f7420"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:06:22 crc kubenswrapper[4492]: I1126 07:06:22.621403 4492 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/38194db3-f048-45e5-80d6-7dfa8f1f7420-httpd-config\") on node \"crc\" DevicePath \"\"" Nov 26 07:06:22 crc kubenswrapper[4492]: I1126 07:06:22.621428 4492 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/38194db3-f048-45e5-80d6-7dfa8f1f7420-config\") on node \"crc\" DevicePath \"\"" Nov 26 07:06:22 crc kubenswrapper[4492]: I1126 07:06:22.621441 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-458s4\" (UniqueName: \"kubernetes.io/projected/38194db3-f048-45e5-80d6-7dfa8f1f7420-kube-api-access-458s4\") on node \"crc\" DevicePath \"\"" Nov 26 07:06:22 crc kubenswrapper[4492]: I1126 07:06:22.649297 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/38194db3-f048-45e5-80d6-7dfa8f1f7420-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "38194db3-f048-45e5-80d6-7dfa8f1f7420" (UID: "38194db3-f048-45e5-80d6-7dfa8f1f7420"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:06:22 crc kubenswrapper[4492]: I1126 07:06:22.692434 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/38194db3-f048-45e5-80d6-7dfa8f1f7420-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "38194db3-f048-45e5-80d6-7dfa8f1f7420" (UID: "38194db3-f048-45e5-80d6-7dfa8f1f7420"). InnerVolumeSpecName "ovndb-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:06:22 crc kubenswrapper[4492]: I1126 07:06:22.725424 4492 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/38194db3-f048-45e5-80d6-7dfa8f1f7420-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 07:06:22 crc kubenswrapper[4492]: I1126 07:06:22.725464 4492 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/38194db3-f048-45e5-80d6-7dfa8f1f7420-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:06:23 crc kubenswrapper[4492]: I1126 07:06:23.123034 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-86bdc94cc6-ws8xc" event={"ID":"38194db3-f048-45e5-80d6-7dfa8f1f7420","Type":"ContainerDied","Data":"4111ab1bb3fe986ae32d2959570b7d0df410889fd578a7b0d39600e7a279ea9c"} Nov 26 07:06:23 crc kubenswrapper[4492]: I1126 07:06:23.123163 4492 scope.go:117] "RemoveContainer" containerID="16a3767d292dc632cef3268e9d839310095282f2a888535ea9fcc2cd0db9fa99" Nov 26 07:06:23 crc kubenswrapper[4492]: I1126 07:06:23.123321 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-86bdc94cc6-ws8xc" Nov 26 07:06:23 crc kubenswrapper[4492]: I1126 07:06:23.163100 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-86bdc94cc6-ws8xc"] Nov 26 07:06:23 crc kubenswrapper[4492]: I1126 07:06:23.173127 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-86bdc94cc6-ws8xc"] Nov 26 07:06:23 crc kubenswrapper[4492]: I1126 07:06:23.666158 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-6688b9cf7f-qsr5d" Nov 26 07:06:23 crc kubenswrapper[4492]: I1126 07:06:23.671115 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7bbb944fd9-7chfg" Nov 26 07:06:23 crc kubenswrapper[4492]: I1126 07:06:23.672969 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-644687db57-zx74g" Nov 26 07:06:23 crc kubenswrapper[4492]: I1126 07:06:23.763640 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-bf7664774-jvgvf"] Nov 26 07:06:23 crc kubenswrapper[4492]: I1126 07:06:23.863961 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a6a20d7b-0eda-4124-97f8-0882c8fb0d3e-config\") pod \"a6a20d7b-0eda-4124-97f8-0882c8fb0d3e\" (UID: \"a6a20d7b-0eda-4124-97f8-0882c8fb0d3e\") " Nov 26 07:06:23 crc kubenswrapper[4492]: I1126 07:06:23.864015 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/689a4cb6-8c65-4db6-b3d2-b825ca6ddd7e-horizon-secret-key\") pod \"689a4cb6-8c65-4db6-b3d2-b825ca6ddd7e\" (UID: \"689a4cb6-8c65-4db6-b3d2-b825ca6ddd7e\") " Nov 26 07:06:23 crc kubenswrapper[4492]: I1126 07:06:23.864036 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a6a20d7b-0eda-4124-97f8-0882c8fb0d3e-ovsdbserver-sb\") pod \"a6a20d7b-0eda-4124-97f8-0882c8fb0d3e\" (UID: \"a6a20d7b-0eda-4124-97f8-0882c8fb0d3e\") " Nov 26 07:06:23 crc kubenswrapper[4492]: I1126 07:06:23.864058 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c4316787-8af1-40ef-995a-0f8aabd1bf11-scripts\") pod \"c4316787-8af1-40ef-995a-0f8aabd1bf11\" (UID: \"c4316787-8af1-40ef-995a-0f8aabd1bf11\") " Nov 26 07:06:23 crc kubenswrapper[4492]: I1126 07:06:23.864076 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/689a4cb6-8c65-4db6-b3d2-b825ca6ddd7e-config-data\") pod \"689a4cb6-8c65-4db6-b3d2-b825ca6ddd7e\" (UID: \"689a4cb6-8c65-4db6-b3d2-b825ca6ddd7e\") " Nov 26 07:06:23 crc kubenswrapper[4492]: I1126 07:06:23.864115 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a6a20d7b-0eda-4124-97f8-0882c8fb0d3e-dns-swift-storage-0\") pod \"a6a20d7b-0eda-4124-97f8-0882c8fb0d3e\" (UID: \"a6a20d7b-0eda-4124-97f8-0882c8fb0d3e\") " Nov 26 07:06:23 crc kubenswrapper[4492]: I1126 07:06:23.864152 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a6a20d7b-0eda-4124-97f8-0882c8fb0d3e-ovsdbserver-nb\") pod \"a6a20d7b-0eda-4124-97f8-0882c8fb0d3e\" (UID: \"a6a20d7b-0eda-4124-97f8-0882c8fb0d3e\") " Nov 26 07:06:23 crc kubenswrapper[4492]: I1126 07:06:23.864231 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c4316787-8af1-40ef-995a-0f8aabd1bf11-config-data\") pod \"c4316787-8af1-40ef-995a-0f8aabd1bf11\" (UID: \"c4316787-8af1-40ef-995a-0f8aabd1bf11\") " Nov 26 07:06:23 crc kubenswrapper[4492]: I1126 07:06:23.864263 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for 
volume \"kube-api-access-6h9b2\" (UniqueName: \"kubernetes.io/projected/a6a20d7b-0eda-4124-97f8-0882c8fb0d3e-kube-api-access-6h9b2\") pod \"a6a20d7b-0eda-4124-97f8-0882c8fb0d3e\" (UID: \"a6a20d7b-0eda-4124-97f8-0882c8fb0d3e\") " Nov 26 07:06:23 crc kubenswrapper[4492]: I1126 07:06:23.864377 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x787c\" (UniqueName: \"kubernetes.io/projected/689a4cb6-8c65-4db6-b3d2-b825ca6ddd7e-kube-api-access-x787c\") pod \"689a4cb6-8c65-4db6-b3d2-b825ca6ddd7e\" (UID: \"689a4cb6-8c65-4db6-b3d2-b825ca6ddd7e\") " Nov 26 07:06:23 crc kubenswrapper[4492]: I1126 07:06:23.864435 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/689a4cb6-8c65-4db6-b3d2-b825ca6ddd7e-scripts\") pod \"689a4cb6-8c65-4db6-b3d2-b825ca6ddd7e\" (UID: \"689a4cb6-8c65-4db6-b3d2-b825ca6ddd7e\") " Nov 26 07:06:23 crc kubenswrapper[4492]: I1126 07:06:23.864496 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a6a20d7b-0eda-4124-97f8-0882c8fb0d3e-dns-svc\") pod \"a6a20d7b-0eda-4124-97f8-0882c8fb0d3e\" (UID: \"a6a20d7b-0eda-4124-97f8-0882c8fb0d3e\") " Nov 26 07:06:23 crc kubenswrapper[4492]: I1126 07:06:23.864522 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c4316787-8af1-40ef-995a-0f8aabd1bf11-logs\") pod \"c4316787-8af1-40ef-995a-0f8aabd1bf11\" (UID: \"c4316787-8af1-40ef-995a-0f8aabd1bf11\") " Nov 26 07:06:23 crc kubenswrapper[4492]: I1126 07:06:23.864604 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/689a4cb6-8c65-4db6-b3d2-b825ca6ddd7e-logs\") pod \"689a4cb6-8c65-4db6-b3d2-b825ca6ddd7e\" (UID: \"689a4cb6-8c65-4db6-b3d2-b825ca6ddd7e\") " Nov 26 07:06:23 crc kubenswrapper[4492]: I1126 07:06:23.864618 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s2vbx\" (UniqueName: \"kubernetes.io/projected/c4316787-8af1-40ef-995a-0f8aabd1bf11-kube-api-access-s2vbx\") pod \"c4316787-8af1-40ef-995a-0f8aabd1bf11\" (UID: \"c4316787-8af1-40ef-995a-0f8aabd1bf11\") " Nov 26 07:06:23 crc kubenswrapper[4492]: I1126 07:06:23.864637 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/c4316787-8af1-40ef-995a-0f8aabd1bf11-horizon-secret-key\") pod \"c4316787-8af1-40ef-995a-0f8aabd1bf11\" (UID: \"c4316787-8af1-40ef-995a-0f8aabd1bf11\") " Nov 26 07:06:23 crc kubenswrapper[4492]: I1126 07:06:23.888354 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c4316787-8af1-40ef-995a-0f8aabd1bf11-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "c4316787-8af1-40ef-995a-0f8aabd1bf11" (UID: "c4316787-8af1-40ef-995a-0f8aabd1bf11"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:06:23 crc kubenswrapper[4492]: I1126 07:06:23.889287 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c4316787-8af1-40ef-995a-0f8aabd1bf11-logs" (OuterVolumeSpecName: "logs") pod "c4316787-8af1-40ef-995a-0f8aabd1bf11" (UID: "c4316787-8af1-40ef-995a-0f8aabd1bf11"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:06:23 crc kubenswrapper[4492]: I1126 07:06:23.889317 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/689a4cb6-8c65-4db6-b3d2-b825ca6ddd7e-logs" (OuterVolumeSpecName: "logs") pod "689a4cb6-8c65-4db6-b3d2-b825ca6ddd7e" (UID: "689a4cb6-8c65-4db6-b3d2-b825ca6ddd7e"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:06:23 crc kubenswrapper[4492]: I1126 07:06:23.889539 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/689a4cb6-8c65-4db6-b3d2-b825ca6ddd7e-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "689a4cb6-8c65-4db6-b3d2-b825ca6ddd7e" (UID: "689a4cb6-8c65-4db6-b3d2-b825ca6ddd7e"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:06:23 crc kubenswrapper[4492]: I1126 07:06:23.906322 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a6a20d7b-0eda-4124-97f8-0882c8fb0d3e-kube-api-access-6h9b2" (OuterVolumeSpecName: "kube-api-access-6h9b2") pod "a6a20d7b-0eda-4124-97f8-0882c8fb0d3e" (UID: "a6a20d7b-0eda-4124-97f8-0882c8fb0d3e"). InnerVolumeSpecName "kube-api-access-6h9b2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:06:23 crc kubenswrapper[4492]: I1126 07:06:23.906428 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c4316787-8af1-40ef-995a-0f8aabd1bf11-kube-api-access-s2vbx" (OuterVolumeSpecName: "kube-api-access-s2vbx") pod "c4316787-8af1-40ef-995a-0f8aabd1bf11" (UID: "c4316787-8af1-40ef-995a-0f8aabd1bf11"). InnerVolumeSpecName "kube-api-access-s2vbx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:06:23 crc kubenswrapper[4492]: I1126 07:06:23.916311 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/689a4cb6-8c65-4db6-b3d2-b825ca6ddd7e-kube-api-access-x787c" (OuterVolumeSpecName: "kube-api-access-x787c") pod "689a4cb6-8c65-4db6-b3d2-b825ca6ddd7e" (UID: "689a4cb6-8c65-4db6-b3d2-b825ca6ddd7e"). InnerVolumeSpecName "kube-api-access-x787c". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:06:23 crc kubenswrapper[4492]: I1126 07:06:23.954316 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c4316787-8af1-40ef-995a-0f8aabd1bf11-config-data" (OuterVolumeSpecName: "config-data") pod "c4316787-8af1-40ef-995a-0f8aabd1bf11" (UID: "c4316787-8af1-40ef-995a-0f8aabd1bf11"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:06:23 crc kubenswrapper[4492]: I1126 07:06:23.954826 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a6a20d7b-0eda-4124-97f8-0882c8fb0d3e-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "a6a20d7b-0eda-4124-97f8-0882c8fb0d3e" (UID: "a6a20d7b-0eda-4124-97f8-0882c8fb0d3e"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:06:23 crc kubenswrapper[4492]: I1126 07:06:23.958824 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/689a4cb6-8c65-4db6-b3d2-b825ca6ddd7e-config-data" (OuterVolumeSpecName: "config-data") pod "689a4cb6-8c65-4db6-b3d2-b825ca6ddd7e" (UID: "689a4cb6-8c65-4db6-b3d2-b825ca6ddd7e"). 
InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:06:23 crc kubenswrapper[4492]: I1126 07:06:23.959599 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c4316787-8af1-40ef-995a-0f8aabd1bf11-scripts" (OuterVolumeSpecName: "scripts") pod "c4316787-8af1-40ef-995a-0f8aabd1bf11" (UID: "c4316787-8af1-40ef-995a-0f8aabd1bf11"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:06:23 crc kubenswrapper[4492]: I1126 07:06:23.968284 4492 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c4316787-8af1-40ef-995a-0f8aabd1bf11-logs\") on node \"crc\" DevicePath \"\"" Nov 26 07:06:23 crc kubenswrapper[4492]: I1126 07:06:23.968324 4492 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/689a4cb6-8c65-4db6-b3d2-b825ca6ddd7e-logs\") on node \"crc\" DevicePath \"\"" Nov 26 07:06:23 crc kubenswrapper[4492]: I1126 07:06:23.968335 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s2vbx\" (UniqueName: \"kubernetes.io/projected/c4316787-8af1-40ef-995a-0f8aabd1bf11-kube-api-access-s2vbx\") on node \"crc\" DevicePath \"\"" Nov 26 07:06:23 crc kubenswrapper[4492]: I1126 07:06:23.968349 4492 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/c4316787-8af1-40ef-995a-0f8aabd1bf11-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Nov 26 07:06:23 crc kubenswrapper[4492]: I1126 07:06:23.968359 4492 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/689a4cb6-8c65-4db6-b3d2-b825ca6ddd7e-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Nov 26 07:06:23 crc kubenswrapper[4492]: I1126 07:06:23.968369 4492 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a6a20d7b-0eda-4124-97f8-0882c8fb0d3e-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 26 07:06:23 crc kubenswrapper[4492]: I1126 07:06:23.968378 4492 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c4316787-8af1-40ef-995a-0f8aabd1bf11-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 07:06:23 crc kubenswrapper[4492]: I1126 07:06:23.968387 4492 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/689a4cb6-8c65-4db6-b3d2-b825ca6ddd7e-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 07:06:23 crc kubenswrapper[4492]: I1126 07:06:23.968397 4492 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c4316787-8af1-40ef-995a-0f8aabd1bf11-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 07:06:23 crc kubenswrapper[4492]: I1126 07:06:23.968406 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6h9b2\" (UniqueName: \"kubernetes.io/projected/a6a20d7b-0eda-4124-97f8-0882c8fb0d3e-kube-api-access-6h9b2\") on node \"crc\" DevicePath \"\"" Nov 26 07:06:23 crc kubenswrapper[4492]: I1126 07:06:23.968416 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x787c\" (UniqueName: \"kubernetes.io/projected/689a4cb6-8c65-4db6-b3d2-b825ca6ddd7e-kube-api-access-x787c\") on node \"crc\" DevicePath \"\"" Nov 26 07:06:23 crc kubenswrapper[4492]: I1126 07:06:23.969396 4492 operation_generator.go:803] 
UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/689a4cb6-8c65-4db6-b3d2-b825ca6ddd7e-scripts" (OuterVolumeSpecName: "scripts") pod "689a4cb6-8c65-4db6-b3d2-b825ca6ddd7e" (UID: "689a4cb6-8c65-4db6-b3d2-b825ca6ddd7e"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:06:23 crc kubenswrapper[4492]: I1126 07:06:23.992519 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a6a20d7b-0eda-4124-97f8-0882c8fb0d3e-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "a6a20d7b-0eda-4124-97f8-0882c8fb0d3e" (UID: "a6a20d7b-0eda-4124-97f8-0882c8fb0d3e"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:06:24 crc kubenswrapper[4492]: I1126 07:06:24.005116 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a6a20d7b-0eda-4124-97f8-0882c8fb0d3e-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "a6a20d7b-0eda-4124-97f8-0882c8fb0d3e" (UID: "a6a20d7b-0eda-4124-97f8-0882c8fb0d3e"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:06:24 crc kubenswrapper[4492]: I1126 07:06:24.015603 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a6a20d7b-0eda-4124-97f8-0882c8fb0d3e-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "a6a20d7b-0eda-4124-97f8-0882c8fb0d3e" (UID: "a6a20d7b-0eda-4124-97f8-0882c8fb0d3e"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:06:24 crc kubenswrapper[4492]: I1126 07:06:24.019766 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a6a20d7b-0eda-4124-97f8-0882c8fb0d3e-config" (OuterVolumeSpecName: "config") pod "a6a20d7b-0eda-4124-97f8-0882c8fb0d3e" (UID: "a6a20d7b-0eda-4124-97f8-0882c8fb0d3e"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:06:24 crc kubenswrapper[4492]: I1126 07:06:24.073063 4492 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a6a20d7b-0eda-4124-97f8-0882c8fb0d3e-config\") on node \"crc\" DevicePath \"\"" Nov 26 07:06:24 crc kubenswrapper[4492]: I1126 07:06:24.073094 4492 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a6a20d7b-0eda-4124-97f8-0882c8fb0d3e-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 26 07:06:24 crc kubenswrapper[4492]: I1126 07:06:24.073107 4492 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a6a20d7b-0eda-4124-97f8-0882c8fb0d3e-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 26 07:06:24 crc kubenswrapper[4492]: I1126 07:06:24.073119 4492 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/689a4cb6-8c65-4db6-b3d2-b825ca6ddd7e-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 07:06:24 crc kubenswrapper[4492]: I1126 07:06:24.073127 4492 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a6a20d7b-0eda-4124-97f8-0882c8fb0d3e-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 07:06:24 crc kubenswrapper[4492]: I1126 07:06:24.132781 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7bbb944fd9-7chfg" Nov 26 07:06:24 crc kubenswrapper[4492]: I1126 07:06:24.134796 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7bbb944fd9-7chfg" event={"ID":"a6a20d7b-0eda-4124-97f8-0882c8fb0d3e","Type":"ContainerDied","Data":"31cfbe79773431f6d1150428cf52f4879ba7ce8ac6a8328f0036de011c56f7e0"} Nov 26 07:06:24 crc kubenswrapper[4492]: I1126 07:06:24.140746 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6688b9cf7f-qsr5d" event={"ID":"689a4cb6-8c65-4db6-b3d2-b825ca6ddd7e","Type":"ContainerDied","Data":"446202fa41cc55dab0a5a49c4fbdd025274941ac8d22ff4c581e59596786030f"} Nov 26 07:06:24 crc kubenswrapper[4492]: I1126 07:06:24.140795 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-6688b9cf7f-qsr5d" Nov 26 07:06:24 crc kubenswrapper[4492]: I1126 07:06:24.147529 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-644687db57-zx74g" event={"ID":"c4316787-8af1-40ef-995a-0f8aabd1bf11","Type":"ContainerDied","Data":"33504fda659323afc030cf03ee19e44ce5ba97ff096c93e24ce830ad8553392e"} Nov 26 07:06:24 crc kubenswrapper[4492]: I1126 07:06:24.148034 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-644687db57-zx74g" Nov 26 07:06:24 crc kubenswrapper[4492]: I1126 07:06:24.222051 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7bbb944fd9-7chfg"] Nov 26 07:06:24 crc kubenswrapper[4492]: I1126 07:06:24.250145 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7bbb944fd9-7chfg"] Nov 26 07:06:24 crc kubenswrapper[4492]: I1126 07:06:24.257216 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-644687db57-zx74g"] Nov 26 07:06:24 crc kubenswrapper[4492]: I1126 07:06:24.270237 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-644687db57-zx74g"] Nov 26 07:06:24 crc kubenswrapper[4492]: I1126 07:06:24.278503 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-6688b9cf7f-qsr5d"] Nov 26 07:06:24 crc kubenswrapper[4492]: I1126 07:06:24.278617 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-6688b9cf7f-qsr5d"] Nov 26 07:06:24 crc kubenswrapper[4492]: I1126 07:06:24.388093 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-6979b94f94-5ddp5" Nov 26 07:06:24 crc kubenswrapper[4492]: I1126 07:06:24.451720 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="38194db3-f048-45e5-80d6-7dfa8f1f7420" path="/var/lib/kubelet/pods/38194db3-f048-45e5-80d6-7dfa8f1f7420/volumes" Nov 26 07:06:24 crc kubenswrapper[4492]: I1126 07:06:24.452894 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="689a4cb6-8c65-4db6-b3d2-b825ca6ddd7e" path="/var/lib/kubelet/pods/689a4cb6-8c65-4db6-b3d2-b825ca6ddd7e/volumes" Nov 26 07:06:24 crc kubenswrapper[4492]: I1126 07:06:24.453656 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a6a20d7b-0eda-4124-97f8-0882c8fb0d3e" path="/var/lib/kubelet/pods/a6a20d7b-0eda-4124-97f8-0882c8fb0d3e/volumes" Nov 26 07:06:24 crc kubenswrapper[4492]: I1126 07:06:24.454867 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c4316787-8af1-40ef-995a-0f8aabd1bf11" path="/var/lib/kubelet/pods/c4316787-8af1-40ef-995a-0f8aabd1bf11/volumes" Nov 26 07:06:24 crc 
kubenswrapper[4492]: I1126 07:06:24.455956 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-6979b94f94-5ddp5" Nov 26 07:06:25 crc kubenswrapper[4492]: I1126 07:06:25.610437 4492 scope.go:117] "RemoveContainer" containerID="4a7744552769063790091905df7e367ce4227767f52b93cb69f2cf6f31dd1597" Nov 26 07:06:25 crc kubenswrapper[4492]: I1126 07:06:25.871060 4492 scope.go:117] "RemoveContainer" containerID="b1e041f81b9df4fe926009043b35670f73b5f115d1907ce9066aa94cb1ad3839" Nov 26 07:06:25 crc kubenswrapper[4492]: I1126 07:06:25.962050 4492 scope.go:117] "RemoveContainer" containerID="85bb9f625757a7712a5c63f1cb66108e75b9125f1b320419e3fbe9e23496960f" Nov 26 07:06:25 crc kubenswrapper[4492]: I1126 07:06:25.995886 4492 scope.go:117] "RemoveContainer" containerID="ee2e730f36b7b0b2ba530aaaba90beffab19bcde1442721beb67c96945a7f4c6" Nov 26 07:06:26 crc kubenswrapper[4492]: I1126 07:06:26.164592 4492 scope.go:117] "RemoveContainer" containerID="c0343fac632313410975f87ee4e707640869581c34278f0a595e9c9f79e1b7ee" Nov 26 07:06:26 crc kubenswrapper[4492]: I1126 07:06:26.196835 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-7f7fc7947-wl47t" event={"ID":"1818ac65-a224-4fce-93a7-ca59d1005deb","Type":"ContainerStarted","Data":"9c61965e00fcebe38780d81d83142174710b2cf93e92a872bd014b1ecfa07450"} Nov 26 07:06:26 crc kubenswrapper[4492]: I1126 07:06:26.217532 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-bf7664774-jvgvf" event={"ID":"dcf6c9d0-b966-4d3c-9e14-3db7dccdc916","Type":"ContainerStarted","Data":"5a9dbbdefe6152b890beab46eb2f645eab4d0eabc22bb74a9069165aa0f5fc69"} Nov 26 07:06:26 crc kubenswrapper[4492]: I1126 07:06:26.217591 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-bf7664774-jvgvf" event={"ID":"dcf6c9d0-b966-4d3c-9e14-3db7dccdc916","Type":"ContainerStarted","Data":"e40731e94dfd5e3e194fa4b052bcc47a6178cc9db5c997895f3dcb03ff7a4f84"} Nov 26 07:06:26 crc kubenswrapper[4492]: I1126 07:06:26.230056 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-849fccf54-r7v7b" event={"ID":"c4a82e4f-758b-48bb-9dba-cd1b59afeab7","Type":"ContainerStarted","Data":"7b23c06f19178a014bf0a1d039562e043870dd46860ceda25591d8d69a55ff86"} Nov 26 07:06:26 crc kubenswrapper[4492]: I1126 07:06:26.290319 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 26 07:06:26 crc kubenswrapper[4492]: I1126 07:06:26.317546 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-79cdbd64cc-bkvb2"] Nov 26 07:06:26 crc kubenswrapper[4492]: I1126 07:06:26.362772 4492 scope.go:117] "RemoveContainer" containerID="d79f7f9b2ee5ec6e625bea73ac1fee9161ae5491dcd3ca6ae78813c618c41561" Nov 26 07:06:26 crc kubenswrapper[4492]: I1126 07:06:26.430970 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Nov 26 07:06:26 crc kubenswrapper[4492]: E1126 07:06:26.450944 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/ceilometer-0" podUID="e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc" Nov 26 07:06:26 crc kubenswrapper[4492]: I1126 07:06:26.575984 4492 scope.go:117] "RemoveContainer" containerID="ed025a70c3520f1170320f171572560c3fe7c72ad4df5765db7df0c2d5de6892" Nov 26 07:06:27 crc kubenswrapper[4492]: 
I1126 07:06:27.237445 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/horizon-6b48756c9b-4pxln" Nov 26 07:06:27 crc kubenswrapper[4492]: I1126 07:06:27.286262 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"6c6c7b43-f00e-4013-ab11-b0471d7d1f68","Type":"ContainerStarted","Data":"4ed06661faf20c3987734974ea64c5bb0920b28a5507dffc3e42c840b0abb3e5"} Nov 26 07:06:27 crc kubenswrapper[4492]: I1126 07:06:27.300313 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc","Type":"ContainerStarted","Data":"06deabb6e42f04649aee661f9b104416e1add2d2c2229f9d6bd4dc3b667d9767"} Nov 26 07:06:27 crc kubenswrapper[4492]: I1126 07:06:27.300936 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc" containerName="ceilometer-notification-agent" containerID="cri-o://5ea9c972c1cf68b00b9528962903d8c7078fa73512e8150c37f248b0aa4f78b9" gracePeriod=30 Nov 26 07:06:27 crc kubenswrapper[4492]: I1126 07:06:27.301463 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 26 07:06:27 crc kubenswrapper[4492]: I1126 07:06:27.301723 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc" containerName="proxy-httpd" containerID="cri-o://06deabb6e42f04649aee661f9b104416e1add2d2c2229f9d6bd4dc3b667d9767" gracePeriod=30 Nov 26 07:06:27 crc kubenswrapper[4492]: I1126 07:06:27.301768 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc" containerName="sg-core" containerID="cri-o://79a3438798f4dbce66ccdb69dd77edcac910249c13dc4e1f7ab7cb20daed8564" gracePeriod=30 Nov 26 07:06:27 crc kubenswrapper[4492]: I1126 07:06:27.319973 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-7f7fc7947-wl47t" event={"ID":"1818ac65-a224-4fce-93a7-ca59d1005deb","Type":"ContainerStarted","Data":"99558219b6bfa1efe81c4d457e6e3de3512dcec8d6cda57b9f637ab1d0d93f8a"} Nov 26 07:06:27 crc kubenswrapper[4492]: I1126 07:06:27.327756 4492 generic.go:334] "Generic (PLEG): container finished" podID="d2ee530e-90d8-4bc3-874a-d42b55783ee8" containerID="ab9765e78ad50dd8926861bae83f8adc9b5e1d55ec08346c62cdb48b5a0d5a6d" exitCode=0 Nov 26 07:06:27 crc kubenswrapper[4492]: I1126 07:06:27.327819 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-79cdbd64cc-bkvb2" event={"ID":"d2ee530e-90d8-4bc3-874a-d42b55783ee8","Type":"ContainerDied","Data":"ab9765e78ad50dd8926861bae83f8adc9b5e1d55ec08346c62cdb48b5a0d5a6d"} Nov 26 07:06:27 crc kubenswrapper[4492]: I1126 07:06:27.327847 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-79cdbd64cc-bkvb2" event={"ID":"d2ee530e-90d8-4bc3-874a-d42b55783ee8","Type":"ContainerStarted","Data":"dd66dbe282b0316c4a29484374057ce9a895bbee69339c7c65624365d7eed085"} Nov 26 07:06:27 crc kubenswrapper[4492]: I1126 07:06:27.387920 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-worker-7f7fc7947-wl47t" podStartSLOduration=5.210663143 podStartE2EDuration="15.387901371s" podCreationTimestamp="2025-11-26 07:06:12 +0000 UTC" firstStartedPulling="2025-11-26 07:06:13.32507553 +0000 UTC m=+1069.208963828" 
lastFinishedPulling="2025-11-26 07:06:23.502313757 +0000 UTC m=+1079.386202056" observedRunningTime="2025-11-26 07:06:27.375925594 +0000 UTC m=+1083.259813892" watchObservedRunningTime="2025-11-26 07:06:27.387901371 +0000 UTC m=+1083.271789669" Nov 26 07:06:27 crc kubenswrapper[4492]: I1126 07:06:27.388861 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-bf7664774-jvgvf" event={"ID":"dcf6c9d0-b966-4d3c-9e14-3db7dccdc916","Type":"ContainerStarted","Data":"22350e6cc0fccc6f0bbca56851da9dcc97e7936431a51709d0b4f985674dc4cd"} Nov 26 07:06:27 crc kubenswrapper[4492]: I1126 07:06:27.389270 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-bf7664774-jvgvf" Nov 26 07:06:27 crc kubenswrapper[4492]: I1126 07:06:27.389305 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-bf7664774-jvgvf" Nov 26 07:06:27 crc kubenswrapper[4492]: I1126 07:06:27.409770 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-849fccf54-r7v7b" event={"ID":"c4a82e4f-758b-48bb-9dba-cd1b59afeab7","Type":"ContainerStarted","Data":"419b4bc31046dd717612f2d106f6e908095938c25ee936941693c98f19a8d68f"} Nov 26 07:06:27 crc kubenswrapper[4492]: I1126 07:06:27.413127 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-bf7664774-jvgvf" podStartSLOduration=12.413105861 podStartE2EDuration="12.413105861s" podCreationTimestamp="2025-11-26 07:06:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:06:27.411238108 +0000 UTC m=+1083.295126407" watchObservedRunningTime="2025-11-26 07:06:27.413105861 +0000 UTC m=+1083.296994159" Nov 26 07:06:27 crc kubenswrapper[4492]: I1126 07:06:27.424623 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"a3d0b640-5e5c-467e-b1d4-8fb05d86d7c9","Type":"ContainerStarted","Data":"78a6e5d18ce111e1561ff2f1d6b0ed38cb78c4f4d5edf4a30440ae3bf7e6a41b"} Nov 26 07:06:27 crc kubenswrapper[4492]: I1126 07:06:27.505482 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/horizon-7bb6557f96-rgc7g" Nov 26 07:06:27 crc kubenswrapper[4492]: I1126 07:06:27.538078 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-keystone-listener-849fccf54-r7v7b" podStartSLOduration=5.192678518 podStartE2EDuration="15.538057112s" podCreationTimestamp="2025-11-26 07:06:12 +0000 UTC" firstStartedPulling="2025-11-26 07:06:13.110064659 +0000 UTC m=+1068.993952957" lastFinishedPulling="2025-11-26 07:06:23.455443252 +0000 UTC m=+1079.339331551" observedRunningTime="2025-11-26 07:06:27.47049912 +0000 UTC m=+1083.354387419" watchObservedRunningTime="2025-11-26 07:06:27.538057112 +0000 UTC m=+1083.421945410" Nov 26 07:06:27 crc kubenswrapper[4492]: I1126 07:06:27.846255 4492 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-7bbb944fd9-7chfg" podUID="a6a20d7b-0eda-4124-97f8-0882c8fb0d3e" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.165:5353: i/o timeout" Nov 26 07:06:28 crc kubenswrapper[4492]: I1126 07:06:28.002811 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-7656457cb-pbp6j" Nov 26 07:06:28 crc kubenswrapper[4492]: I1126 07:06:28.115540 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" 
status="ready" pod="openstack/placement-7656457cb-pbp6j" Nov 26 07:06:28 crc kubenswrapper[4492]: I1126 07:06:28.453843 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"a3d0b640-5e5c-467e-b1d4-8fb05d86d7c9","Type":"ContainerStarted","Data":"077b2570f8f00518d542907f027b7a6bfb363e56d3fc7c97834c9692458204ee"} Nov 26 07:06:28 crc kubenswrapper[4492]: I1126 07:06:28.462029 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-79cdbd64cc-bkvb2" event={"ID":"d2ee530e-90d8-4bc3-874a-d42b55783ee8","Type":"ContainerStarted","Data":"2d85f8298b8602e2feb7c10f7e331b8691efd5ed2c6c5ba4fc430de01605fa0d"} Nov 26 07:06:28 crc kubenswrapper[4492]: I1126 07:06:28.466902 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-79cdbd64cc-bkvb2" Nov 26 07:06:28 crc kubenswrapper[4492]: I1126 07:06:28.476288 4492 generic.go:334] "Generic (PLEG): container finished" podID="e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc" containerID="06deabb6e42f04649aee661f9b104416e1add2d2c2229f9d6bd4dc3b667d9767" exitCode=0 Nov 26 07:06:28 crc kubenswrapper[4492]: I1126 07:06:28.476317 4492 generic.go:334] "Generic (PLEG): container finished" podID="e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc" containerID="79a3438798f4dbce66ccdb69dd77edcac910249c13dc4e1f7ab7cb20daed8564" exitCode=2 Nov 26 07:06:28 crc kubenswrapper[4492]: I1126 07:06:28.477636 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc","Type":"ContainerDied","Data":"06deabb6e42f04649aee661f9b104416e1add2d2c2229f9d6bd4dc3b667d9767"} Nov 26 07:06:28 crc kubenswrapper[4492]: I1126 07:06:28.477659 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc","Type":"ContainerDied","Data":"79a3438798f4dbce66ccdb69dd77edcac910249c13dc4e1f7ab7cb20daed8564"} Nov 26 07:06:28 crc kubenswrapper[4492]: I1126 07:06:28.493663 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-79cdbd64cc-bkvb2" podStartSLOduration=11.493641781000001 podStartE2EDuration="11.493641781s" podCreationTimestamp="2025-11-26 07:06:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:06:28.493489846 +0000 UTC m=+1084.377378144" watchObservedRunningTime="2025-11-26 07:06:28.493641781 +0000 UTC m=+1084.377530080" Nov 26 07:06:29 crc kubenswrapper[4492]: I1126 07:06:29.490905 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"6c6c7b43-f00e-4013-ab11-b0471d7d1f68","Type":"ContainerStarted","Data":"16f53fca6cf79164a90bde5dbdcf56b575255f3a74db4594867316f492ce58fc"} Nov 26 07:06:29 crc kubenswrapper[4492]: I1126 07:06:29.491504 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"6c6c7b43-f00e-4013-ab11-b0471d7d1f68","Type":"ContainerStarted","Data":"cecf3a295e64919cdc40f558352d93e08f30c11c0b7f2963a84b766132ced8de"} Nov 26 07:06:29 crc kubenswrapper[4492]: I1126 07:06:29.494137 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"a3d0b640-5e5c-467e-b1d4-8fb05d86d7c9","Type":"ContainerStarted","Data":"5ed9723dde325290e874a9c0bd13c009a5dd9a94bb76d8182ffbaaf185dc06e1"} Nov 26 07:06:29 crc kubenswrapper[4492]: I1126 07:06:29.494285 4492 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="" pod="openstack/cinder-api-0" Nov 26 07:06:29 crc kubenswrapper[4492]: I1126 07:06:29.494336 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="a3d0b640-5e5c-467e-b1d4-8fb05d86d7c9" containerName="cinder-api" containerID="cri-o://5ed9723dde325290e874a9c0bd13c009a5dd9a94bb76d8182ffbaaf185dc06e1" gracePeriod=30 Nov 26 07:06:29 crc kubenswrapper[4492]: I1126 07:06:29.494354 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="a3d0b640-5e5c-467e-b1d4-8fb05d86d7c9" containerName="cinder-api-log" containerID="cri-o://077b2570f8f00518d542907f027b7a6bfb363e56d3fc7c97834c9692458204ee" gracePeriod=30 Nov 26 07:06:29 crc kubenswrapper[4492]: I1126 07:06:29.513958 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=11.421297451 podStartE2EDuration="12.513924845s" podCreationTimestamp="2025-11-26 07:06:17 +0000 UTC" firstStartedPulling="2025-11-26 07:06:26.424998393 +0000 UTC m=+1082.308886691" lastFinishedPulling="2025-11-26 07:06:27.517625788 +0000 UTC m=+1083.401514085" observedRunningTime="2025-11-26 07:06:29.510552604 +0000 UTC m=+1085.394440902" watchObservedRunningTime="2025-11-26 07:06:29.513924845 +0000 UTC m=+1085.397813132" Nov 26 07:06:29 crc kubenswrapper[4492]: I1126 07:06:29.545684 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=12.545662617 podStartE2EDuration="12.545662617s" podCreationTimestamp="2025-11-26 07:06:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:06:29.533119935 +0000 UTC m=+1085.417008223" watchObservedRunningTime="2025-11-26 07:06:29.545662617 +0000 UTC m=+1085.429550906" Nov 26 07:06:29 crc kubenswrapper[4492]: I1126 07:06:29.968251 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/horizon-6b48756c9b-4pxln" Nov 26 07:06:30 crc kubenswrapper[4492]: I1126 07:06:30.072388 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/horizon-7bb6557f96-rgc7g" Nov 26 07:06:30 crc kubenswrapper[4492]: I1126 07:06:30.119098 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Nov 26 07:06:30 crc kubenswrapper[4492]: I1126 07:06:30.119877 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-7bb6557f96-rgc7g"] Nov 26 07:06:30 crc kubenswrapper[4492]: I1126 07:06:30.201142 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a3d0b640-5e5c-467e-b1d4-8fb05d86d7c9-config-data\") pod \"a3d0b640-5e5c-467e-b1d4-8fb05d86d7c9\" (UID: \"a3d0b640-5e5c-467e-b1d4-8fb05d86d7c9\") " Nov 26 07:06:30 crc kubenswrapper[4492]: I1126 07:06:30.201684 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a3d0b640-5e5c-467e-b1d4-8fb05d86d7c9-logs\") pod \"a3d0b640-5e5c-467e-b1d4-8fb05d86d7c9\" (UID: \"a3d0b640-5e5c-467e-b1d4-8fb05d86d7c9\") " Nov 26 07:06:30 crc kubenswrapper[4492]: I1126 07:06:30.201714 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a3d0b640-5e5c-467e-b1d4-8fb05d86d7c9-combined-ca-bundle\") pod \"a3d0b640-5e5c-467e-b1d4-8fb05d86d7c9\" (UID: \"a3d0b640-5e5c-467e-b1d4-8fb05d86d7c9\") " Nov 26 07:06:30 crc kubenswrapper[4492]: I1126 07:06:30.201813 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/a3d0b640-5e5c-467e-b1d4-8fb05d86d7c9-etc-machine-id\") pod \"a3d0b640-5e5c-467e-b1d4-8fb05d86d7c9\" (UID: \"a3d0b640-5e5c-467e-b1d4-8fb05d86d7c9\") " Nov 26 07:06:30 crc kubenswrapper[4492]: I1126 07:06:30.201888 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w6c5t\" (UniqueName: \"kubernetes.io/projected/a3d0b640-5e5c-467e-b1d4-8fb05d86d7c9-kube-api-access-w6c5t\") pod \"a3d0b640-5e5c-467e-b1d4-8fb05d86d7c9\" (UID: \"a3d0b640-5e5c-467e-b1d4-8fb05d86d7c9\") " Nov 26 07:06:30 crc kubenswrapper[4492]: I1126 07:06:30.201921 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a3d0b640-5e5c-467e-b1d4-8fb05d86d7c9-config-data-custom\") pod \"a3d0b640-5e5c-467e-b1d4-8fb05d86d7c9\" (UID: \"a3d0b640-5e5c-467e-b1d4-8fb05d86d7c9\") " Nov 26 07:06:30 crc kubenswrapper[4492]: I1126 07:06:30.201966 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a3d0b640-5e5c-467e-b1d4-8fb05d86d7c9-scripts\") pod \"a3d0b640-5e5c-467e-b1d4-8fb05d86d7c9\" (UID: \"a3d0b640-5e5c-467e-b1d4-8fb05d86d7c9\") " Nov 26 07:06:30 crc kubenswrapper[4492]: I1126 07:06:30.203464 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a3d0b640-5e5c-467e-b1d4-8fb05d86d7c9-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "a3d0b640-5e5c-467e-b1d4-8fb05d86d7c9" (UID: "a3d0b640-5e5c-467e-b1d4-8fb05d86d7c9"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 07:06:30 crc kubenswrapper[4492]: I1126 07:06:30.204417 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a3d0b640-5e5c-467e-b1d4-8fb05d86d7c9-logs" (OuterVolumeSpecName: "logs") pod "a3d0b640-5e5c-467e-b1d4-8fb05d86d7c9" (UID: "a3d0b640-5e5c-467e-b1d4-8fb05d86d7c9"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:06:30 crc kubenswrapper[4492]: I1126 07:06:30.209797 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a3d0b640-5e5c-467e-b1d4-8fb05d86d7c9-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "a3d0b640-5e5c-467e-b1d4-8fb05d86d7c9" (UID: "a3d0b640-5e5c-467e-b1d4-8fb05d86d7c9"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:06:30 crc kubenswrapper[4492]: I1126 07:06:30.211056 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a3d0b640-5e5c-467e-b1d4-8fb05d86d7c9-kube-api-access-w6c5t" (OuterVolumeSpecName: "kube-api-access-w6c5t") pod "a3d0b640-5e5c-467e-b1d4-8fb05d86d7c9" (UID: "a3d0b640-5e5c-467e-b1d4-8fb05d86d7c9"). InnerVolumeSpecName "kube-api-access-w6c5t". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:06:30 crc kubenswrapper[4492]: I1126 07:06:30.215381 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a3d0b640-5e5c-467e-b1d4-8fb05d86d7c9-scripts" (OuterVolumeSpecName: "scripts") pod "a3d0b640-5e5c-467e-b1d4-8fb05d86d7c9" (UID: "a3d0b640-5e5c-467e-b1d4-8fb05d86d7c9"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:06:30 crc kubenswrapper[4492]: I1126 07:06:30.240573 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a3d0b640-5e5c-467e-b1d4-8fb05d86d7c9-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a3d0b640-5e5c-467e-b1d4-8fb05d86d7c9" (UID: "a3d0b640-5e5c-467e-b1d4-8fb05d86d7c9"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:06:30 crc kubenswrapper[4492]: I1126 07:06:30.276593 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a3d0b640-5e5c-467e-b1d4-8fb05d86d7c9-config-data" (OuterVolumeSpecName: "config-data") pod "a3d0b640-5e5c-467e-b1d4-8fb05d86d7c9" (UID: "a3d0b640-5e5c-467e-b1d4-8fb05d86d7c9"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:06:30 crc kubenswrapper[4492]: I1126 07:06:30.303437 4492 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/a3d0b640-5e5c-467e-b1d4-8fb05d86d7c9-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 26 07:06:30 crc kubenswrapper[4492]: I1126 07:06:30.303470 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w6c5t\" (UniqueName: \"kubernetes.io/projected/a3d0b640-5e5c-467e-b1d4-8fb05d86d7c9-kube-api-access-w6c5t\") on node \"crc\" DevicePath \"\"" Nov 26 07:06:30 crc kubenswrapper[4492]: I1126 07:06:30.303485 4492 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a3d0b640-5e5c-467e-b1d4-8fb05d86d7c9-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 26 07:06:30 crc kubenswrapper[4492]: I1126 07:06:30.303495 4492 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a3d0b640-5e5c-467e-b1d4-8fb05d86d7c9-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 07:06:30 crc kubenswrapper[4492]: I1126 07:06:30.303505 4492 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a3d0b640-5e5c-467e-b1d4-8fb05d86d7c9-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 07:06:30 crc kubenswrapper[4492]: I1126 07:06:30.303516 4492 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a3d0b640-5e5c-467e-b1d4-8fb05d86d7c9-logs\") on node \"crc\" DevicePath \"\"" Nov 26 07:06:30 crc kubenswrapper[4492]: I1126 07:06:30.303525 4492 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a3d0b640-5e5c-467e-b1d4-8fb05d86d7c9-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:06:30 crc kubenswrapper[4492]: I1126 07:06:30.506983 4492 generic.go:334] "Generic (PLEG): container finished" podID="a3d0b640-5e5c-467e-b1d4-8fb05d86d7c9" containerID="5ed9723dde325290e874a9c0bd13c009a5dd9a94bb76d8182ffbaaf185dc06e1" exitCode=0 Nov 26 07:06:30 crc kubenswrapper[4492]: I1126 07:06:30.507029 4492 generic.go:334] "Generic (PLEG): container finished" podID="a3d0b640-5e5c-467e-b1d4-8fb05d86d7c9" containerID="077b2570f8f00518d542907f027b7a6bfb363e56d3fc7c97834c9692458204ee" exitCode=143 Nov 26 07:06:30 crc kubenswrapper[4492]: I1126 07:06:30.507042 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"a3d0b640-5e5c-467e-b1d4-8fb05d86d7c9","Type":"ContainerDied","Data":"5ed9723dde325290e874a9c0bd13c009a5dd9a94bb76d8182ffbaaf185dc06e1"} Nov 26 07:06:30 crc kubenswrapper[4492]: I1126 07:06:30.507089 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"a3d0b640-5e5c-467e-b1d4-8fb05d86d7c9","Type":"ContainerDied","Data":"077b2570f8f00518d542907f027b7a6bfb363e56d3fc7c97834c9692458204ee"} Nov 26 07:06:30 crc kubenswrapper[4492]: I1126 07:06:30.507102 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"a3d0b640-5e5c-467e-b1d4-8fb05d86d7c9","Type":"ContainerDied","Data":"78a6e5d18ce111e1561ff2f1d6b0ed38cb78c4f4d5edf4a30440ae3bf7e6a41b"} Nov 26 07:06:30 crc kubenswrapper[4492]: I1126 07:06:30.507124 4492 scope.go:117] "RemoveContainer" containerID="5ed9723dde325290e874a9c0bd13c009a5dd9a94bb76d8182ffbaaf185dc06e1" Nov 26 07:06:30 crc kubenswrapper[4492]: I1126 07:06:30.507420 
4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-7bb6557f96-rgc7g" podUID="a15ec528-9195-4dfe-95b7-e30a44f74b44" containerName="horizon-log" containerID="cri-o://1ae7aee7de996ae15f21a63313f4987ac66b6771692515d2a9f4fbf55b2e1331" gracePeriod=30 Nov 26 07:06:30 crc kubenswrapper[4492]: I1126 07:06:30.507978 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-7bb6557f96-rgc7g" podUID="a15ec528-9195-4dfe-95b7-e30a44f74b44" containerName="horizon" containerID="cri-o://43fafd6a195677c69054be8121016986713c956dc138b464b122d51ce9a8af53" gracePeriod=30 Nov 26 07:06:30 crc kubenswrapper[4492]: I1126 07:06:30.508476 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 26 07:06:30 crc kubenswrapper[4492]: I1126 07:06:30.546248 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Nov 26 07:06:30 crc kubenswrapper[4492]: I1126 07:06:30.549671 4492 scope.go:117] "RemoveContainer" containerID="077b2570f8f00518d542907f027b7a6bfb363e56d3fc7c97834c9692458204ee" Nov 26 07:06:30 crc kubenswrapper[4492]: I1126 07:06:30.554071 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"] Nov 26 07:06:30 crc kubenswrapper[4492]: I1126 07:06:30.560651 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Nov 26 07:06:30 crc kubenswrapper[4492]: E1126 07:06:30.561038 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="689a4cb6-8c65-4db6-b3d2-b825ca6ddd7e" containerName="horizon" Nov 26 07:06:30 crc kubenswrapper[4492]: I1126 07:06:30.561056 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="689a4cb6-8c65-4db6-b3d2-b825ca6ddd7e" containerName="horizon" Nov 26 07:06:30 crc kubenswrapper[4492]: E1126 07:06:30.561067 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a3d0b640-5e5c-467e-b1d4-8fb05d86d7c9" containerName="cinder-api" Nov 26 07:06:30 crc kubenswrapper[4492]: I1126 07:06:30.561074 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="a3d0b640-5e5c-467e-b1d4-8fb05d86d7c9" containerName="cinder-api" Nov 26 07:06:30 crc kubenswrapper[4492]: E1126 07:06:30.561086 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="38194db3-f048-45e5-80d6-7dfa8f1f7420" containerName="neutron-api" Nov 26 07:06:30 crc kubenswrapper[4492]: I1126 07:06:30.561093 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="38194db3-f048-45e5-80d6-7dfa8f1f7420" containerName="neutron-api" Nov 26 07:06:30 crc kubenswrapper[4492]: E1126 07:06:30.561110 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a6a20d7b-0eda-4124-97f8-0882c8fb0d3e" containerName="init" Nov 26 07:06:30 crc kubenswrapper[4492]: I1126 07:06:30.561116 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="a6a20d7b-0eda-4124-97f8-0882c8fb0d3e" containerName="init" Nov 26 07:06:30 crc kubenswrapper[4492]: E1126 07:06:30.561124 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="689a4cb6-8c65-4db6-b3d2-b825ca6ddd7e" containerName="horizon-log" Nov 26 07:06:30 crc kubenswrapper[4492]: I1126 07:06:30.561130 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="689a4cb6-8c65-4db6-b3d2-b825ca6ddd7e" containerName="horizon-log" Nov 26 07:06:30 crc kubenswrapper[4492]: E1126 07:06:30.561140 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a3d0b640-5e5c-467e-b1d4-8fb05d86d7c9" 
containerName="cinder-api-log" Nov 26 07:06:30 crc kubenswrapper[4492]: I1126 07:06:30.561146 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="a3d0b640-5e5c-467e-b1d4-8fb05d86d7c9" containerName="cinder-api-log" Nov 26 07:06:30 crc kubenswrapper[4492]: E1126 07:06:30.561155 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a6a20d7b-0eda-4124-97f8-0882c8fb0d3e" containerName="dnsmasq-dns" Nov 26 07:06:30 crc kubenswrapper[4492]: I1126 07:06:30.561161 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="a6a20d7b-0eda-4124-97f8-0882c8fb0d3e" containerName="dnsmasq-dns" Nov 26 07:06:30 crc kubenswrapper[4492]: E1126 07:06:30.561188 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c4316787-8af1-40ef-995a-0f8aabd1bf11" containerName="horizon" Nov 26 07:06:30 crc kubenswrapper[4492]: I1126 07:06:30.561200 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="c4316787-8af1-40ef-995a-0f8aabd1bf11" containerName="horizon" Nov 26 07:06:30 crc kubenswrapper[4492]: E1126 07:06:30.561212 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c4316787-8af1-40ef-995a-0f8aabd1bf11" containerName="horizon-log" Nov 26 07:06:30 crc kubenswrapper[4492]: I1126 07:06:30.561218 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="c4316787-8af1-40ef-995a-0f8aabd1bf11" containerName="horizon-log" Nov 26 07:06:30 crc kubenswrapper[4492]: E1126 07:06:30.561231 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="38194db3-f048-45e5-80d6-7dfa8f1f7420" containerName="neutron-httpd" Nov 26 07:06:30 crc kubenswrapper[4492]: I1126 07:06:30.561236 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="38194db3-f048-45e5-80d6-7dfa8f1f7420" containerName="neutron-httpd" Nov 26 07:06:30 crc kubenswrapper[4492]: I1126 07:06:30.561388 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="a6a20d7b-0eda-4124-97f8-0882c8fb0d3e" containerName="dnsmasq-dns" Nov 26 07:06:30 crc kubenswrapper[4492]: I1126 07:06:30.561397 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="38194db3-f048-45e5-80d6-7dfa8f1f7420" containerName="neutron-httpd" Nov 26 07:06:30 crc kubenswrapper[4492]: I1126 07:06:30.561404 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="c4316787-8af1-40ef-995a-0f8aabd1bf11" containerName="horizon-log" Nov 26 07:06:30 crc kubenswrapper[4492]: I1126 07:06:30.561411 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="a3d0b640-5e5c-467e-b1d4-8fb05d86d7c9" containerName="cinder-api" Nov 26 07:06:30 crc kubenswrapper[4492]: I1126 07:06:30.561424 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="c4316787-8af1-40ef-995a-0f8aabd1bf11" containerName="horizon" Nov 26 07:06:30 crc kubenswrapper[4492]: I1126 07:06:30.561431 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="38194db3-f048-45e5-80d6-7dfa8f1f7420" containerName="neutron-api" Nov 26 07:06:30 crc kubenswrapper[4492]: I1126 07:06:30.561444 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="689a4cb6-8c65-4db6-b3d2-b825ca6ddd7e" containerName="horizon-log" Nov 26 07:06:30 crc kubenswrapper[4492]: I1126 07:06:30.561452 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="a3d0b640-5e5c-467e-b1d4-8fb05d86d7c9" containerName="cinder-api-log" Nov 26 07:06:30 crc kubenswrapper[4492]: I1126 07:06:30.561461 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="689a4cb6-8c65-4db6-b3d2-b825ca6ddd7e" 
containerName="horizon" Nov 26 07:06:30 crc kubenswrapper[4492]: I1126 07:06:30.562406 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 26 07:06:30 crc kubenswrapper[4492]: I1126 07:06:30.565428 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Nov 26 07:06:30 crc kubenswrapper[4492]: I1126 07:06:30.565552 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-public-svc" Nov 26 07:06:30 crc kubenswrapper[4492]: I1126 07:06:30.565806 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-internal-svc" Nov 26 07:06:30 crc kubenswrapper[4492]: I1126 07:06:30.571402 4492 scope.go:117] "RemoveContainer" containerID="5ed9723dde325290e874a9c0bd13c009a5dd9a94bb76d8182ffbaaf185dc06e1" Nov 26 07:06:30 crc kubenswrapper[4492]: I1126 07:06:30.584027 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 26 07:06:30 crc kubenswrapper[4492]: E1126 07:06:30.598392 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5ed9723dde325290e874a9c0bd13c009a5dd9a94bb76d8182ffbaaf185dc06e1\": container with ID starting with 5ed9723dde325290e874a9c0bd13c009a5dd9a94bb76d8182ffbaaf185dc06e1 not found: ID does not exist" containerID="5ed9723dde325290e874a9c0bd13c009a5dd9a94bb76d8182ffbaaf185dc06e1" Nov 26 07:06:30 crc kubenswrapper[4492]: I1126 07:06:30.598440 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5ed9723dde325290e874a9c0bd13c009a5dd9a94bb76d8182ffbaaf185dc06e1"} err="failed to get container status \"5ed9723dde325290e874a9c0bd13c009a5dd9a94bb76d8182ffbaaf185dc06e1\": rpc error: code = NotFound desc = could not find container \"5ed9723dde325290e874a9c0bd13c009a5dd9a94bb76d8182ffbaaf185dc06e1\": container with ID starting with 5ed9723dde325290e874a9c0bd13c009a5dd9a94bb76d8182ffbaaf185dc06e1 not found: ID does not exist" Nov 26 07:06:30 crc kubenswrapper[4492]: I1126 07:06:30.598469 4492 scope.go:117] "RemoveContainer" containerID="077b2570f8f00518d542907f027b7a6bfb363e56d3fc7c97834c9692458204ee" Nov 26 07:06:30 crc kubenswrapper[4492]: E1126 07:06:30.601500 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"077b2570f8f00518d542907f027b7a6bfb363e56d3fc7c97834c9692458204ee\": container with ID starting with 077b2570f8f00518d542907f027b7a6bfb363e56d3fc7c97834c9692458204ee not found: ID does not exist" containerID="077b2570f8f00518d542907f027b7a6bfb363e56d3fc7c97834c9692458204ee" Nov 26 07:06:30 crc kubenswrapper[4492]: I1126 07:06:30.601532 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"077b2570f8f00518d542907f027b7a6bfb363e56d3fc7c97834c9692458204ee"} err="failed to get container status \"077b2570f8f00518d542907f027b7a6bfb363e56d3fc7c97834c9692458204ee\": rpc error: code = NotFound desc = could not find container \"077b2570f8f00518d542907f027b7a6bfb363e56d3fc7c97834c9692458204ee\": container with ID starting with 077b2570f8f00518d542907f027b7a6bfb363e56d3fc7c97834c9692458204ee not found: ID does not exist" Nov 26 07:06:30 crc kubenswrapper[4492]: I1126 07:06:30.601561 4492 scope.go:117] "RemoveContainer" containerID="5ed9723dde325290e874a9c0bd13c009a5dd9a94bb76d8182ffbaaf185dc06e1" Nov 26 07:06:30 crc kubenswrapper[4492]: I1126 
07:06:30.602014 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5ed9723dde325290e874a9c0bd13c009a5dd9a94bb76d8182ffbaaf185dc06e1"} err="failed to get container status \"5ed9723dde325290e874a9c0bd13c009a5dd9a94bb76d8182ffbaaf185dc06e1\": rpc error: code = NotFound desc = could not find container \"5ed9723dde325290e874a9c0bd13c009a5dd9a94bb76d8182ffbaaf185dc06e1\": container with ID starting with 5ed9723dde325290e874a9c0bd13c009a5dd9a94bb76d8182ffbaaf185dc06e1 not found: ID does not exist" Nov 26 07:06:30 crc kubenswrapper[4492]: I1126 07:06:30.602032 4492 scope.go:117] "RemoveContainer" containerID="077b2570f8f00518d542907f027b7a6bfb363e56d3fc7c97834c9692458204ee" Nov 26 07:06:30 crc kubenswrapper[4492]: I1126 07:06:30.604006 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"077b2570f8f00518d542907f027b7a6bfb363e56d3fc7c97834c9692458204ee"} err="failed to get container status \"077b2570f8f00518d542907f027b7a6bfb363e56d3fc7c97834c9692458204ee\": rpc error: code = NotFound desc = could not find container \"077b2570f8f00518d542907f027b7a6bfb363e56d3fc7c97834c9692458204ee\": container with ID starting with 077b2570f8f00518d542907f027b7a6bfb363e56d3fc7c97834c9692458204ee not found: ID does not exist" Nov 26 07:06:30 crc kubenswrapper[4492]: I1126 07:06:30.709986 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/024b3aa7-2c4d-4f35-b464-480c5f96847c-config-data-custom\") pod \"cinder-api-0\" (UID: \"024b3aa7-2c4d-4f35-b464-480c5f96847c\") " pod="openstack/cinder-api-0" Nov 26 07:06:30 crc kubenswrapper[4492]: I1126 07:06:30.710063 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pfg96\" (UniqueName: \"kubernetes.io/projected/024b3aa7-2c4d-4f35-b464-480c5f96847c-kube-api-access-pfg96\") pod \"cinder-api-0\" (UID: \"024b3aa7-2c4d-4f35-b464-480c5f96847c\") " pod="openstack/cinder-api-0" Nov 26 07:06:30 crc kubenswrapper[4492]: I1126 07:06:30.710098 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/024b3aa7-2c4d-4f35-b464-480c5f96847c-scripts\") pod \"cinder-api-0\" (UID: \"024b3aa7-2c4d-4f35-b464-480c5f96847c\") " pod="openstack/cinder-api-0" Nov 26 07:06:30 crc kubenswrapper[4492]: I1126 07:06:30.710120 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/024b3aa7-2c4d-4f35-b464-480c5f96847c-logs\") pod \"cinder-api-0\" (UID: \"024b3aa7-2c4d-4f35-b464-480c5f96847c\") " pod="openstack/cinder-api-0" Nov 26 07:06:30 crc kubenswrapper[4492]: I1126 07:06:30.710144 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/024b3aa7-2c4d-4f35-b464-480c5f96847c-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"024b3aa7-2c4d-4f35-b464-480c5f96847c\") " pod="openstack/cinder-api-0" Nov 26 07:06:30 crc kubenswrapper[4492]: I1126 07:06:30.710164 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/024b3aa7-2c4d-4f35-b464-480c5f96847c-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"024b3aa7-2c4d-4f35-b464-480c5f96847c\") " 
pod="openstack/cinder-api-0" Nov 26 07:06:30 crc kubenswrapper[4492]: I1126 07:06:30.710283 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/024b3aa7-2c4d-4f35-b464-480c5f96847c-etc-machine-id\") pod \"cinder-api-0\" (UID: \"024b3aa7-2c4d-4f35-b464-480c5f96847c\") " pod="openstack/cinder-api-0" Nov 26 07:06:30 crc kubenswrapper[4492]: I1126 07:06:30.710301 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/024b3aa7-2c4d-4f35-b464-480c5f96847c-config-data\") pod \"cinder-api-0\" (UID: \"024b3aa7-2c4d-4f35-b464-480c5f96847c\") " pod="openstack/cinder-api-0" Nov 26 07:06:30 crc kubenswrapper[4492]: I1126 07:06:30.710392 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/024b3aa7-2c4d-4f35-b464-480c5f96847c-public-tls-certs\") pod \"cinder-api-0\" (UID: \"024b3aa7-2c4d-4f35-b464-480c5f96847c\") " pod="openstack/cinder-api-0" Nov 26 07:06:30 crc kubenswrapper[4492]: I1126 07:06:30.813809 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/024b3aa7-2c4d-4f35-b464-480c5f96847c-etc-machine-id\") pod \"cinder-api-0\" (UID: \"024b3aa7-2c4d-4f35-b464-480c5f96847c\") " pod="openstack/cinder-api-0" Nov 26 07:06:30 crc kubenswrapper[4492]: I1126 07:06:30.814139 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/024b3aa7-2c4d-4f35-b464-480c5f96847c-config-data\") pod \"cinder-api-0\" (UID: \"024b3aa7-2c4d-4f35-b464-480c5f96847c\") " pod="openstack/cinder-api-0" Nov 26 07:06:30 crc kubenswrapper[4492]: I1126 07:06:30.814400 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/024b3aa7-2c4d-4f35-b464-480c5f96847c-public-tls-certs\") pod \"cinder-api-0\" (UID: \"024b3aa7-2c4d-4f35-b464-480c5f96847c\") " pod="openstack/cinder-api-0" Nov 26 07:06:30 crc kubenswrapper[4492]: I1126 07:06:30.814532 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/024b3aa7-2c4d-4f35-b464-480c5f96847c-config-data-custom\") pod \"cinder-api-0\" (UID: \"024b3aa7-2c4d-4f35-b464-480c5f96847c\") " pod="openstack/cinder-api-0" Nov 26 07:06:30 crc kubenswrapper[4492]: I1126 07:06:30.814587 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pfg96\" (UniqueName: \"kubernetes.io/projected/024b3aa7-2c4d-4f35-b464-480c5f96847c-kube-api-access-pfg96\") pod \"cinder-api-0\" (UID: \"024b3aa7-2c4d-4f35-b464-480c5f96847c\") " pod="openstack/cinder-api-0" Nov 26 07:06:30 crc kubenswrapper[4492]: I1126 07:06:30.814632 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/024b3aa7-2c4d-4f35-b464-480c5f96847c-scripts\") pod \"cinder-api-0\" (UID: \"024b3aa7-2c4d-4f35-b464-480c5f96847c\") " pod="openstack/cinder-api-0" Nov 26 07:06:30 crc kubenswrapper[4492]: I1126 07:06:30.814666 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/024b3aa7-2c4d-4f35-b464-480c5f96847c-logs\") pod \"cinder-api-0\" (UID: 
\"024b3aa7-2c4d-4f35-b464-480c5f96847c\") " pod="openstack/cinder-api-0" Nov 26 07:06:30 crc kubenswrapper[4492]: I1126 07:06:30.814706 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/024b3aa7-2c4d-4f35-b464-480c5f96847c-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"024b3aa7-2c4d-4f35-b464-480c5f96847c\") " pod="openstack/cinder-api-0" Nov 26 07:06:30 crc kubenswrapper[4492]: I1126 07:06:30.814731 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/024b3aa7-2c4d-4f35-b464-480c5f96847c-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"024b3aa7-2c4d-4f35-b464-480c5f96847c\") " pod="openstack/cinder-api-0" Nov 26 07:06:30 crc kubenswrapper[4492]: I1126 07:06:30.826803 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/024b3aa7-2c4d-4f35-b464-480c5f96847c-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"024b3aa7-2c4d-4f35-b464-480c5f96847c\") " pod="openstack/cinder-api-0" Nov 26 07:06:30 crc kubenswrapper[4492]: I1126 07:06:30.826853 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/024b3aa7-2c4d-4f35-b464-480c5f96847c-public-tls-certs\") pod \"cinder-api-0\" (UID: \"024b3aa7-2c4d-4f35-b464-480c5f96847c\") " pod="openstack/cinder-api-0" Nov 26 07:06:30 crc kubenswrapper[4492]: I1126 07:06:30.828571 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/024b3aa7-2c4d-4f35-b464-480c5f96847c-logs\") pod \"cinder-api-0\" (UID: \"024b3aa7-2c4d-4f35-b464-480c5f96847c\") " pod="openstack/cinder-api-0" Nov 26 07:06:30 crc kubenswrapper[4492]: I1126 07:06:30.830999 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/024b3aa7-2c4d-4f35-b464-480c5f96847c-config-data\") pod \"cinder-api-0\" (UID: \"024b3aa7-2c4d-4f35-b464-480c5f96847c\") " pod="openstack/cinder-api-0" Nov 26 07:06:30 crc kubenswrapper[4492]: I1126 07:06:30.831046 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/024b3aa7-2c4d-4f35-b464-480c5f96847c-etc-machine-id\") pod \"cinder-api-0\" (UID: \"024b3aa7-2c4d-4f35-b464-480c5f96847c\") " pod="openstack/cinder-api-0" Nov 26 07:06:30 crc kubenswrapper[4492]: I1126 07:06:30.837024 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/024b3aa7-2c4d-4f35-b464-480c5f96847c-config-data-custom\") pod \"cinder-api-0\" (UID: \"024b3aa7-2c4d-4f35-b464-480c5f96847c\") " pod="openstack/cinder-api-0" Nov 26 07:06:30 crc kubenswrapper[4492]: I1126 07:06:30.840764 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/024b3aa7-2c4d-4f35-b464-480c5f96847c-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"024b3aa7-2c4d-4f35-b464-480c5f96847c\") " pod="openstack/cinder-api-0" Nov 26 07:06:30 crc kubenswrapper[4492]: I1126 07:06:30.853758 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/024b3aa7-2c4d-4f35-b464-480c5f96847c-scripts\") pod \"cinder-api-0\" (UID: \"024b3aa7-2c4d-4f35-b464-480c5f96847c\") " 
pod="openstack/cinder-api-0" Nov 26 07:06:30 crc kubenswrapper[4492]: I1126 07:06:30.854195 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pfg96\" (UniqueName: \"kubernetes.io/projected/024b3aa7-2c4d-4f35-b464-480c5f96847c-kube-api-access-pfg96\") pod \"cinder-api-0\" (UID: \"024b3aa7-2c4d-4f35-b464-480c5f96847c\") " pod="openstack/cinder-api-0" Nov 26 07:06:30 crc kubenswrapper[4492]: I1126 07:06:30.879093 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 26 07:06:30 crc kubenswrapper[4492]: I1126 07:06:30.951530 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 26 07:06:31 crc kubenswrapper[4492]: I1126 07:06:31.123758 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc-run-httpd\") pod \"e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc\" (UID: \"e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc\") " Nov 26 07:06:31 crc kubenswrapper[4492]: I1126 07:06:31.124130 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xbm4n\" (UniqueName: \"kubernetes.io/projected/e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc-kube-api-access-xbm4n\") pod \"e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc\" (UID: \"e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc\") " Nov 26 07:06:31 crc kubenswrapper[4492]: I1126 07:06:31.124191 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc-combined-ca-bundle\") pod \"e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc\" (UID: \"e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc\") " Nov 26 07:06:31 crc kubenswrapper[4492]: I1126 07:06:31.124235 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc" (UID: "e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:06:31 crc kubenswrapper[4492]: I1126 07:06:31.124270 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc-scripts\") pod \"e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc\" (UID: \"e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc\") " Nov 26 07:06:31 crc kubenswrapper[4492]: I1126 07:06:31.124328 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc-config-data\") pod \"e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc\" (UID: \"e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc\") " Nov 26 07:06:31 crc kubenswrapper[4492]: I1126 07:06:31.124345 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc-sg-core-conf-yaml\") pod \"e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc\" (UID: \"e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc\") " Nov 26 07:06:31 crc kubenswrapper[4492]: I1126 07:06:31.124456 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc-log-httpd\") pod \"e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc\" (UID: \"e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc\") " Nov 26 07:06:31 crc kubenswrapper[4492]: I1126 07:06:31.124881 4492 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 26 07:06:31 crc kubenswrapper[4492]: I1126 07:06:31.125290 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc" (UID: "e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:06:31 crc kubenswrapper[4492]: I1126 07:06:31.131015 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc-scripts" (OuterVolumeSpecName: "scripts") pod "e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc" (UID: "e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:06:31 crc kubenswrapper[4492]: I1126 07:06:31.131143 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc-kube-api-access-xbm4n" (OuterVolumeSpecName: "kube-api-access-xbm4n") pod "e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc" (UID: "e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc"). InnerVolumeSpecName "kube-api-access-xbm4n". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:06:31 crc kubenswrapper[4492]: I1126 07:06:31.152402 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc" (UID: "e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:06:31 crc kubenswrapper[4492]: I1126 07:06:31.174764 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc" (UID: "e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:06:31 crc kubenswrapper[4492]: I1126 07:06:31.200640 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc-config-data" (OuterVolumeSpecName: "config-data") pod "e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc" (UID: "e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:06:31 crc kubenswrapper[4492]: I1126 07:06:31.227453 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xbm4n\" (UniqueName: \"kubernetes.io/projected/e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc-kube-api-access-xbm4n\") on node \"crc\" DevicePath \"\"" Nov 26 07:06:31 crc kubenswrapper[4492]: I1126 07:06:31.227489 4492 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:06:31 crc kubenswrapper[4492]: I1126 07:06:31.227502 4492 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 07:06:31 crc kubenswrapper[4492]: I1126 07:06:31.227513 4492 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 07:06:31 crc kubenswrapper[4492]: I1126 07:06:31.227527 4492 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 26 07:06:31 crc kubenswrapper[4492]: I1126 07:06:31.227536 4492 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 26 07:06:31 crc kubenswrapper[4492]: I1126 07:06:31.332747 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 26 07:06:31 crc kubenswrapper[4492]: I1126 07:06:31.525492 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"024b3aa7-2c4d-4f35-b464-480c5f96847c","Type":"ContainerStarted","Data":"bebe020a6b7e38ebf73320849cdba2c4060e3229d7d27f1ecbaf41d4d6021f56"} Nov 26 07:06:31 crc kubenswrapper[4492]: I1126 07:06:31.530480 4492 generic.go:334] "Generic (PLEG): container finished" podID="e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc" containerID="5ea9c972c1cf68b00b9528962903d8c7078fa73512e8150c37f248b0aa4f78b9" exitCode=0 Nov 26 07:06:31 crc kubenswrapper[4492]: I1126 07:06:31.530614 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 26 07:06:31 crc kubenswrapper[4492]: I1126 07:06:31.530687 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc","Type":"ContainerDied","Data":"5ea9c972c1cf68b00b9528962903d8c7078fa73512e8150c37f248b0aa4f78b9"} Nov 26 07:06:31 crc kubenswrapper[4492]: I1126 07:06:31.531099 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc","Type":"ContainerDied","Data":"e6e216cf30710e9bc2b45282a9035bf0b72e3a7a662e2cf9b4e9522b299f5726"} Nov 26 07:06:31 crc kubenswrapper[4492]: I1126 07:06:31.531150 4492 scope.go:117] "RemoveContainer" containerID="06deabb6e42f04649aee661f9b104416e1add2d2c2229f9d6bd4dc3b667d9767" Nov 26 07:06:31 crc kubenswrapper[4492]: I1126 07:06:31.593236 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 26 07:06:31 crc kubenswrapper[4492]: I1126 07:06:31.598471 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 26 07:06:31 crc kubenswrapper[4492]: I1126 07:06:31.643647 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 26 07:06:31 crc kubenswrapper[4492]: E1126 07:06:31.644683 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc" containerName="ceilometer-notification-agent" Nov 26 07:06:31 crc kubenswrapper[4492]: I1126 07:06:31.644703 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc" containerName="ceilometer-notification-agent" Nov 26 07:06:31 crc kubenswrapper[4492]: E1126 07:06:31.644712 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc" containerName="proxy-httpd" Nov 26 07:06:31 crc kubenswrapper[4492]: I1126 07:06:31.644719 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc" containerName="proxy-httpd" Nov 26 07:06:31 crc kubenswrapper[4492]: E1126 07:06:31.644735 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc" containerName="sg-core" Nov 26 07:06:31 crc kubenswrapper[4492]: I1126 07:06:31.644741 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc" containerName="sg-core" Nov 26 07:06:31 crc kubenswrapper[4492]: I1126 07:06:31.644988 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc" containerName="sg-core" Nov 26 07:06:31 crc kubenswrapper[4492]: I1126 07:06:31.645017 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc" containerName="proxy-httpd" Nov 26 07:06:31 crc kubenswrapper[4492]: I1126 07:06:31.645032 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc" containerName="ceilometer-notification-agent" Nov 26 07:06:31 crc kubenswrapper[4492]: I1126 07:06:31.662070 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 26 07:06:31 crc kubenswrapper[4492]: I1126 07:06:31.662338 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 26 07:06:31 crc kubenswrapper[4492]: I1126 07:06:31.669693 4492 scope.go:117] "RemoveContainer" containerID="79a3438798f4dbce66ccdb69dd77edcac910249c13dc4e1f7ab7cb20daed8564" Nov 26 07:06:31 crc kubenswrapper[4492]: I1126 07:06:31.682818 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 26 07:06:31 crc kubenswrapper[4492]: I1126 07:06:31.683115 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 26 07:06:31 crc kubenswrapper[4492]: I1126 07:06:31.756445 4492 scope.go:117] "RemoveContainer" containerID="5ea9c972c1cf68b00b9528962903d8c7078fa73512e8150c37f248b0aa4f78b9" Nov 26 07:06:31 crc kubenswrapper[4492]: I1126 07:06:31.820873 4492 scope.go:117] "RemoveContainer" containerID="06deabb6e42f04649aee661f9b104416e1add2d2c2229f9d6bd4dc3b667d9767" Nov 26 07:06:31 crc kubenswrapper[4492]: E1126 07:06:31.825889 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"06deabb6e42f04649aee661f9b104416e1add2d2c2229f9d6bd4dc3b667d9767\": container with ID starting with 06deabb6e42f04649aee661f9b104416e1add2d2c2229f9d6bd4dc3b667d9767 not found: ID does not exist" containerID="06deabb6e42f04649aee661f9b104416e1add2d2c2229f9d6bd4dc3b667d9767" Nov 26 07:06:31 crc kubenswrapper[4492]: I1126 07:06:31.825936 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"06deabb6e42f04649aee661f9b104416e1add2d2c2229f9d6bd4dc3b667d9767"} err="failed to get container status \"06deabb6e42f04649aee661f9b104416e1add2d2c2229f9d6bd4dc3b667d9767\": rpc error: code = NotFound desc = could not find container \"06deabb6e42f04649aee661f9b104416e1add2d2c2229f9d6bd4dc3b667d9767\": container with ID starting with 06deabb6e42f04649aee661f9b104416e1add2d2c2229f9d6bd4dc3b667d9767 not found: ID does not exist" Nov 26 07:06:31 crc kubenswrapper[4492]: I1126 07:06:31.825964 4492 scope.go:117] "RemoveContainer" containerID="79a3438798f4dbce66ccdb69dd77edcac910249c13dc4e1f7ab7cb20daed8564" Nov 26 07:06:31 crc kubenswrapper[4492]: E1126 07:06:31.826373 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"79a3438798f4dbce66ccdb69dd77edcac910249c13dc4e1f7ab7cb20daed8564\": container with ID starting with 79a3438798f4dbce66ccdb69dd77edcac910249c13dc4e1f7ab7cb20daed8564 not found: ID does not exist" containerID="79a3438798f4dbce66ccdb69dd77edcac910249c13dc4e1f7ab7cb20daed8564" Nov 26 07:06:31 crc kubenswrapper[4492]: I1126 07:06:31.826414 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"79a3438798f4dbce66ccdb69dd77edcac910249c13dc4e1f7ab7cb20daed8564"} err="failed to get container status \"79a3438798f4dbce66ccdb69dd77edcac910249c13dc4e1f7ab7cb20daed8564\": rpc error: code = NotFound desc = could not find container \"79a3438798f4dbce66ccdb69dd77edcac910249c13dc4e1f7ab7cb20daed8564\": container with ID starting with 79a3438798f4dbce66ccdb69dd77edcac910249c13dc4e1f7ab7cb20daed8564 not found: ID does not exist" Nov 26 07:06:31 crc kubenswrapper[4492]: I1126 07:06:31.826442 4492 scope.go:117] "RemoveContainer" containerID="5ea9c972c1cf68b00b9528962903d8c7078fa73512e8150c37f248b0aa4f78b9" Nov 26 07:06:31 crc kubenswrapper[4492]: E1126 07:06:31.826947 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound 
desc = could not find container \"5ea9c972c1cf68b00b9528962903d8c7078fa73512e8150c37f248b0aa4f78b9\": container with ID starting with 5ea9c972c1cf68b00b9528962903d8c7078fa73512e8150c37f248b0aa4f78b9 not found: ID does not exist" containerID="5ea9c972c1cf68b00b9528962903d8c7078fa73512e8150c37f248b0aa4f78b9" Nov 26 07:06:31 crc kubenswrapper[4492]: I1126 07:06:31.826969 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5ea9c972c1cf68b00b9528962903d8c7078fa73512e8150c37f248b0aa4f78b9"} err="failed to get container status \"5ea9c972c1cf68b00b9528962903d8c7078fa73512e8150c37f248b0aa4f78b9\": rpc error: code = NotFound desc = could not find container \"5ea9c972c1cf68b00b9528962903d8c7078fa73512e8150c37f248b0aa4f78b9\": container with ID starting with 5ea9c972c1cf68b00b9528962903d8c7078fa73512e8150c37f248b0aa4f78b9 not found: ID does not exist" Nov 26 07:06:31 crc kubenswrapper[4492]: I1126 07:06:31.853310 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f353995e-4082-4459-9591-efae7915aed4-log-httpd\") pod \"ceilometer-0\" (UID: \"f353995e-4082-4459-9591-efae7915aed4\") " pod="openstack/ceilometer-0" Nov 26 07:06:31 crc kubenswrapper[4492]: I1126 07:06:31.854034 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f353995e-4082-4459-9591-efae7915aed4-run-httpd\") pod \"ceilometer-0\" (UID: \"f353995e-4082-4459-9591-efae7915aed4\") " pod="openstack/ceilometer-0" Nov 26 07:06:31 crc kubenswrapper[4492]: I1126 07:06:31.854151 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kqzgl\" (UniqueName: \"kubernetes.io/projected/f353995e-4082-4459-9591-efae7915aed4-kube-api-access-kqzgl\") pod \"ceilometer-0\" (UID: \"f353995e-4082-4459-9591-efae7915aed4\") " pod="openstack/ceilometer-0" Nov 26 07:06:31 crc kubenswrapper[4492]: I1126 07:06:31.854200 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f353995e-4082-4459-9591-efae7915aed4-config-data\") pod \"ceilometer-0\" (UID: \"f353995e-4082-4459-9591-efae7915aed4\") " pod="openstack/ceilometer-0" Nov 26 07:06:31 crc kubenswrapper[4492]: I1126 07:06:31.854237 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f353995e-4082-4459-9591-efae7915aed4-scripts\") pod \"ceilometer-0\" (UID: \"f353995e-4082-4459-9591-efae7915aed4\") " pod="openstack/ceilometer-0" Nov 26 07:06:31 crc kubenswrapper[4492]: I1126 07:06:31.854301 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f353995e-4082-4459-9591-efae7915aed4-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"f353995e-4082-4459-9591-efae7915aed4\") " pod="openstack/ceilometer-0" Nov 26 07:06:31 crc kubenswrapper[4492]: I1126 07:06:31.854363 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f353995e-4082-4459-9591-efae7915aed4-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"f353995e-4082-4459-9591-efae7915aed4\") " pod="openstack/ceilometer-0" Nov 26 07:06:31 crc kubenswrapper[4492]: 
I1126 07:06:31.956801 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f353995e-4082-4459-9591-efae7915aed4-run-httpd\") pod \"ceilometer-0\" (UID: \"f353995e-4082-4459-9591-efae7915aed4\") " pod="openstack/ceilometer-0"
Nov 26 07:06:31 crc kubenswrapper[4492]: I1126 07:06:31.956917 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kqzgl\" (UniqueName: \"kubernetes.io/projected/f353995e-4082-4459-9591-efae7915aed4-kube-api-access-kqzgl\") pod \"ceilometer-0\" (UID: \"f353995e-4082-4459-9591-efae7915aed4\") " pod="openstack/ceilometer-0"
Nov 26 07:06:31 crc kubenswrapper[4492]: I1126 07:06:31.956990 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f353995e-4082-4459-9591-efae7915aed4-config-data\") pod \"ceilometer-0\" (UID: \"f353995e-4082-4459-9591-efae7915aed4\") " pod="openstack/ceilometer-0"
Nov 26 07:06:31 crc kubenswrapper[4492]: I1126 07:06:31.957043 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f353995e-4082-4459-9591-efae7915aed4-scripts\") pod \"ceilometer-0\" (UID: \"f353995e-4082-4459-9591-efae7915aed4\") " pod="openstack/ceilometer-0"
Nov 26 07:06:31 crc kubenswrapper[4492]: I1126 07:06:31.957094 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f353995e-4082-4459-9591-efae7915aed4-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"f353995e-4082-4459-9591-efae7915aed4\") " pod="openstack/ceilometer-0"
Nov 26 07:06:31 crc kubenswrapper[4492]: I1126 07:06:31.957146 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f353995e-4082-4459-9591-efae7915aed4-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"f353995e-4082-4459-9591-efae7915aed4\") " pod="openstack/ceilometer-0"
Nov 26 07:06:31 crc kubenswrapper[4492]: I1126 07:06:31.957312 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f353995e-4082-4459-9591-efae7915aed4-log-httpd\") pod \"ceilometer-0\" (UID: \"f353995e-4082-4459-9591-efae7915aed4\") " pod="openstack/ceilometer-0"
Nov 26 07:06:31 crc kubenswrapper[4492]: I1126 07:06:31.957635 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f353995e-4082-4459-9591-efae7915aed4-run-httpd\") pod \"ceilometer-0\" (UID: \"f353995e-4082-4459-9591-efae7915aed4\") " pod="openstack/ceilometer-0"
Nov 26 07:06:31 crc kubenswrapper[4492]: I1126 07:06:31.957833 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f353995e-4082-4459-9591-efae7915aed4-log-httpd\") pod \"ceilometer-0\" (UID: \"f353995e-4082-4459-9591-efae7915aed4\") " pod="openstack/ceilometer-0"
Nov 26 07:06:31 crc kubenswrapper[4492]: I1126 07:06:31.961989 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f353995e-4082-4459-9591-efae7915aed4-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"f353995e-4082-4459-9591-efae7915aed4\") " pod="openstack/ceilometer-0"
Nov 26 07:06:31 crc kubenswrapper[4492]: I1126 07:06:31.963833 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f353995e-4082-4459-9591-efae7915aed4-config-data\") pod \"ceilometer-0\" (UID: \"f353995e-4082-4459-9591-efae7915aed4\") " pod="openstack/ceilometer-0"
Nov 26 07:06:31 crc kubenswrapper[4492]: I1126 07:06:31.972638 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f353995e-4082-4459-9591-efae7915aed4-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"f353995e-4082-4459-9591-efae7915aed4\") " pod="openstack/ceilometer-0"
Nov 26 07:06:31 crc kubenswrapper[4492]: I1126 07:06:31.982793 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f353995e-4082-4459-9591-efae7915aed4-scripts\") pod \"ceilometer-0\" (UID: \"f353995e-4082-4459-9591-efae7915aed4\") " pod="openstack/ceilometer-0"
Nov 26 07:06:31 crc kubenswrapper[4492]: I1126 07:06:31.988752 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kqzgl\" (UniqueName: \"kubernetes.io/projected/f353995e-4082-4459-9591-efae7915aed4-kube-api-access-kqzgl\") pod \"ceilometer-0\" (UID: \"f353995e-4082-4459-9591-efae7915aed4\") " pod="openstack/ceilometer-0"
Nov 26 07:06:32 crc kubenswrapper[4492]: I1126 07:06:32.029310 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 26 07:06:32 crc kubenswrapper[4492]: I1126 07:06:32.494703 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a3d0b640-5e5c-467e-b1d4-8fb05d86d7c9" path="/var/lib/kubelet/pods/a3d0b640-5e5c-467e-b1d4-8fb05d86d7c9/volumes"
Nov 26 07:06:32 crc kubenswrapper[4492]: I1126 07:06:32.497363 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc" path="/var/lib/kubelet/pods/e6cd09e6-a2ae-4a99-8b1a-a911b24bdadc/volumes"
Nov 26 07:06:32 crc kubenswrapper[4492]: I1126 07:06:32.549210 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Nov 26 07:06:32 crc kubenswrapper[4492]: I1126 07:06:32.561514 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"024b3aa7-2c4d-4f35-b464-480c5f96847c","Type":"ContainerStarted","Data":"76cc8be2fe6f4fec4ba6dd2d090497c895175100640767061bea268e0c65131f"}
Nov 26 07:06:32 crc kubenswrapper[4492]: I1126 07:06:32.734725 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0"
Nov 26 07:06:32 crc kubenswrapper[4492]: I1126 07:06:32.904417 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-79cdbd64cc-bkvb2"
Nov 26 07:06:32 crc kubenswrapper[4492]: I1126 07:06:32.974480 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-cb64f9797-5jdj2"]
Nov 26 07:06:32 crc kubenswrapper[4492]: I1126 07:06:32.974702 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-cb64f9797-5jdj2" podUID="4f397877-f399-472d-a32d-11cb9b87fd73" containerName="dnsmasq-dns" containerID="cri-o://e08d804f4aee77c042bd46163828d85cdfff70f369ca3e0e6d23dace54d1eefa" gracePeriod=10
Nov 26 07:06:33 crc kubenswrapper[4492]: I1126 07:06:33.077831 4492 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-cb64f9797-5jdj2" podUID="4f397877-f399-472d-a32d-11cb9b87fd73" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.155:5353: connect: connection refused"
Nov 26 07:06:33 crc kubenswrapper[4492]: I1126 07:06:33.122504 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-bf7664774-jvgvf"
Nov 26 07:06:33 crc kubenswrapper[4492]: I1126 07:06:33.350078 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-bf7664774-jvgvf"
Nov 26 07:06:33 crc kubenswrapper[4492]: I1126 07:06:33.423590 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-6979b94f94-5ddp5"]
Nov 26 07:06:33 crc kubenswrapper[4492]: I1126 07:06:33.423994 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-6979b94f94-5ddp5" podUID="0d14378e-a9ce-4c01-854f-a5f9ca277f73" containerName="barbican-api-log" containerID="cri-o://ce4142b9167acdddbb6b01998e851089dd94701f8afcba4a1e2db70d070f5066" gracePeriod=30
Nov 26 07:06:33 crc kubenswrapper[4492]: I1126 07:06:33.424396 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-6979b94f94-5ddp5" podUID="0d14378e-a9ce-4c01-854f-a5f9ca277f73" containerName="barbican-api" containerID="cri-o://03360ce9a4b3df2cc2b1f7000a1ee59185b92440cf62ab59fca8ac04e6c412ef" gracePeriod=30
Nov 26 07:06:33 crc kubenswrapper[4492]: I1126 07:06:33.436401 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-cb64f9797-5jdj2"
Nov 26 07:06:33 crc kubenswrapper[4492]: I1126 07:06:33.577721 4492 generic.go:334] "Generic (PLEG): container finished" podID="4f397877-f399-472d-a32d-11cb9b87fd73" containerID="e08d804f4aee77c042bd46163828d85cdfff70f369ca3e0e6d23dace54d1eefa" exitCode=0
Nov 26 07:06:33 crc kubenswrapper[4492]: I1126 07:06:33.577795 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-cb64f9797-5jdj2"
Nov 26 07:06:33 crc kubenswrapper[4492]: I1126 07:06:33.577798 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cb64f9797-5jdj2" event={"ID":"4f397877-f399-472d-a32d-11cb9b87fd73","Type":"ContainerDied","Data":"e08d804f4aee77c042bd46163828d85cdfff70f369ca3e0e6d23dace54d1eefa"}
Nov 26 07:06:33 crc kubenswrapper[4492]: I1126 07:06:33.577914 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cb64f9797-5jdj2" event={"ID":"4f397877-f399-472d-a32d-11cb9b87fd73","Type":"ContainerDied","Data":"6f070c6428d4d6c332433a36efc6332be9724108f8fef6baac21cb4dd948010d"}
Nov 26 07:06:33 crc kubenswrapper[4492]: I1126 07:06:33.577952 4492 scope.go:117] "RemoveContainer" containerID="e08d804f4aee77c042bd46163828d85cdfff70f369ca3e0e6d23dace54d1eefa"
Nov 26 07:06:33 crc kubenswrapper[4492]: I1126 07:06:33.579760 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f353995e-4082-4459-9591-efae7915aed4","Type":"ContainerStarted","Data":"0d6371fa8930961b7c1b5ab38603c8ade2771dfbc6dd307a32ceaba2eb23213b"}
Nov 26 07:06:33 crc kubenswrapper[4492]: I1126 07:06:33.579817 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f353995e-4082-4459-9591-efae7915aed4","Type":"ContainerStarted","Data":"30c28d5e683aa178b3996cef8ba6e3356df5d59965845defa5f18ceb58bfe7ce"}
Nov 26 07:06:33 crc kubenswrapper[4492]: I1126 07:06:33.591684 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"024b3aa7-2c4d-4f35-b464-480c5f96847c","Type":"ContainerStarted","Data":"02760b0fe3f570187aa2ab27bf074bea490ed7095dc51188e21d9c9c58dd2e48"}
Nov 26 07:06:33 crc kubenswrapper[4492]: I1126 07:06:33.591735 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0"
Nov 26 07:06:33 crc kubenswrapper[4492]: I1126 07:06:33.602158 4492 scope.go:117] "RemoveContainer" containerID="dcd23b3979acf9eeb360e46d62496cb82707a3ad11569bbdc413d7006cb80318"
Nov 26 07:06:33 crc kubenswrapper[4492]: I1126 07:06:33.607321 4492 generic.go:334] "Generic (PLEG): container finished" podID="0d14378e-a9ce-4c01-854f-a5f9ca277f73" containerID="ce4142b9167acdddbb6b01998e851089dd94701f8afcba4a1e2db70d070f5066" exitCode=143
Nov 26 07:06:33 crc kubenswrapper[4492]: I1126 07:06:33.607823 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-6979b94f94-5ddp5" event={"ID":"0d14378e-a9ce-4c01-854f-a5f9ca277f73","Type":"ContainerDied","Data":"ce4142b9167acdddbb6b01998e851089dd94701f8afcba4a1e2db70d070f5066"}
Nov 26 07:06:33 crc kubenswrapper[4492]: I1126 07:06:33.628795 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4f397877-f399-472d-a32d-11cb9b87fd73-config\") pod \"4f397877-f399-472d-a32d-11cb9b87fd73\" (UID: \"4f397877-f399-472d-a32d-11cb9b87fd73\") "
Nov 26 07:06:33 crc kubenswrapper[4492]: I1126 07:06:33.628977 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4f397877-f399-472d-a32d-11cb9b87fd73-ovsdbserver-sb\") pod \"4f397877-f399-472d-a32d-11cb9b87fd73\" (UID: \"4f397877-f399-472d-a32d-11cb9b87fd73\") "
Nov 26 07:06:33 crc kubenswrapper[4492]: I1126 07:06:33.629085 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/4f397877-f399-472d-a32d-11cb9b87fd73-dns-swift-storage-0\") pod \"4f397877-f399-472d-a32d-11cb9b87fd73\" (UID: \"4f397877-f399-472d-a32d-11cb9b87fd73\") "
Nov 26 07:06:33 crc kubenswrapper[4492]: I1126 07:06:33.629205 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4f397877-f399-472d-a32d-11cb9b87fd73-ovsdbserver-nb\") pod \"4f397877-f399-472d-a32d-11cb9b87fd73\" (UID: \"4f397877-f399-472d-a32d-11cb9b87fd73\") "
Nov 26 07:06:33 crc kubenswrapper[4492]: I1126 07:06:33.632518 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4f397877-f399-472d-a32d-11cb9b87fd73-dns-svc\") pod \"4f397877-f399-472d-a32d-11cb9b87fd73\" (UID: \"4f397877-f399-472d-a32d-11cb9b87fd73\") "
Nov 26 07:06:33 crc kubenswrapper[4492]: I1126 07:06:33.632640 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2t8zr\" (UniqueName: \"kubernetes.io/projected/4f397877-f399-472d-a32d-11cb9b87fd73-kube-api-access-2t8zr\") pod \"4f397877-f399-472d-a32d-11cb9b87fd73\" (UID: \"4f397877-f399-472d-a32d-11cb9b87fd73\") "
Nov 26 07:06:33 crc kubenswrapper[4492]: I1126 07:06:33.639148 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4f397877-f399-472d-a32d-11cb9b87fd73-kube-api-access-2t8zr" (OuterVolumeSpecName: "kube-api-access-2t8zr") pod "4f397877-f399-472d-a32d-11cb9b87fd73" (UID: "4f397877-f399-472d-a32d-11cb9b87fd73"). InnerVolumeSpecName "kube-api-access-2t8zr". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 07:06:33 crc kubenswrapper[4492]: I1126 07:06:33.718062 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4f397877-f399-472d-a32d-11cb9b87fd73-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "4f397877-f399-472d-a32d-11cb9b87fd73" (UID: "4f397877-f399-472d-a32d-11cb9b87fd73"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 07:06:33 crc kubenswrapper[4492]: I1126 07:06:33.724008 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4f397877-f399-472d-a32d-11cb9b87fd73-config" (OuterVolumeSpecName: "config") pod "4f397877-f399-472d-a32d-11cb9b87fd73" (UID: "4f397877-f399-472d-a32d-11cb9b87fd73"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 07:06:33 crc kubenswrapper[4492]: I1126 07:06:33.734811 4492 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4f397877-f399-472d-a32d-11cb9b87fd73-config\") on node \"crc\" DevicePath \"\""
Nov 26 07:06:33 crc kubenswrapper[4492]: I1126 07:06:33.734837 4492 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/4f397877-f399-472d-a32d-11cb9b87fd73-dns-swift-storage-0\") on node \"crc\" DevicePath \"\""
Nov 26 07:06:33 crc kubenswrapper[4492]: I1126 07:06:33.734850 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2t8zr\" (UniqueName: \"kubernetes.io/projected/4f397877-f399-472d-a32d-11cb9b87fd73-kube-api-access-2t8zr\") on node \"crc\" DevicePath \"\""
Nov 26 07:06:33 crc kubenswrapper[4492]: I1126 07:06:33.755109 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4f397877-f399-472d-a32d-11cb9b87fd73-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "4f397877-f399-472d-a32d-11cb9b87fd73" (UID: "4f397877-f399-472d-a32d-11cb9b87fd73"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 07:06:33 crc kubenswrapper[4492]: I1126 07:06:33.759226 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4f397877-f399-472d-a32d-11cb9b87fd73-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "4f397877-f399-472d-a32d-11cb9b87fd73" (UID: "4f397877-f399-472d-a32d-11cb9b87fd73"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 07:06:33 crc kubenswrapper[4492]: I1126 07:06:33.765647 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4f397877-f399-472d-a32d-11cb9b87fd73-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "4f397877-f399-472d-a32d-11cb9b87fd73" (UID: "4f397877-f399-472d-a32d-11cb9b87fd73"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 07:06:33 crc kubenswrapper[4492]: I1126 07:06:33.836623 4492 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4f397877-f399-472d-a32d-11cb9b87fd73-dns-svc\") on node \"crc\" DevicePath \"\""
Nov 26 07:06:33 crc kubenswrapper[4492]: I1126 07:06:33.836846 4492 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4f397877-f399-472d-a32d-11cb9b87fd73-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Nov 26 07:06:33 crc kubenswrapper[4492]: I1126 07:06:33.836856 4492 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4f397877-f399-472d-a32d-11cb9b87fd73-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Nov 26 07:06:33 crc kubenswrapper[4492]: I1126 07:06:33.838491 4492 scope.go:117] "RemoveContainer" containerID="e08d804f4aee77c042bd46163828d85cdfff70f369ca3e0e6d23dace54d1eefa"
Nov 26 07:06:33 crc kubenswrapper[4492]: E1126 07:06:33.839008 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e08d804f4aee77c042bd46163828d85cdfff70f369ca3e0e6d23dace54d1eefa\": container with ID starting with e08d804f4aee77c042bd46163828d85cdfff70f369ca3e0e6d23dace54d1eefa not found: ID does not exist" containerID="e08d804f4aee77c042bd46163828d85cdfff70f369ca3e0e6d23dace54d1eefa"
Nov 26 07:06:33 crc kubenswrapper[4492]: I1126 07:06:33.839056 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e08d804f4aee77c042bd46163828d85cdfff70f369ca3e0e6d23dace54d1eefa"} err="failed to get container status \"e08d804f4aee77c042bd46163828d85cdfff70f369ca3e0e6d23dace54d1eefa\": rpc error: code = NotFound desc = could not find container \"e08d804f4aee77c042bd46163828d85cdfff70f369ca3e0e6d23dace54d1eefa\": container with ID starting with e08d804f4aee77c042bd46163828d85cdfff70f369ca3e0e6d23dace54d1eefa not found: ID does not exist"
Nov 26 07:06:33 crc kubenswrapper[4492]: I1126 07:06:33.839080 4492 scope.go:117] "RemoveContainer" containerID="dcd23b3979acf9eeb360e46d62496cb82707a3ad11569bbdc413d7006cb80318"
Nov 26 07:06:33 crc kubenswrapper[4492]: E1126 07:06:33.839509 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dcd23b3979acf9eeb360e46d62496cb82707a3ad11569bbdc413d7006cb80318\": container with ID starting with dcd23b3979acf9eeb360e46d62496cb82707a3ad11569bbdc413d7006cb80318 not found: ID does not exist" containerID="dcd23b3979acf9eeb360e46d62496cb82707a3ad11569bbdc413d7006cb80318"
Nov 26 07:06:33 crc kubenswrapper[4492]: I1126 07:06:33.839534 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dcd23b3979acf9eeb360e46d62496cb82707a3ad11569bbdc413d7006cb80318"} err="failed to get container status \"dcd23b3979acf9eeb360e46d62496cb82707a3ad11569bbdc413d7006cb80318\": rpc error: code = NotFound desc = could not find container \"dcd23b3979acf9eeb360e46d62496cb82707a3ad11569bbdc413d7006cb80318\": container with ID starting with dcd23b3979acf9eeb360e46d62496cb82707a3ad11569bbdc413d7006cb80318 not found: ID does not exist"
Nov 26 07:06:33 crc kubenswrapper[4492]: I1126 07:06:33.899232 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=3.8992062069999998 podStartE2EDuration="3.899206207s" podCreationTimestamp="2025-11-26 07:06:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:06:33.613459629 +0000 UTC m=+1089.497347928" watchObservedRunningTime="2025-11-26 07:06:33.899206207 +0000 UTC m=+1089.783094505"
Nov 26 07:06:33 crc kubenswrapper[4492]: I1126 07:06:33.905472 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-cb64f9797-5jdj2"]
Nov 26 07:06:33 crc kubenswrapper[4492]: I1126 07:06:33.910293 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-cb64f9797-5jdj2"]
Nov 26 07:06:34 crc kubenswrapper[4492]: I1126 07:06:34.368295 4492 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-7bb6557f96-rgc7g" podUID="a15ec528-9195-4dfe-95b7-e30a44f74b44" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.153:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.153:8443: connect: connection refused"
Nov 26 07:06:34 crc kubenswrapper[4492]: I1126 07:06:34.455079 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4f397877-f399-472d-a32d-11cb9b87fd73" path="/var/lib/kubelet/pods/4f397877-f399-472d-a32d-11cb9b87fd73/volumes"
Nov 26 07:06:34 crc kubenswrapper[4492]: I1126 07:06:34.617465 4492 generic.go:334] "Generic (PLEG): container finished" podID="a15ec528-9195-4dfe-95b7-e30a44f74b44" containerID="43fafd6a195677c69054be8121016986713c956dc138b464b122d51ce9a8af53" exitCode=0
Nov 26 07:06:34 crc kubenswrapper[4492]: I1126 07:06:34.617546 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-7bb6557f96-rgc7g" event={"ID":"a15ec528-9195-4dfe-95b7-e30a44f74b44","Type":"ContainerDied","Data":"43fafd6a195677c69054be8121016986713c956dc138b464b122d51ce9a8af53"}
Nov 26 07:06:34 crc kubenswrapper[4492]: I1126 07:06:34.621813 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f353995e-4082-4459-9591-efae7915aed4","Type":"ContainerStarted","Data":"d96a99085304c63f2edf23c7670aaaddbd6572bbf5da0d4825f786c569c7aaa5"}
Nov 26 07:06:35 crc kubenswrapper[4492]: I1126 07:06:35.627632 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/keystone-647b7bfcb8-p68l9"
Nov 26 07:06:35 crc kubenswrapper[4492]: I1126 07:06:35.632795 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f353995e-4082-4459-9591-efae7915aed4","Type":"ContainerStarted","Data":"cea6ffb26099a35ef4306859c1b69b264a4d070de2ca028f1f7de7227fb6d3cc"}
Nov 26 07:06:36 crc kubenswrapper[4492]: I1126 07:06:36.644965 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f353995e-4082-4459-9591-efae7915aed4","Type":"ContainerStarted","Data":"3f4b7420aa02033ca2e78ba393017da8d6a10769f11228d4988069d07b599151"}
Nov 26 07:06:36 crc kubenswrapper[4492]: I1126 07:06:36.645375 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0"
Nov 26 07:06:36 crc kubenswrapper[4492]: I1126 07:06:36.669089 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.077069324 podStartE2EDuration="5.669069309s" podCreationTimestamp="2025-11-26 07:06:31 +0000 UTC" firstStartedPulling="2025-11-26 07:06:32.567487655 +0000 UTC m=+1088.451375953" lastFinishedPulling="2025-11-26 07:06:36.15948764 +0000 UTC m=+1092.043375938" observedRunningTime="2025-11-26 07:06:36.661668293 +0000 UTC m=+1092.545556592" watchObservedRunningTime="2025-11-26 07:06:36.669069309 +0000 UTC m=+1092.552957608"
Nov 26 07:06:37 crc kubenswrapper[4492]: I1126 07:06:37.262681 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-6979b94f94-5ddp5"
Nov 26 07:06:37 crc kubenswrapper[4492]: I1126 07:06:37.415430 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0d14378e-a9ce-4c01-854f-a5f9ca277f73-config-data\") pod \"0d14378e-a9ce-4c01-854f-a5f9ca277f73\" (UID: \"0d14378e-a9ce-4c01-854f-a5f9ca277f73\") "
Nov 26 07:06:37 crc kubenswrapper[4492]: I1126 07:06:37.415656 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0d14378e-a9ce-4c01-854f-a5f9ca277f73-logs\") pod \"0d14378e-a9ce-4c01-854f-a5f9ca277f73\" (UID: \"0d14378e-a9ce-4c01-854f-a5f9ca277f73\") "
Nov 26 07:06:37 crc kubenswrapper[4492]: I1126 07:06:37.415695 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f9jfx\" (UniqueName: \"kubernetes.io/projected/0d14378e-a9ce-4c01-854f-a5f9ca277f73-kube-api-access-f9jfx\") pod \"0d14378e-a9ce-4c01-854f-a5f9ca277f73\" (UID: \"0d14378e-a9ce-4c01-854f-a5f9ca277f73\") "
Nov 26 07:06:37 crc kubenswrapper[4492]: I1126 07:06:37.415808 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0d14378e-a9ce-4c01-854f-a5f9ca277f73-config-data-custom\") pod \"0d14378e-a9ce-4c01-854f-a5f9ca277f73\" (UID: \"0d14378e-a9ce-4c01-854f-a5f9ca277f73\") "
Nov 26 07:06:37 crc kubenswrapper[4492]: I1126 07:06:37.415887 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0d14378e-a9ce-4c01-854f-a5f9ca277f73-combined-ca-bundle\") pod \"0d14378e-a9ce-4c01-854f-a5f9ca277f73\" (UID: \"0d14378e-a9ce-4c01-854f-a5f9ca277f73\") "
Nov 26 07:06:37 crc kubenswrapper[4492]: I1126 07:06:37.417061 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0d14378e-a9ce-4c01-854f-a5f9ca277f73-logs" (OuterVolumeSpecName: "logs") pod "0d14378e-a9ce-4c01-854f-a5f9ca277f73" (UID: "0d14378e-a9ce-4c01-854f-a5f9ca277f73"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 07:06:37 crc kubenswrapper[4492]: I1126 07:06:37.423432 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0d14378e-a9ce-4c01-854f-a5f9ca277f73-kube-api-access-f9jfx" (OuterVolumeSpecName: "kube-api-access-f9jfx") pod "0d14378e-a9ce-4c01-854f-a5f9ca277f73" (UID: "0d14378e-a9ce-4c01-854f-a5f9ca277f73"). InnerVolumeSpecName "kube-api-access-f9jfx". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 07:06:37 crc kubenswrapper[4492]: I1126 07:06:37.425205 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0d14378e-a9ce-4c01-854f-a5f9ca277f73-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "0d14378e-a9ce-4c01-854f-a5f9ca277f73" (UID: "0d14378e-a9ce-4c01-854f-a5f9ca277f73"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 07:06:37 crc kubenswrapper[4492]: I1126 07:06:37.456260 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0d14378e-a9ce-4c01-854f-a5f9ca277f73-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0d14378e-a9ce-4c01-854f-a5f9ca277f73" (UID: "0d14378e-a9ce-4c01-854f-a5f9ca277f73"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 07:06:37 crc kubenswrapper[4492]: I1126 07:06:37.474943 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0d14378e-a9ce-4c01-854f-a5f9ca277f73-config-data" (OuterVolumeSpecName: "config-data") pod "0d14378e-a9ce-4c01-854f-a5f9ca277f73" (UID: "0d14378e-a9ce-4c01-854f-a5f9ca277f73"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 07:06:37 crc kubenswrapper[4492]: I1126 07:06:37.519362 4492 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0d14378e-a9ce-4c01-854f-a5f9ca277f73-config-data\") on node \"crc\" DevicePath \"\""
Nov 26 07:06:37 crc kubenswrapper[4492]: I1126 07:06:37.519400 4492 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0d14378e-a9ce-4c01-854f-a5f9ca277f73-logs\") on node \"crc\" DevicePath \"\""
Nov 26 07:06:37 crc kubenswrapper[4492]: I1126 07:06:37.519414 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f9jfx\" (UniqueName: \"kubernetes.io/projected/0d14378e-a9ce-4c01-854f-a5f9ca277f73-kube-api-access-f9jfx\") on node \"crc\" DevicePath \"\""
Nov 26 07:06:37 crc kubenswrapper[4492]: I1126 07:06:37.519426 4492 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0d14378e-a9ce-4c01-854f-a5f9ca277f73-config-data-custom\") on node \"crc\" DevicePath \"\""
Nov 26 07:06:37 crc kubenswrapper[4492]: I1126 07:06:37.519435 4492 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0d14378e-a9ce-4c01-854f-a5f9ca277f73-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 26 07:06:37 crc kubenswrapper[4492]: I1126 07:06:37.656808 4492 generic.go:334] "Generic (PLEG): container finished" podID="0d14378e-a9ce-4c01-854f-a5f9ca277f73" containerID="03360ce9a4b3df2cc2b1f7000a1ee59185b92440cf62ab59fca8ac04e6c412ef" exitCode=0
Nov 26 07:06:37 crc kubenswrapper[4492]: I1126 07:06:37.657783 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-6979b94f94-5ddp5" event={"ID":"0d14378e-a9ce-4c01-854f-a5f9ca277f73","Type":"ContainerDied","Data":"03360ce9a4b3df2cc2b1f7000a1ee59185b92440cf62ab59fca8ac04e6c412ef"}
Nov 26 07:06:37 crc kubenswrapper[4492]: I1126 07:06:37.657828 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-6979b94f94-5ddp5" event={"ID":"0d14378e-a9ce-4c01-854f-a5f9ca277f73","Type":"ContainerDied","Data":"485c287263dc31b1985efbfbd7c1195ed2984b98355287da0cacd958ae8bbf7f"}
Nov 26 07:06:37 crc kubenswrapper[4492]: I1126 07:06:37.657850 4492 scope.go:117] "RemoveContainer" containerID="03360ce9a4b3df2cc2b1f7000a1ee59185b92440cf62ab59fca8ac04e6c412ef"
Nov 26 07:06:37 crc kubenswrapper[4492]: I1126 07:06:37.658025 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-6979b94f94-5ddp5"
Nov 26 07:06:37 crc kubenswrapper[4492]: I1126 07:06:37.690305 4492 scope.go:117] "RemoveContainer" containerID="ce4142b9167acdddbb6b01998e851089dd94701f8afcba4a1e2db70d070f5066"
Nov 26 07:06:37 crc kubenswrapper[4492]: I1126 07:06:37.697210 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-6979b94f94-5ddp5"]
Nov 26 07:06:37 crc kubenswrapper[4492]: I1126 07:06:37.705343 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-api-6979b94f94-5ddp5"]
Nov 26 07:06:37 crc kubenswrapper[4492]: I1126 07:06:37.721072 4492 scope.go:117] "RemoveContainer" containerID="03360ce9a4b3df2cc2b1f7000a1ee59185b92440cf62ab59fca8ac04e6c412ef"
Nov 26 07:06:37 crc kubenswrapper[4492]: E1126 07:06:37.721375 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"03360ce9a4b3df2cc2b1f7000a1ee59185b92440cf62ab59fca8ac04e6c412ef\": container with ID starting with 03360ce9a4b3df2cc2b1f7000a1ee59185b92440cf62ab59fca8ac04e6c412ef not found: ID does not exist" containerID="03360ce9a4b3df2cc2b1f7000a1ee59185b92440cf62ab59fca8ac04e6c412ef"
Nov 26 07:06:37 crc kubenswrapper[4492]: I1126 07:06:37.721403 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"03360ce9a4b3df2cc2b1f7000a1ee59185b92440cf62ab59fca8ac04e6c412ef"} err="failed to get container status \"03360ce9a4b3df2cc2b1f7000a1ee59185b92440cf62ab59fca8ac04e6c412ef\": rpc error: code = NotFound desc = could not find container \"03360ce9a4b3df2cc2b1f7000a1ee59185b92440cf62ab59fca8ac04e6c412ef\": container with ID starting with 03360ce9a4b3df2cc2b1f7000a1ee59185b92440cf62ab59fca8ac04e6c412ef not found: ID does not exist"
Nov 26 07:06:37 crc kubenswrapper[4492]: I1126 07:06:37.721422 4492 scope.go:117] "RemoveContainer" containerID="ce4142b9167acdddbb6b01998e851089dd94701f8afcba4a1e2db70d070f5066"
Nov 26 07:06:37 crc kubenswrapper[4492]: E1126 07:06:37.721737 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ce4142b9167acdddbb6b01998e851089dd94701f8afcba4a1e2db70d070f5066\": container with ID starting with ce4142b9167acdddbb6b01998e851089dd94701f8afcba4a1e2db70d070f5066 not found: ID does not exist" containerID="ce4142b9167acdddbb6b01998e851089dd94701f8afcba4a1e2db70d070f5066"
Nov 26 07:06:37 crc kubenswrapper[4492]: I1126 07:06:37.721756 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ce4142b9167acdddbb6b01998e851089dd94701f8afcba4a1e2db70d070f5066"} err="failed to get container status \"ce4142b9167acdddbb6b01998e851089dd94701f8afcba4a1e2db70d070f5066\": rpc error: code = NotFound desc = could not find container \"ce4142b9167acdddbb6b01998e851089dd94701f8afcba4a1e2db70d070f5066\": container with ID starting with ce4142b9167acdddbb6b01998e851089dd94701f8afcba4a1e2db70d070f5066 not found: ID does not exist"
Nov 26 07:06:38 crc kubenswrapper[4492]: I1126 07:06:38.013586 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0"
Nov 26 07:06:38 crc kubenswrapper[4492]: I1126 07:06:38.078635 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"]
Nov 26 07:06:38 crc kubenswrapper[4492]: I1126 07:06:38.447912 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0d14378e-a9ce-4c01-854f-a5f9ca277f73" path="/var/lib/kubelet/pods/0d14378e-a9ce-4c01-854f-a5f9ca277f73/volumes"
Nov 26 07:06:38 crc kubenswrapper[4492]: I1126 07:06:38.667336 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="6c6c7b43-f00e-4013-ab11-b0471d7d1f68" containerName="cinder-scheduler" containerID="cri-o://cecf3a295e64919cdc40f558352d93e08f30c11c0b7f2963a84b766132ced8de" gracePeriod=30
Nov 26 07:06:38 crc kubenswrapper[4492]: I1126 07:06:38.667914 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="6c6c7b43-f00e-4013-ab11-b0471d7d1f68" containerName="probe" containerID="cri-o://16f53fca6cf79164a90bde5dbdcf56b575255f3a74db4594867316f492ce58fc" gracePeriod=30
Nov 26 07:06:39 crc kubenswrapper[4492]: E1126 07:06:39.596162 4492 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6c6c7b43_f00e_4013_ab11_b0471d7d1f68.slice/crio-conmon-16f53fca6cf79164a90bde5dbdcf56b575255f3a74db4594867316f492ce58fc.scope\": RecentStats: unable to find data in memory cache]"
Nov 26 07:06:39 crc kubenswrapper[4492]: I1126 07:06:39.680518 4492 generic.go:334] "Generic (PLEG): container finished" podID="6c6c7b43-f00e-4013-ab11-b0471d7d1f68" containerID="16f53fca6cf79164a90bde5dbdcf56b575255f3a74db4594867316f492ce58fc" exitCode=0
Nov 26 07:06:39 crc kubenswrapper[4492]: I1126 07:06:39.680567 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"6c6c7b43-f00e-4013-ab11-b0471d7d1f68","Type":"ContainerDied","Data":"16f53fca6cf79164a90bde5dbdcf56b575255f3a74db4594867316f492ce58fc"}
Nov 26 07:06:39 crc kubenswrapper[4492]: I1126 07:06:39.998498 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-proxy-645d6d85d7-cvr9h"]
Nov 26 07:06:39 crc kubenswrapper[4492]: E1126 07:06:39.998994 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4f397877-f399-472d-a32d-11cb9b87fd73" containerName="init"
Nov 26 07:06:39 crc kubenswrapper[4492]: I1126 07:06:39.999014 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="4f397877-f399-472d-a32d-11cb9b87fd73" containerName="init"
Nov 26 07:06:39 crc kubenswrapper[4492]: E1126 07:06:39.999040 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d14378e-a9ce-4c01-854f-a5f9ca277f73" containerName="barbican-api"
Nov 26 07:06:39 crc kubenswrapper[4492]: I1126 07:06:39.999047 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d14378e-a9ce-4c01-854f-a5f9ca277f73" containerName="barbican-api"
Nov 26 07:06:39 crc kubenswrapper[4492]: E1126 07:06:39.999061 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d14378e-a9ce-4c01-854f-a5f9ca277f73" containerName="barbican-api-log"
Nov 26 07:06:39 crc kubenswrapper[4492]: I1126 07:06:39.999070 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d14378e-a9ce-4c01-854f-a5f9ca277f73" containerName="barbican-api-log"
Nov 26 07:06:39 crc kubenswrapper[4492]: E1126 07:06:39.999087 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4f397877-f399-472d-a32d-11cb9b87fd73" containerName="dnsmasq-dns"
Nov 26 07:06:39 crc kubenswrapper[4492]: I1126 07:06:39.999093 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="4f397877-f399-472d-a32d-11cb9b87fd73" containerName="dnsmasq-dns"
Nov 26 07:06:39 crc kubenswrapper[4492]: I1126 07:06:39.999288 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="0d14378e-a9ce-4c01-854f-a5f9ca277f73" containerName="barbican-api"
Nov 26 07:06:39 crc kubenswrapper[4492]: I1126 07:06:39.999303 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="0d14378e-a9ce-4c01-854f-a5f9ca277f73" containerName="barbican-api-log"
Nov 26 07:06:39 crc kubenswrapper[4492]: I1126 07:06:39.999313 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="4f397877-f399-472d-a32d-11cb9b87fd73" containerName="dnsmasq-dns"
Nov 26 07:06:40 crc kubenswrapper[4492]: I1126 07:06:40.000349 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-645d6d85d7-cvr9h"
Nov 26 07:06:40 crc kubenswrapper[4492]: I1126 07:06:40.003121 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-public-svc"
Nov 26 07:06:40 crc kubenswrapper[4492]: I1126 07:06:40.003346 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-internal-svc"
Nov 26 07:06:40 crc kubenswrapper[4492]: I1126 07:06:40.004631 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data"
Nov 26 07:06:40 crc kubenswrapper[4492]: I1126 07:06:40.042673 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-645d6d85d7-cvr9h"]
Nov 26 07:06:40 crc kubenswrapper[4492]: I1126 07:06:40.171829 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"]
Nov 26 07:06:40 crc kubenswrapper[4492]: I1126 07:06:40.173333 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient"
Nov 26 07:06:40 crc kubenswrapper[4492]: I1126 07:06:40.176455 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-config-secret"
Nov 26 07:06:40 crc kubenswrapper[4492]: I1126 07:06:40.185242 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstackclient-openstackclient-dockercfg-6rcf8"
Nov 26 07:06:40 crc kubenswrapper[4492]: I1126 07:06:40.187791 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config"
Nov 26 07:06:40 crc kubenswrapper[4492]: I1126 07:06:40.204441 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e44b94a7-c7a7-40e1-8d00-9f27e0e0639e-combined-ca-bundle\") pod \"swift-proxy-645d6d85d7-cvr9h\" (UID: \"e44b94a7-c7a7-40e1-8d00-9f27e0e0639e\") " pod="openstack/swift-proxy-645d6d85d7-cvr9h"
Nov 26 07:06:40 crc kubenswrapper[4492]: I1126 07:06:40.215282 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4ddp4\" (UniqueName: \"kubernetes.io/projected/e44b94a7-c7a7-40e1-8d00-9f27e0e0639e-kube-api-access-4ddp4\") pod \"swift-proxy-645d6d85d7-cvr9h\" (UID: \"e44b94a7-c7a7-40e1-8d00-9f27e0e0639e\") " pod="openstack/swift-proxy-645d6d85d7-cvr9h"
Nov 26 07:06:40 crc kubenswrapper[4492]: I1126 07:06:40.215355 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e44b94a7-c7a7-40e1-8d00-9f27e0e0639e-config-data\") pod \"swift-proxy-645d6d85d7-cvr9h\" (UID: \"e44b94a7-c7a7-40e1-8d00-9f27e0e0639e\") " pod="openstack/swift-proxy-645d6d85d7-cvr9h"
Nov 26 07:06:40 crc kubenswrapper[4492]: I1126 07:06:40.215413 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e44b94a7-c7a7-40e1-8d00-9f27e0e0639e-run-httpd\") pod \"swift-proxy-645d6d85d7-cvr9h\" (UID: \"e44b94a7-c7a7-40e1-8d00-9f27e0e0639e\") " pod="openstack/swift-proxy-645d6d85d7-cvr9h"
Nov 26 07:06:40 crc kubenswrapper[4492]: I1126 07:06:40.215479 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e44b94a7-c7a7-40e1-8d00-9f27e0e0639e-log-httpd\") pod \"swift-proxy-645d6d85d7-cvr9h\" (UID: \"e44b94a7-c7a7-40e1-8d00-9f27e0e0639e\") " pod="openstack/swift-proxy-645d6d85d7-cvr9h"
Nov 26 07:06:40 crc kubenswrapper[4492]: I1126 07:06:40.215530 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/e44b94a7-c7a7-40e1-8d00-9f27e0e0639e-etc-swift\") pod \"swift-proxy-645d6d85d7-cvr9h\" (UID: \"e44b94a7-c7a7-40e1-8d00-9f27e0e0639e\") " pod="openstack/swift-proxy-645d6d85d7-cvr9h"
Nov 26 07:06:40 crc kubenswrapper[4492]: I1126 07:06:40.215557 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e44b94a7-c7a7-40e1-8d00-9f27e0e0639e-public-tls-certs\") pod \"swift-proxy-645d6d85d7-cvr9h\" (UID: \"e44b94a7-c7a7-40e1-8d00-9f27e0e0639e\") " pod="openstack/swift-proxy-645d6d85d7-cvr9h"
Nov 26 07:06:40 crc kubenswrapper[4492]: I1126 07:06:40.215605 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e44b94a7-c7a7-40e1-8d00-9f27e0e0639e-internal-tls-certs\") pod \"swift-proxy-645d6d85d7-cvr9h\" (UID: \"e44b94a7-c7a7-40e1-8d00-9f27e0e0639e\") " pod="openstack/swift-proxy-645d6d85d7-cvr9h"
Nov 26 07:06:40 crc kubenswrapper[4492]: I1126 07:06:40.228442 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"]
Nov 26 07:06:40 crc kubenswrapper[4492]: I1126 07:06:40.321236 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e44b94a7-c7a7-40e1-8d00-9f27e0e0639e-public-tls-certs\") pod \"swift-proxy-645d6d85d7-cvr9h\" (UID: \"e44b94a7-c7a7-40e1-8d00-9f27e0e0639e\") " pod="openstack/swift-proxy-645d6d85d7-cvr9h"
Nov 26 07:06:40 crc kubenswrapper[4492]: I1126 07:06:40.321305 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e44b94a7-c7a7-40e1-8d00-9f27e0e0639e-internal-tls-certs\") pod \"swift-proxy-645d6d85d7-cvr9h\" (UID: \"e44b94a7-c7a7-40e1-8d00-9f27e0e0639e\") " pod="openstack/swift-proxy-645d6d85d7-cvr9h"
Nov 26 07:06:40 crc kubenswrapper[4492]: I1126 07:06:40.321398 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e44b94a7-c7a7-40e1-8d00-9f27e0e0639e-combined-ca-bundle\") pod \"swift-proxy-645d6d85d7-cvr9h\" (UID: \"e44b94a7-c7a7-40e1-8d00-9f27e0e0639e\") " pod="openstack/swift-proxy-645d6d85d7-cvr9h"
Nov 26 07:06:40 crc kubenswrapper[4492]: I1126 07:06:40.321434 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4ddp4\" (UniqueName: \"kubernetes.io/projected/e44b94a7-c7a7-40e1-8d00-9f27e0e0639e-kube-api-access-4ddp4\") pod \"swift-proxy-645d6d85d7-cvr9h\" (UID: \"e44b94a7-c7a7-40e1-8d00-9f27e0e0639e\") " pod="openstack/swift-proxy-645d6d85d7-cvr9h"
Nov 26 07:06:40 crc kubenswrapper[4492]: I1126 07:06:40.321468 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qp7jt\" (UniqueName: \"kubernetes.io/projected/7d2be4f4-9426-4905-b141-a93775673428-kube-api-access-qp7jt\") pod \"openstackclient\" (UID: \"7d2be4f4-9426-4905-b141-a93775673428\") " pod="openstack/openstackclient"
Nov 26 07:06:40 crc kubenswrapper[4492]: I1126 07:06:40.321494 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e44b94a7-c7a7-40e1-8d00-9f27e0e0639e-config-data\") pod \"swift-proxy-645d6d85d7-cvr9h\" (UID: \"e44b94a7-c7a7-40e1-8d00-9f27e0e0639e\") " pod="openstack/swift-proxy-645d6d85d7-cvr9h"
Nov 26 07:06:40 crc kubenswrapper[4492]: I1126 07:06:40.321519 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e44b94a7-c7a7-40e1-8d00-9f27e0e0639e-run-httpd\") pod \"swift-proxy-645d6d85d7-cvr9h\" (UID: \"e44b94a7-c7a7-40e1-8d00-9f27e0e0639e\") " pod="openstack/swift-proxy-645d6d85d7-cvr9h"
Nov 26 07:06:40 crc kubenswrapper[4492]: I1126 07:06:40.321540 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7d2be4f4-9426-4905-b141-a93775673428-combined-ca-bundle\") pod \"openstackclient\" (UID: \"7d2be4f4-9426-4905-b141-a93775673428\") " pod="openstack/openstackclient"
Nov 26 07:06:40 crc kubenswrapper[4492]: I1126 07:06:40.321576 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e44b94a7-c7a7-40e1-8d00-9f27e0e0639e-log-httpd\") pod \"swift-proxy-645d6d85d7-cvr9h\" (UID: \"e44b94a7-c7a7-40e1-8d00-9f27e0e0639e\") " pod="openstack/swift-proxy-645d6d85d7-cvr9h"
Nov 26 07:06:40 crc kubenswrapper[4492]: I1126 07:06:40.321596 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/7d2be4f4-9426-4905-b141-a93775673428-openstack-config-secret\") pod \"openstackclient\" (UID: \"7d2be4f4-9426-4905-b141-a93775673428\") " pod="openstack/openstackclient"
Nov 26 07:06:40 crc kubenswrapper[4492]: I1126 07:06:40.321621 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/7d2be4f4-9426-4905-b141-a93775673428-openstack-config\") pod \"openstackclient\" (UID: \"7d2be4f4-9426-4905-b141-a93775673428\") " pod="openstack/openstackclient"
Nov 26 07:06:40 crc kubenswrapper[4492]: I1126 07:06:40.321643 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/e44b94a7-c7a7-40e1-8d00-9f27e0e0639e-etc-swift\") pod \"swift-proxy-645d6d85d7-cvr9h\" (UID: \"e44b94a7-c7a7-40e1-8d00-9f27e0e0639e\") " pod="openstack/swift-proxy-645d6d85d7-cvr9h"
Nov 26 07:06:40 crc kubenswrapper[4492]: I1126 07:06:40.323733 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e44b94a7-c7a7-40e1-8d00-9f27e0e0639e-log-httpd\") pod \"swift-proxy-645d6d85d7-cvr9h\" (UID: \"e44b94a7-c7a7-40e1-8d00-9f27e0e0639e\") " pod="openstack/swift-proxy-645d6d85d7-cvr9h"
Nov 26 07:06:40 crc kubenswrapper[4492]: I1126 07:06:40.324037 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e44b94a7-c7a7-40e1-8d00-9f27e0e0639e-run-httpd\") pod \"swift-proxy-645d6d85d7-cvr9h\" (UID: \"e44b94a7-c7a7-40e1-8d00-9f27e0e0639e\") " pod="openstack/swift-proxy-645d6d85d7-cvr9h"
Nov 26 07:06:40 crc kubenswrapper[4492]: I1126 07:06:40.331789 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e44b94a7-c7a7-40e1-8d00-9f27e0e0639e-config-data\") pod \"swift-proxy-645d6d85d7-cvr9h\" (UID: \"e44b94a7-c7a7-40e1-8d00-9f27e0e0639e\") " pod="openstack/swift-proxy-645d6d85d7-cvr9h"
Nov 26 07:06:40 crc kubenswrapper[4492]: I1126 07:06:40.332785 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e44b94a7-c7a7-40e1-8d00-9f27e0e0639e-internal-tls-certs\") pod \"swift-proxy-645d6d85d7-cvr9h\" (UID: \"e44b94a7-c7a7-40e1-8d00-9f27e0e0639e\") " pod="openstack/swift-proxy-645d6d85d7-cvr9h"
Nov 26 07:06:40 crc kubenswrapper[4492]: I1126 07:06:40.334617 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e44b94a7-c7a7-40e1-8d00-9f27e0e0639e-public-tls-certs\") pod \"swift-proxy-645d6d85d7-cvr9h\" (UID: \"e44b94a7-c7a7-40e1-8d00-9f27e0e0639e\") " pod="openstack/swift-proxy-645d6d85d7-cvr9h"
Nov 26 07:06:40 crc kubenswrapper[4492]: I1126 07:06:40.341880 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e44b94a7-c7a7-40e1-8d00-9f27e0e0639e-combined-ca-bundle\") pod \"swift-proxy-645d6d85d7-cvr9h\" (UID: \"e44b94a7-c7a7-40e1-8d00-9f27e0e0639e\") " pod="openstack/swift-proxy-645d6d85d7-cvr9h"
Nov 26 07:06:40 crc kubenswrapper[4492]: I1126 07:06:40.347475 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/e44b94a7-c7a7-40e1-8d00-9f27e0e0639e-etc-swift\") pod \"swift-proxy-645d6d85d7-cvr9h\" (UID: \"e44b94a7-c7a7-40e1-8d00-9f27e0e0639e\") " pod="openstack/swift-proxy-645d6d85d7-cvr9h"
Nov 26 07:06:40 crc kubenswrapper[4492]: I1126 07:06:40.356640 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4ddp4\" (UniqueName: \"kubernetes.io/projected/e44b94a7-c7a7-40e1-8d00-9f27e0e0639e-kube-api-access-4ddp4\") pod \"swift-proxy-645d6d85d7-cvr9h\" (UID: \"e44b94a7-c7a7-40e1-8d00-9f27e0e0639e\") " pod="openstack/swift-proxy-645d6d85d7-cvr9h"
Nov 26 07:06:40 crc kubenswrapper[4492]: I1126 07:06:40.424141 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qp7jt\" (UniqueName: \"kubernetes.io/projected/7d2be4f4-9426-4905-b141-a93775673428-kube-api-access-qp7jt\") pod \"openstackclient\" (UID: \"7d2be4f4-9426-4905-b141-a93775673428\") " pod="openstack/openstackclient"
Nov 26 07:06:40 crc kubenswrapper[4492]: I1126 07:06:40.424494 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7d2be4f4-9426-4905-b141-a93775673428-combined-ca-bundle\") pod \"openstackclient\" (UID: \"7d2be4f4-9426-4905-b141-a93775673428\") " pod="openstack/openstackclient"
Nov 26 07:06:40 crc kubenswrapper[4492]: I1126 07:06:40.424582 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/7d2be4f4-9426-4905-b141-a93775673428-openstack-config-secret\") pod \"openstackclient\" (UID: \"7d2be4f4-9426-4905-b141-a93775673428\") " pod="openstack/openstackclient"
Nov 26 07:06:40 crc kubenswrapper[4492]: I1126 07:06:40.424622 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/7d2be4f4-9426-4905-b141-a93775673428-openstack-config\") pod \"openstackclient\" (UID: \"7d2be4f4-9426-4905-b141-a93775673428\") " pod="openstack/openstackclient"
Nov 26 07:06:40 crc kubenswrapper[4492]: I1126 07:06:40.425805 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/7d2be4f4-9426-4905-b141-a93775673428-openstack-config\") pod \"openstackclient\" (UID: \"7d2be4f4-9426-4905-b141-a93775673428\") " pod="openstack/openstackclient"
Nov 26 07:06:40 crc kubenswrapper[4492]: I1126 07:06:40.431644 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7d2be4f4-9426-4905-b141-a93775673428-combined-ca-bundle\") pod \"openstackclient\" (UID: \"7d2be4f4-9426-4905-b141-a93775673428\") " pod="openstack/openstackclient"
Nov 26 07:06:40 crc kubenswrapper[4492]: I1126 07:06:40.439023 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/7d2be4f4-9426-4905-b141-a93775673428-openstack-config-secret\") pod \"openstackclient\" (UID: \"7d2be4f4-9426-4905-b141-a93775673428\") " pod="openstack/openstackclient"
Nov 26 07:06:40 crc kubenswrapper[4492]: I1126 07:06:40.453029 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qp7jt\" (UniqueName: \"kubernetes.io/projected/7d2be4f4-9426-4905-b141-a93775673428-kube-api-access-qp7jt\") pod \"openstackclient\" (UID: \"7d2be4f4-9426-4905-b141-a93775673428\") " pod="openstack/openstackclient"
Nov 26 07:06:40 crc kubenswrapper[4492]: I1126 07:06:40.555695 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient"
Nov 26 07:06:40 crc kubenswrapper[4492]: I1126 07:06:40.620994 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-645d6d85d7-cvr9h"
Nov 26 07:06:40 crc kubenswrapper[4492]: I1126 07:06:40.704799 4492 generic.go:334] "Generic (PLEG): container finished" podID="6c6c7b43-f00e-4013-ab11-b0471d7d1f68" containerID="cecf3a295e64919cdc40f558352d93e08f30c11c0b7f2963a84b766132ced8de" exitCode=0
Nov 26 07:06:40 crc kubenswrapper[4492]: I1126 07:06:40.704841 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"6c6c7b43-f00e-4013-ab11-b0471d7d1f68","Type":"ContainerDied","Data":"cecf3a295e64919cdc40f558352d93e08f30c11c0b7f2963a84b766132ced8de"}
Nov 26 07:06:40 crc kubenswrapper[4492]: I1126 07:06:40.805289 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0"
Nov 26 07:06:40 crc kubenswrapper[4492]: I1126 07:06:40.852667 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6c6c7b43-f00e-4013-ab11-b0471d7d1f68-config-data-custom\") pod \"6c6c7b43-f00e-4013-ab11-b0471d7d1f68\" (UID: \"6c6c7b43-f00e-4013-ab11-b0471d7d1f68\") "
Nov 26 07:06:40 crc kubenswrapper[4492]: I1126 07:06:40.852858 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c6c7b43-f00e-4013-ab11-b0471d7d1f68-config-data\") pod \"6c6c7b43-f00e-4013-ab11-b0471d7d1f68\" (UID: \"6c6c7b43-f00e-4013-ab11-b0471d7d1f68\") "
Nov 26 07:06:40 crc kubenswrapper[4492]: I1126 07:06:40.852900 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c6c7b43-f00e-4013-ab11-b0471d7d1f68-combined-ca-bundle\") pod \"6c6c7b43-f00e-4013-ab11-b0471d7d1f68\" (UID: \"6c6c7b43-f00e-4013-ab11-b0471d7d1f68\") "
Nov 26 07:06:40 crc kubenswrapper[4492]: I1126 07:06:40.852917 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2q7gr\" (UniqueName: \"kubernetes.io/projected/6c6c7b43-f00e-4013-ab11-b0471d7d1f68-kube-api-access-2q7gr\") pod \"6c6c7b43-f00e-4013-ab11-b0471d7d1f68\" (UID: \"6c6c7b43-f00e-4013-ab11-b0471d7d1f68\") "
Nov 26 07:06:40 crc kubenswrapper[4492]: I1126 07:06:40.852982 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/6c6c7b43-f00e-4013-ab11-b0471d7d1f68-etc-machine-id\") pod \"6c6c7b43-f00e-4013-ab11-b0471d7d1f68\" (UID: \"6c6c7b43-f00e-4013-ab11-b0471d7d1f68\") "
Nov 26 07:06:40 crc kubenswrapper[4492]: I1126 07:06:40.853019 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6c6c7b43-f00e-4013-ab11-b0471d7d1f68-scripts\") pod \"6c6c7b43-f00e-4013-ab11-b0471d7d1f68\" (UID: \"6c6c7b43-f00e-4013-ab11-b0471d7d1f68\") "
Nov 26 07:06:40 crc kubenswrapper[4492]: I1126 07:06:40.854626 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6c6c7b43-f00e-4013-ab11-b0471d7d1f68-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "6c6c7b43-f00e-4013-ab11-b0471d7d1f68" (UID: "6c6c7b43-f00e-4013-ab11-b0471d7d1f68"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 26 07:06:40 crc kubenswrapper[4492]: I1126 07:06:40.855742 4492 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/6c6c7b43-f00e-4013-ab11-b0471d7d1f68-etc-machine-id\") on node \"crc\" DevicePath \"\""
Nov 26 07:06:40 crc kubenswrapper[4492]: I1126 07:06:40.864348 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6c6c7b43-f00e-4013-ab11-b0471d7d1f68-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "6c6c7b43-f00e-4013-ab11-b0471d7d1f68" (UID: "6c6c7b43-f00e-4013-ab11-b0471d7d1f68"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 07:06:40 crc kubenswrapper[4492]: I1126 07:06:40.868404 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6c6c7b43-f00e-4013-ab11-b0471d7d1f68-scripts" (OuterVolumeSpecName: "scripts") pod "6c6c7b43-f00e-4013-ab11-b0471d7d1f68" (UID: "6c6c7b43-f00e-4013-ab11-b0471d7d1f68"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 07:06:40 crc kubenswrapper[4492]: I1126 07:06:40.868554 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6c6c7b43-f00e-4013-ab11-b0471d7d1f68-kube-api-access-2q7gr" (OuterVolumeSpecName: "kube-api-access-2q7gr") pod "6c6c7b43-f00e-4013-ab11-b0471d7d1f68" (UID: "6c6c7b43-f00e-4013-ab11-b0471d7d1f68"). InnerVolumeSpecName "kube-api-access-2q7gr". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 07:06:40 crc kubenswrapper[4492]: I1126 07:06:40.961663 4492 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6c6c7b43-f00e-4013-ab11-b0471d7d1f68-config-data-custom\") on node \"crc\" DevicePath \"\""
Nov 26 07:06:40 crc kubenswrapper[4492]: I1126 07:06:40.961750 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2q7gr\" (UniqueName: \"kubernetes.io/projected/6c6c7b43-f00e-4013-ab11-b0471d7d1f68-kube-api-access-2q7gr\") on node \"crc\" DevicePath \"\""
Nov 26 07:06:40 crc kubenswrapper[4492]: I1126 07:06:40.961765 4492 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6c6c7b43-f00e-4013-ab11-b0471d7d1f68-scripts\") on node \"crc\" DevicePath \"\""
Nov 26 07:06:40 crc kubenswrapper[4492]: I1126 07:06:40.976472 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6c6c7b43-f00e-4013-ab11-b0471d7d1f68-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6c6c7b43-f00e-4013-ab11-b0471d7d1f68" (UID: "6c6c7b43-f00e-4013-ab11-b0471d7d1f68"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 07:06:41 crc kubenswrapper[4492]: I1126 07:06:41.030458 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6c6c7b43-f00e-4013-ab11-b0471d7d1f68-config-data" (OuterVolumeSpecName: "config-data") pod "6c6c7b43-f00e-4013-ab11-b0471d7d1f68" (UID: "6c6c7b43-f00e-4013-ab11-b0471d7d1f68"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 07:06:41 crc kubenswrapper[4492]: I1126 07:06:41.064344 4492 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c6c7b43-f00e-4013-ab11-b0471d7d1f68-config-data\") on node \"crc\" DevicePath \"\""
Nov 26 07:06:41 crc kubenswrapper[4492]: I1126 07:06:41.064380 4492 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c6c7b43-f00e-4013-ab11-b0471d7d1f68-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 26 07:06:41 crc kubenswrapper[4492]: I1126 07:06:41.162899 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"]
Nov 26 07:06:41 crc kubenswrapper[4492]: W1126 07:06:41.169437 4492 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7d2be4f4_9426_4905_b141_a93775673428.slice/crio-37ccba7f48d808fecbe7519f0896996f24ecdbb9aaec378e2cf3a2057d060bee WatchSource:0}: Error finding container 37ccba7f48d808fecbe7519f0896996f24ecdbb9aaec378e2cf3a2057d060bee: Status 404 returned error can't find the container with id 37ccba7f48d808fecbe7519f0896996f24ecdbb9aaec378e2cf3a2057d060bee
Nov 26 07:06:41 crc kubenswrapper[4492]: W1126 07:06:41.366098 4492 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode44b94a7_c7a7_40e1_8d00_9f27e0e0639e.slice/crio-c886f06dc77cf177bff56b546ff6065f7db46094f196bf851b9ee4dde845da52 WatchSource:0}: Error finding container c886f06dc77cf177bff56b546ff6065f7db46094f196bf851b9ee4dde845da52: Status 404 returned error can't find the container with id c886f06dc77cf177bff56b546ff6065f7db46094f196bf851b9ee4dde845da52
Nov 26 07:06:41 crc kubenswrapper[4492]: I1126 07:06:41.366790 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-645d6d85d7-cvr9h"]
Nov 26 07:06:41 crc kubenswrapper[4492]: I1126 07:06:41.560021 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Nov 26 07:06:41 crc kubenswrapper[4492]: I1126 07:06:41.560281 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="f353995e-4082-4459-9591-efae7915aed4" containerName="ceilometer-central-agent" containerID="cri-o://0d6371fa8930961b7c1b5ab38603c8ade2771dfbc6dd307a32ceaba2eb23213b" gracePeriod=30
Nov 26 07:06:41 crc kubenswrapper[4492]: I1126 07:06:41.560360 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="f353995e-4082-4459-9591-efae7915aed4" containerName="proxy-httpd" containerID="cri-o://3f4b7420aa02033ca2e78ba393017da8d6a10769f11228d4988069d07b599151" gracePeriod=30
Nov 26 07:06:41 crc kubenswrapper[4492]: I1126 07:06:41.560480 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="f353995e-4082-4459-9591-efae7915aed4" containerName="ceilometer-notification-agent" containerID="cri-o://d96a99085304c63f2edf23c7670aaaddbd6572bbf5da0d4825f786c569c7aaa5" gracePeriod=30
Nov 26 07:06:41 crc kubenswrapper[4492]: I1126 07:06:41.560486 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="f353995e-4082-4459-9591-efae7915aed4" containerName="sg-core" containerID="cri-o://cea6ffb26099a35ef4306859c1b69b264a4d070de2ca028f1f7de7227fb6d3cc" gracePeriod=30
Nov 26 07:06:41 crc kubenswrapper[4492]: I1126 07:06:41.749257 4492 generic.go:334] "Generic (PLEG): container finished" podID="f353995e-4082-4459-9591-efae7915aed4" containerID="cea6ffb26099a35ef4306859c1b69b264a4d070de2ca028f1f7de7227fb6d3cc" exitCode=2
Nov 26 07:06:41 crc kubenswrapper[4492]: I1126 07:06:41.749323 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f353995e-4082-4459-9591-efae7915aed4","Type":"ContainerDied","Data":"cea6ffb26099a35ef4306859c1b69b264a4d070de2ca028f1f7de7227fb6d3cc"}
Nov 26 07:06:41 crc kubenswrapper[4492]: I1126 07:06:41.757229 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"7d2be4f4-9426-4905-b141-a93775673428","Type":"ContainerStarted","Data":"37ccba7f48d808fecbe7519f0896996f24ecdbb9aaec378e2cf3a2057d060bee"}
Nov 26 07:06:41 crc kubenswrapper[4492]: I1126 07:06:41.762279 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-645d6d85d7-cvr9h" event={"ID":"e44b94a7-c7a7-40e1-8d00-9f27e0e0639e","Type":"ContainerStarted","Data":"b074fecabfea28cf79ce1b94ce5dd22e7cf01549eabe27e81fad6cfe293974d5"}
Nov 26 07:06:41 crc kubenswrapper[4492]: I1126 07:06:41.762308 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-645d6d85d7-cvr9h" event={"ID":"e44b94a7-c7a7-40e1-8d00-9f27e0e0639e","Type":"ContainerStarted","Data":"c886f06dc77cf177bff56b546ff6065f7db46094f196bf851b9ee4dde845da52"}
Nov 26 07:06:41 crc kubenswrapper[4492]: I1126 07:06:41.768073 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"6c6c7b43-f00e-4013-ab11-b0471d7d1f68","Type":"ContainerDied","Data":"4ed06661faf20c3987734974ea64c5bb0920b28a5507dffc3e42c840b0abb3e5"}
Nov 26 07:06:41 crc kubenswrapper[4492]: I1126 07:06:41.768115 4492 scope.go:117] "RemoveContainer" containerID="16f53fca6cf79164a90bde5dbdcf56b575255f3a74db4594867316f492ce58fc"
Nov 26 07:06:41 crc kubenswrapper[4492]: I1126 07:06:41.768276 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0"
Nov 26 07:06:41 crc kubenswrapper[4492]: I1126 07:06:41.837823 4492 scope.go:117] "RemoveContainer" containerID="cecf3a295e64919cdc40f558352d93e08f30c11c0b7f2963a84b766132ced8de"
Nov 26 07:06:41 crc kubenswrapper[4492]: I1126 07:06:41.871157 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"]
Nov 26 07:06:41 crc kubenswrapper[4492]: I1126 07:06:41.882329 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-scheduler-0"]
Nov 26 07:06:41 crc kubenswrapper[4492]: I1126 07:06:41.895580 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"]
Nov 26 07:06:41 crc kubenswrapper[4492]: E1126 07:06:41.912814 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6c6c7b43-f00e-4013-ab11-b0471d7d1f68" containerName="probe"
Nov 26 07:06:41 crc kubenswrapper[4492]: I1126 07:06:41.912841 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="6c6c7b43-f00e-4013-ab11-b0471d7d1f68" containerName="probe"
Nov 26 07:06:41 crc kubenswrapper[4492]: E1126 07:06:41.912865 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6c6c7b43-f00e-4013-ab11-b0471d7d1f68" containerName="cinder-scheduler"
Nov 26 07:06:41 crc kubenswrapper[4492]: I1126 07:06:41.912873 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="6c6c7b43-f00e-4013-ab11-b0471d7d1f68" containerName="cinder-scheduler"
Nov 26 07:06:41 crc kubenswrapper[4492]: I1126 07:06:41.913043 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="6c6c7b43-f00e-4013-ab11-b0471d7d1f68" containerName="probe"
Nov 26 07:06:41 crc kubenswrapper[4492]: I1126 07:06:41.913054 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="6c6c7b43-f00e-4013-ab11-b0471d7d1f68" containerName="cinder-scheduler"
Nov 26 07:06:41 crc kubenswrapper[4492]: I1126 07:06:41.913832 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"]
Nov 26 07:06:41 crc kubenswrapper[4492]: I1126 07:06:41.913906 4492 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 26 07:06:41 crc kubenswrapper[4492]: I1126 07:06:41.919392 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Nov 26 07:06:42 crc kubenswrapper[4492]: I1126 07:06:42.008288 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/15799fff-2977-41be-bf39-d2cb2c360212-scripts\") pod \"cinder-scheduler-0\" (UID: \"15799fff-2977-41be-bf39-d2cb2c360212\") " pod="openstack/cinder-scheduler-0" Nov 26 07:06:42 crc kubenswrapper[4492]: I1126 07:06:42.008394 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/15799fff-2977-41be-bf39-d2cb2c360212-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"15799fff-2977-41be-bf39-d2cb2c360212\") " pod="openstack/cinder-scheduler-0" Nov 26 07:06:42 crc kubenswrapper[4492]: I1126 07:06:42.008429 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/15799fff-2977-41be-bf39-d2cb2c360212-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"15799fff-2977-41be-bf39-d2cb2c360212\") " pod="openstack/cinder-scheduler-0" Nov 26 07:06:42 crc kubenswrapper[4492]: I1126 07:06:42.008467 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/15799fff-2977-41be-bf39-d2cb2c360212-config-data\") pod \"cinder-scheduler-0\" (UID: \"15799fff-2977-41be-bf39-d2cb2c360212\") " pod="openstack/cinder-scheduler-0" Nov 26 07:06:42 crc kubenswrapper[4492]: I1126 07:06:42.008508 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/15799fff-2977-41be-bf39-d2cb2c360212-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"15799fff-2977-41be-bf39-d2cb2c360212\") " pod="openstack/cinder-scheduler-0" Nov 26 07:06:42 crc kubenswrapper[4492]: I1126 07:06:42.008546 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x74x7\" (UniqueName: \"kubernetes.io/projected/15799fff-2977-41be-bf39-d2cb2c360212-kube-api-access-x74x7\") pod \"cinder-scheduler-0\" (UID: \"15799fff-2977-41be-bf39-d2cb2c360212\") " pod="openstack/cinder-scheduler-0" Nov 26 07:06:42 crc kubenswrapper[4492]: I1126 07:06:42.110142 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/15799fff-2977-41be-bf39-d2cb2c360212-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"15799fff-2977-41be-bf39-d2cb2c360212\") " pod="openstack/cinder-scheduler-0" Nov 26 07:06:42 crc kubenswrapper[4492]: I1126 07:06:42.110203 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/15799fff-2977-41be-bf39-d2cb2c360212-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"15799fff-2977-41be-bf39-d2cb2c360212\") " pod="openstack/cinder-scheduler-0" Nov 26 07:06:42 crc kubenswrapper[4492]: I1126 07:06:42.110229 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/15799fff-2977-41be-bf39-d2cb2c360212-config-data\") pod 
\"cinder-scheduler-0\" (UID: \"15799fff-2977-41be-bf39-d2cb2c360212\") " pod="openstack/cinder-scheduler-0" Nov 26 07:06:42 crc kubenswrapper[4492]: I1126 07:06:42.110255 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/15799fff-2977-41be-bf39-d2cb2c360212-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"15799fff-2977-41be-bf39-d2cb2c360212\") " pod="openstack/cinder-scheduler-0" Nov 26 07:06:42 crc kubenswrapper[4492]: I1126 07:06:42.110280 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x74x7\" (UniqueName: \"kubernetes.io/projected/15799fff-2977-41be-bf39-d2cb2c360212-kube-api-access-x74x7\") pod \"cinder-scheduler-0\" (UID: \"15799fff-2977-41be-bf39-d2cb2c360212\") " pod="openstack/cinder-scheduler-0" Nov 26 07:06:42 crc kubenswrapper[4492]: I1126 07:06:42.110335 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/15799fff-2977-41be-bf39-d2cb2c360212-scripts\") pod \"cinder-scheduler-0\" (UID: \"15799fff-2977-41be-bf39-d2cb2c360212\") " pod="openstack/cinder-scheduler-0" Nov 26 07:06:42 crc kubenswrapper[4492]: I1126 07:06:42.110927 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/15799fff-2977-41be-bf39-d2cb2c360212-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"15799fff-2977-41be-bf39-d2cb2c360212\") " pod="openstack/cinder-scheduler-0" Nov 26 07:06:42 crc kubenswrapper[4492]: I1126 07:06:42.116591 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/15799fff-2977-41be-bf39-d2cb2c360212-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"15799fff-2977-41be-bf39-d2cb2c360212\") " pod="openstack/cinder-scheduler-0" Nov 26 07:06:42 crc kubenswrapper[4492]: I1126 07:06:42.116715 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/15799fff-2977-41be-bf39-d2cb2c360212-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"15799fff-2977-41be-bf39-d2cb2c360212\") " pod="openstack/cinder-scheduler-0" Nov 26 07:06:42 crc kubenswrapper[4492]: I1126 07:06:42.117538 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/15799fff-2977-41be-bf39-d2cb2c360212-config-data\") pod \"cinder-scheduler-0\" (UID: \"15799fff-2977-41be-bf39-d2cb2c360212\") " pod="openstack/cinder-scheduler-0" Nov 26 07:06:42 crc kubenswrapper[4492]: I1126 07:06:42.119592 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/15799fff-2977-41be-bf39-d2cb2c360212-scripts\") pod \"cinder-scheduler-0\" (UID: \"15799fff-2977-41be-bf39-d2cb2c360212\") " pod="openstack/cinder-scheduler-0" Nov 26 07:06:42 crc kubenswrapper[4492]: I1126 07:06:42.134555 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x74x7\" (UniqueName: \"kubernetes.io/projected/15799fff-2977-41be-bf39-d2cb2c360212-kube-api-access-x74x7\") pod \"cinder-scheduler-0\" (UID: \"15799fff-2977-41be-bf39-d2cb2c360212\") " pod="openstack/cinder-scheduler-0" Nov 26 07:06:42 crc kubenswrapper[4492]: I1126 07:06:42.242882 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 26 07:06:42 crc kubenswrapper[4492]: I1126 07:06:42.460689 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6c6c7b43-f00e-4013-ab11-b0471d7d1f68" path="/var/lib/kubelet/pods/6c6c7b43-f00e-4013-ab11-b0471d7d1f68/volumes" Nov 26 07:06:42 crc kubenswrapper[4492]: I1126 07:06:42.634968 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 26 07:06:42 crc kubenswrapper[4492]: I1126 07:06:42.725306 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f353995e-4082-4459-9591-efae7915aed4-run-httpd\") pod \"f353995e-4082-4459-9591-efae7915aed4\" (UID: \"f353995e-4082-4459-9591-efae7915aed4\") " Nov 26 07:06:42 crc kubenswrapper[4492]: I1126 07:06:42.725384 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f353995e-4082-4459-9591-efae7915aed4-config-data\") pod \"f353995e-4082-4459-9591-efae7915aed4\" (UID: \"f353995e-4082-4459-9591-efae7915aed4\") " Nov 26 07:06:42 crc kubenswrapper[4492]: I1126 07:06:42.725513 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f353995e-4082-4459-9591-efae7915aed4-combined-ca-bundle\") pod \"f353995e-4082-4459-9591-efae7915aed4\" (UID: \"f353995e-4082-4459-9591-efae7915aed4\") " Nov 26 07:06:42 crc kubenswrapper[4492]: I1126 07:06:42.725544 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f353995e-4082-4459-9591-efae7915aed4-scripts\") pod \"f353995e-4082-4459-9591-efae7915aed4\" (UID: \"f353995e-4082-4459-9591-efae7915aed4\") " Nov 26 07:06:42 crc kubenswrapper[4492]: I1126 07:06:42.725579 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f353995e-4082-4459-9591-efae7915aed4-sg-core-conf-yaml\") pod \"f353995e-4082-4459-9591-efae7915aed4\" (UID: \"f353995e-4082-4459-9591-efae7915aed4\") " Nov 26 07:06:42 crc kubenswrapper[4492]: I1126 07:06:42.725643 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kqzgl\" (UniqueName: \"kubernetes.io/projected/f353995e-4082-4459-9591-efae7915aed4-kube-api-access-kqzgl\") pod \"f353995e-4082-4459-9591-efae7915aed4\" (UID: \"f353995e-4082-4459-9591-efae7915aed4\") " Nov 26 07:06:42 crc kubenswrapper[4492]: I1126 07:06:42.725840 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f353995e-4082-4459-9591-efae7915aed4-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "f353995e-4082-4459-9591-efae7915aed4" (UID: "f353995e-4082-4459-9591-efae7915aed4"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:06:42 crc kubenswrapper[4492]: I1126 07:06:42.727323 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f353995e-4082-4459-9591-efae7915aed4-log-httpd\") pod \"f353995e-4082-4459-9591-efae7915aed4\" (UID: \"f353995e-4082-4459-9591-efae7915aed4\") " Nov 26 07:06:42 crc kubenswrapper[4492]: I1126 07:06:42.727760 4492 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f353995e-4082-4459-9591-efae7915aed4-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 26 07:06:42 crc kubenswrapper[4492]: I1126 07:06:42.728780 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f353995e-4082-4459-9591-efae7915aed4-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "f353995e-4082-4459-9591-efae7915aed4" (UID: "f353995e-4082-4459-9591-efae7915aed4"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:06:42 crc kubenswrapper[4492]: I1126 07:06:42.740639 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f353995e-4082-4459-9591-efae7915aed4-kube-api-access-kqzgl" (OuterVolumeSpecName: "kube-api-access-kqzgl") pod "f353995e-4082-4459-9591-efae7915aed4" (UID: "f353995e-4082-4459-9591-efae7915aed4"). InnerVolumeSpecName "kube-api-access-kqzgl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:06:42 crc kubenswrapper[4492]: I1126 07:06:42.742108 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f353995e-4082-4459-9591-efae7915aed4-scripts" (OuterVolumeSpecName: "scripts") pod "f353995e-4082-4459-9591-efae7915aed4" (UID: "f353995e-4082-4459-9591-efae7915aed4"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:06:42 crc kubenswrapper[4492]: I1126 07:06:42.812789 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-645d6d85d7-cvr9h" event={"ID":"e44b94a7-c7a7-40e1-8d00-9f27e0e0639e","Type":"ContainerStarted","Data":"486e9493dee64750b654bb37f932817d9d7833e6026d9c7381965c2e4c5b39a9"} Nov 26 07:06:42 crc kubenswrapper[4492]: I1126 07:06:42.816338 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-645d6d85d7-cvr9h" Nov 26 07:06:42 crc kubenswrapper[4492]: I1126 07:06:42.816371 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-645d6d85d7-cvr9h" Nov 26 07:06:42 crc kubenswrapper[4492]: I1126 07:06:42.824742 4492 generic.go:334] "Generic (PLEG): container finished" podID="f353995e-4082-4459-9591-efae7915aed4" containerID="3f4b7420aa02033ca2e78ba393017da8d6a10769f11228d4988069d07b599151" exitCode=0 Nov 26 07:06:42 crc kubenswrapper[4492]: I1126 07:06:42.824766 4492 generic.go:334] "Generic (PLEG): container finished" podID="f353995e-4082-4459-9591-efae7915aed4" containerID="d96a99085304c63f2edf23c7670aaaddbd6572bbf5da0d4825f786c569c7aaa5" exitCode=0 Nov 26 07:06:42 crc kubenswrapper[4492]: I1126 07:06:42.824775 4492 generic.go:334] "Generic (PLEG): container finished" podID="f353995e-4082-4459-9591-efae7915aed4" containerID="0d6371fa8930961b7c1b5ab38603c8ade2771dfbc6dd307a32ceaba2eb23213b" exitCode=0 Nov 26 07:06:42 crc kubenswrapper[4492]: I1126 07:06:42.824811 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f353995e-4082-4459-9591-efae7915aed4","Type":"ContainerDied","Data":"3f4b7420aa02033ca2e78ba393017da8d6a10769f11228d4988069d07b599151"} Nov 26 07:06:42 crc kubenswrapper[4492]: I1126 07:06:42.824833 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f353995e-4082-4459-9591-efae7915aed4","Type":"ContainerDied","Data":"d96a99085304c63f2edf23c7670aaaddbd6572bbf5da0d4825f786c569c7aaa5"} Nov 26 07:06:42 crc kubenswrapper[4492]: I1126 07:06:42.824846 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f353995e-4082-4459-9591-efae7915aed4","Type":"ContainerDied","Data":"0d6371fa8930961b7c1b5ab38603c8ade2771dfbc6dd307a32ceaba2eb23213b"} Nov 26 07:06:42 crc kubenswrapper[4492]: I1126 07:06:42.824856 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f353995e-4082-4459-9591-efae7915aed4","Type":"ContainerDied","Data":"30c28d5e683aa178b3996cef8ba6e3356df5d59965845defa5f18ceb58bfe7ce"} Nov 26 07:06:42 crc kubenswrapper[4492]: I1126 07:06:42.824892 4492 scope.go:117] "RemoveContainer" containerID="3f4b7420aa02033ca2e78ba393017da8d6a10769f11228d4988069d07b599151" Nov 26 07:06:42 crc kubenswrapper[4492]: I1126 07:06:42.825020 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 26 07:06:42 crc kubenswrapper[4492]: I1126 07:06:42.827685 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f353995e-4082-4459-9591-efae7915aed4-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "f353995e-4082-4459-9591-efae7915aed4" (UID: "f353995e-4082-4459-9591-efae7915aed4"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:06:42 crc kubenswrapper[4492]: I1126 07:06:42.829490 4492 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f353995e-4082-4459-9591-efae7915aed4-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 07:06:42 crc kubenswrapper[4492]: I1126 07:06:42.829516 4492 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f353995e-4082-4459-9591-efae7915aed4-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 26 07:06:42 crc kubenswrapper[4492]: I1126 07:06:42.829528 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kqzgl\" (UniqueName: \"kubernetes.io/projected/f353995e-4082-4459-9591-efae7915aed4-kube-api-access-kqzgl\") on node \"crc\" DevicePath \"\"" Nov 26 07:06:42 crc kubenswrapper[4492]: I1126 07:06:42.829539 4492 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f353995e-4082-4459-9591-efae7915aed4-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 26 07:06:42 crc kubenswrapper[4492]: I1126 07:06:42.880796 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 26 07:06:42 crc kubenswrapper[4492]: I1126 07:06:42.891423 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-proxy-645d6d85d7-cvr9h" podStartSLOduration=3.89140066 podStartE2EDuration="3.89140066s" podCreationTimestamp="2025-11-26 07:06:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:06:42.851293391 +0000 UTC m=+1098.735181699" watchObservedRunningTime="2025-11-26 07:06:42.89140066 +0000 UTC m=+1098.775288948" Nov 26 07:06:42 crc kubenswrapper[4492]: I1126 07:06:42.893672 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f353995e-4082-4459-9591-efae7915aed4-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f353995e-4082-4459-9591-efae7915aed4" (UID: "f353995e-4082-4459-9591-efae7915aed4"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:06:42 crc kubenswrapper[4492]: I1126 07:06:42.907774 4492 scope.go:117] "RemoveContainer" containerID="cea6ffb26099a35ef4306859c1b69b264a4d070de2ca028f1f7de7227fb6d3cc" Nov 26 07:06:42 crc kubenswrapper[4492]: I1126 07:06:42.934396 4492 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f353995e-4082-4459-9591-efae7915aed4-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:06:42 crc kubenswrapper[4492]: I1126 07:06:42.968676 4492 scope.go:117] "RemoveContainer" containerID="d96a99085304c63f2edf23c7670aaaddbd6572bbf5da0d4825f786c569c7aaa5" Nov 26 07:06:42 crc kubenswrapper[4492]: I1126 07:06:42.997377 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f353995e-4082-4459-9591-efae7915aed4-config-data" (OuterVolumeSpecName: "config-data") pod "f353995e-4082-4459-9591-efae7915aed4" (UID: "f353995e-4082-4459-9591-efae7915aed4"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:06:43 crc kubenswrapper[4492]: I1126 07:06:43.004646 4492 scope.go:117] "RemoveContainer" containerID="0d6371fa8930961b7c1b5ab38603c8ade2771dfbc6dd307a32ceaba2eb23213b" Nov 26 07:06:43 crc kubenswrapper[4492]: I1126 07:06:43.031398 4492 scope.go:117] "RemoveContainer" containerID="3f4b7420aa02033ca2e78ba393017da8d6a10769f11228d4988069d07b599151" Nov 26 07:06:43 crc kubenswrapper[4492]: E1126 07:06:43.031829 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3f4b7420aa02033ca2e78ba393017da8d6a10769f11228d4988069d07b599151\": container with ID starting with 3f4b7420aa02033ca2e78ba393017da8d6a10769f11228d4988069d07b599151 not found: ID does not exist" containerID="3f4b7420aa02033ca2e78ba393017da8d6a10769f11228d4988069d07b599151" Nov 26 07:06:43 crc kubenswrapper[4492]: I1126 07:06:43.031859 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3f4b7420aa02033ca2e78ba393017da8d6a10769f11228d4988069d07b599151"} err="failed to get container status \"3f4b7420aa02033ca2e78ba393017da8d6a10769f11228d4988069d07b599151\": rpc error: code = NotFound desc = could not find container \"3f4b7420aa02033ca2e78ba393017da8d6a10769f11228d4988069d07b599151\": container with ID starting with 3f4b7420aa02033ca2e78ba393017da8d6a10769f11228d4988069d07b599151 not found: ID does not exist" Nov 26 07:06:43 crc kubenswrapper[4492]: I1126 07:06:43.031885 4492 scope.go:117] "RemoveContainer" containerID="cea6ffb26099a35ef4306859c1b69b264a4d070de2ca028f1f7de7227fb6d3cc" Nov 26 07:06:43 crc kubenswrapper[4492]: E1126 07:06:43.032229 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cea6ffb26099a35ef4306859c1b69b264a4d070de2ca028f1f7de7227fb6d3cc\": container with ID starting with cea6ffb26099a35ef4306859c1b69b264a4d070de2ca028f1f7de7227fb6d3cc not found: ID does not exist" containerID="cea6ffb26099a35ef4306859c1b69b264a4d070de2ca028f1f7de7227fb6d3cc" Nov 26 07:06:43 crc kubenswrapper[4492]: I1126 07:06:43.032256 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cea6ffb26099a35ef4306859c1b69b264a4d070de2ca028f1f7de7227fb6d3cc"} err="failed to get container status \"cea6ffb26099a35ef4306859c1b69b264a4d070de2ca028f1f7de7227fb6d3cc\": rpc error: code = NotFound desc = could not find container \"cea6ffb26099a35ef4306859c1b69b264a4d070de2ca028f1f7de7227fb6d3cc\": container with ID starting with cea6ffb26099a35ef4306859c1b69b264a4d070de2ca028f1f7de7227fb6d3cc not found: ID does not exist" Nov 26 07:06:43 crc kubenswrapper[4492]: I1126 07:06:43.032274 4492 scope.go:117] "RemoveContainer" containerID="d96a99085304c63f2edf23c7670aaaddbd6572bbf5da0d4825f786c569c7aaa5" Nov 26 07:06:43 crc kubenswrapper[4492]: E1126 07:06:43.032540 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d96a99085304c63f2edf23c7670aaaddbd6572bbf5da0d4825f786c569c7aaa5\": container with ID starting with d96a99085304c63f2edf23c7670aaaddbd6572bbf5da0d4825f786c569c7aaa5 not found: ID does not exist" containerID="d96a99085304c63f2edf23c7670aaaddbd6572bbf5da0d4825f786c569c7aaa5" Nov 26 07:06:43 crc kubenswrapper[4492]: I1126 07:06:43.032567 4492 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"d96a99085304c63f2edf23c7670aaaddbd6572bbf5da0d4825f786c569c7aaa5"} err="failed to get container status \"d96a99085304c63f2edf23c7670aaaddbd6572bbf5da0d4825f786c569c7aaa5\": rpc error: code = NotFound desc = could not find container \"d96a99085304c63f2edf23c7670aaaddbd6572bbf5da0d4825f786c569c7aaa5\": container with ID starting with d96a99085304c63f2edf23c7670aaaddbd6572bbf5da0d4825f786c569c7aaa5 not found: ID does not exist" Nov 26 07:06:43 crc kubenswrapper[4492]: I1126 07:06:43.032581 4492 scope.go:117] "RemoveContainer" containerID="0d6371fa8930961b7c1b5ab38603c8ade2771dfbc6dd307a32ceaba2eb23213b" Nov 26 07:06:43 crc kubenswrapper[4492]: E1126 07:06:43.032829 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0d6371fa8930961b7c1b5ab38603c8ade2771dfbc6dd307a32ceaba2eb23213b\": container with ID starting with 0d6371fa8930961b7c1b5ab38603c8ade2771dfbc6dd307a32ceaba2eb23213b not found: ID does not exist" containerID="0d6371fa8930961b7c1b5ab38603c8ade2771dfbc6dd307a32ceaba2eb23213b" Nov 26 07:06:43 crc kubenswrapper[4492]: I1126 07:06:43.032871 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0d6371fa8930961b7c1b5ab38603c8ade2771dfbc6dd307a32ceaba2eb23213b"} err="failed to get container status \"0d6371fa8930961b7c1b5ab38603c8ade2771dfbc6dd307a32ceaba2eb23213b\": rpc error: code = NotFound desc = could not find container \"0d6371fa8930961b7c1b5ab38603c8ade2771dfbc6dd307a32ceaba2eb23213b\": container with ID starting with 0d6371fa8930961b7c1b5ab38603c8ade2771dfbc6dd307a32ceaba2eb23213b not found: ID does not exist" Nov 26 07:06:43 crc kubenswrapper[4492]: I1126 07:06:43.032885 4492 scope.go:117] "RemoveContainer" containerID="3f4b7420aa02033ca2e78ba393017da8d6a10769f11228d4988069d07b599151" Nov 26 07:06:43 crc kubenswrapper[4492]: I1126 07:06:43.033678 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3f4b7420aa02033ca2e78ba393017da8d6a10769f11228d4988069d07b599151"} err="failed to get container status \"3f4b7420aa02033ca2e78ba393017da8d6a10769f11228d4988069d07b599151\": rpc error: code = NotFound desc = could not find container \"3f4b7420aa02033ca2e78ba393017da8d6a10769f11228d4988069d07b599151\": container with ID starting with 3f4b7420aa02033ca2e78ba393017da8d6a10769f11228d4988069d07b599151 not found: ID does not exist" Nov 26 07:06:43 crc kubenswrapper[4492]: I1126 07:06:43.033700 4492 scope.go:117] "RemoveContainer" containerID="cea6ffb26099a35ef4306859c1b69b264a4d070de2ca028f1f7de7227fb6d3cc" Nov 26 07:06:43 crc kubenswrapper[4492]: I1126 07:06:43.034012 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cea6ffb26099a35ef4306859c1b69b264a4d070de2ca028f1f7de7227fb6d3cc"} err="failed to get container status \"cea6ffb26099a35ef4306859c1b69b264a4d070de2ca028f1f7de7227fb6d3cc\": rpc error: code = NotFound desc = could not find container \"cea6ffb26099a35ef4306859c1b69b264a4d070de2ca028f1f7de7227fb6d3cc\": container with ID starting with cea6ffb26099a35ef4306859c1b69b264a4d070de2ca028f1f7de7227fb6d3cc not found: ID does not exist" Nov 26 07:06:43 crc kubenswrapper[4492]: I1126 07:06:43.034036 4492 scope.go:117] "RemoveContainer" containerID="d96a99085304c63f2edf23c7670aaaddbd6572bbf5da0d4825f786c569c7aaa5" Nov 26 07:06:43 crc kubenswrapper[4492]: I1126 07:06:43.034363 4492 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"d96a99085304c63f2edf23c7670aaaddbd6572bbf5da0d4825f786c569c7aaa5"} err="failed to get container status \"d96a99085304c63f2edf23c7670aaaddbd6572bbf5da0d4825f786c569c7aaa5\": rpc error: code = NotFound desc = could not find container \"d96a99085304c63f2edf23c7670aaaddbd6572bbf5da0d4825f786c569c7aaa5\": container with ID starting with d96a99085304c63f2edf23c7670aaaddbd6572bbf5da0d4825f786c569c7aaa5 not found: ID does not exist" Nov 26 07:06:43 crc kubenswrapper[4492]: I1126 07:06:43.034385 4492 scope.go:117] "RemoveContainer" containerID="0d6371fa8930961b7c1b5ab38603c8ade2771dfbc6dd307a32ceaba2eb23213b" Nov 26 07:06:43 crc kubenswrapper[4492]: I1126 07:06:43.034617 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0d6371fa8930961b7c1b5ab38603c8ade2771dfbc6dd307a32ceaba2eb23213b"} err="failed to get container status \"0d6371fa8930961b7c1b5ab38603c8ade2771dfbc6dd307a32ceaba2eb23213b\": rpc error: code = NotFound desc = could not find container \"0d6371fa8930961b7c1b5ab38603c8ade2771dfbc6dd307a32ceaba2eb23213b\": container with ID starting with 0d6371fa8930961b7c1b5ab38603c8ade2771dfbc6dd307a32ceaba2eb23213b not found: ID does not exist" Nov 26 07:06:43 crc kubenswrapper[4492]: I1126 07:06:43.034639 4492 scope.go:117] "RemoveContainer" containerID="3f4b7420aa02033ca2e78ba393017da8d6a10769f11228d4988069d07b599151" Nov 26 07:06:43 crc kubenswrapper[4492]: I1126 07:06:43.034894 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3f4b7420aa02033ca2e78ba393017da8d6a10769f11228d4988069d07b599151"} err="failed to get container status \"3f4b7420aa02033ca2e78ba393017da8d6a10769f11228d4988069d07b599151\": rpc error: code = NotFound desc = could not find container \"3f4b7420aa02033ca2e78ba393017da8d6a10769f11228d4988069d07b599151\": container with ID starting with 3f4b7420aa02033ca2e78ba393017da8d6a10769f11228d4988069d07b599151 not found: ID does not exist" Nov 26 07:06:43 crc kubenswrapper[4492]: I1126 07:06:43.034911 4492 scope.go:117] "RemoveContainer" containerID="cea6ffb26099a35ef4306859c1b69b264a4d070de2ca028f1f7de7227fb6d3cc" Nov 26 07:06:43 crc kubenswrapper[4492]: I1126 07:06:43.035135 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cea6ffb26099a35ef4306859c1b69b264a4d070de2ca028f1f7de7227fb6d3cc"} err="failed to get container status \"cea6ffb26099a35ef4306859c1b69b264a4d070de2ca028f1f7de7227fb6d3cc\": rpc error: code = NotFound desc = could not find container \"cea6ffb26099a35ef4306859c1b69b264a4d070de2ca028f1f7de7227fb6d3cc\": container with ID starting with cea6ffb26099a35ef4306859c1b69b264a4d070de2ca028f1f7de7227fb6d3cc not found: ID does not exist" Nov 26 07:06:43 crc kubenswrapper[4492]: I1126 07:06:43.035152 4492 scope.go:117] "RemoveContainer" containerID="d96a99085304c63f2edf23c7670aaaddbd6572bbf5da0d4825f786c569c7aaa5" Nov 26 07:06:43 crc kubenswrapper[4492]: I1126 07:06:43.035645 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d96a99085304c63f2edf23c7670aaaddbd6572bbf5da0d4825f786c569c7aaa5"} err="failed to get container status \"d96a99085304c63f2edf23c7670aaaddbd6572bbf5da0d4825f786c569c7aaa5\": rpc error: code = NotFound desc = could not find container \"d96a99085304c63f2edf23c7670aaaddbd6572bbf5da0d4825f786c569c7aaa5\": container with ID starting with d96a99085304c63f2edf23c7670aaaddbd6572bbf5da0d4825f786c569c7aaa5 not found: ID does not exist" Nov 
26 07:06:43 crc kubenswrapper[4492]: I1126 07:06:43.035663 4492 scope.go:117] "RemoveContainer" containerID="0d6371fa8930961b7c1b5ab38603c8ade2771dfbc6dd307a32ceaba2eb23213b" Nov 26 07:06:43 crc kubenswrapper[4492]: I1126 07:06:43.035867 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0d6371fa8930961b7c1b5ab38603c8ade2771dfbc6dd307a32ceaba2eb23213b"} err="failed to get container status \"0d6371fa8930961b7c1b5ab38603c8ade2771dfbc6dd307a32ceaba2eb23213b\": rpc error: code = NotFound desc = could not find container \"0d6371fa8930961b7c1b5ab38603c8ade2771dfbc6dd307a32ceaba2eb23213b\": container with ID starting with 0d6371fa8930961b7c1b5ab38603c8ade2771dfbc6dd307a32ceaba2eb23213b not found: ID does not exist" Nov 26 07:06:43 crc kubenswrapper[4492]: I1126 07:06:43.036077 4492 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f353995e-4082-4459-9591-efae7915aed4-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 07:06:43 crc kubenswrapper[4492]: I1126 07:06:43.211063 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 26 07:06:43 crc kubenswrapper[4492]: I1126 07:06:43.224108 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 26 07:06:43 crc kubenswrapper[4492]: I1126 07:06:43.233246 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 26 07:06:43 crc kubenswrapper[4492]: E1126 07:06:43.233804 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f353995e-4082-4459-9591-efae7915aed4" containerName="ceilometer-central-agent" Nov 26 07:06:43 crc kubenswrapper[4492]: I1126 07:06:43.233828 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="f353995e-4082-4459-9591-efae7915aed4" containerName="ceilometer-central-agent" Nov 26 07:06:43 crc kubenswrapper[4492]: E1126 07:06:43.233846 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f353995e-4082-4459-9591-efae7915aed4" containerName="proxy-httpd" Nov 26 07:06:43 crc kubenswrapper[4492]: I1126 07:06:43.233859 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="f353995e-4082-4459-9591-efae7915aed4" containerName="proxy-httpd" Nov 26 07:06:43 crc kubenswrapper[4492]: E1126 07:06:43.233884 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f353995e-4082-4459-9591-efae7915aed4" containerName="sg-core" Nov 26 07:06:43 crc kubenswrapper[4492]: I1126 07:06:43.233890 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="f353995e-4082-4459-9591-efae7915aed4" containerName="sg-core" Nov 26 07:06:43 crc kubenswrapper[4492]: E1126 07:06:43.233923 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f353995e-4082-4459-9591-efae7915aed4" containerName="ceilometer-notification-agent" Nov 26 07:06:43 crc kubenswrapper[4492]: I1126 07:06:43.233941 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="f353995e-4082-4459-9591-efae7915aed4" containerName="ceilometer-notification-agent" Nov 26 07:06:43 crc kubenswrapper[4492]: I1126 07:06:43.234150 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="f353995e-4082-4459-9591-efae7915aed4" containerName="ceilometer-notification-agent" Nov 26 07:06:43 crc kubenswrapper[4492]: I1126 07:06:43.234228 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="f353995e-4082-4459-9591-efae7915aed4" containerName="proxy-httpd" Nov 26 07:06:43 crc kubenswrapper[4492]: I1126 
07:06:43.234244 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="f353995e-4082-4459-9591-efae7915aed4" containerName="sg-core" Nov 26 07:06:43 crc kubenswrapper[4492]: I1126 07:06:43.234257 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="f353995e-4082-4459-9591-efae7915aed4" containerName="ceilometer-central-agent" Nov 26 07:06:43 crc kubenswrapper[4492]: I1126 07:06:43.236601 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 26 07:06:43 crc kubenswrapper[4492]: I1126 07:06:43.241033 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 26 07:06:43 crc kubenswrapper[4492]: I1126 07:06:43.239090 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 26 07:06:43 crc kubenswrapper[4492]: I1126 07:06:43.244160 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 26 07:06:43 crc kubenswrapper[4492]: I1126 07:06:43.345483 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b4ed9f4a-ed6c-4969-92eb-f310ac991b3c-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"b4ed9f4a-ed6c-4969-92eb-f310ac991b3c\") " pod="openstack/ceilometer-0" Nov 26 07:06:43 crc kubenswrapper[4492]: I1126 07:06:43.345558 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b4ed9f4a-ed6c-4969-92eb-f310ac991b3c-config-data\") pod \"ceilometer-0\" (UID: \"b4ed9f4a-ed6c-4969-92eb-f310ac991b3c\") " pod="openstack/ceilometer-0" Nov 26 07:06:43 crc kubenswrapper[4492]: I1126 07:06:43.345622 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b4ed9f4a-ed6c-4969-92eb-f310ac991b3c-log-httpd\") pod \"ceilometer-0\" (UID: \"b4ed9f4a-ed6c-4969-92eb-f310ac991b3c\") " pod="openstack/ceilometer-0" Nov 26 07:06:43 crc kubenswrapper[4492]: I1126 07:06:43.345645 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4ed9f4a-ed6c-4969-92eb-f310ac991b3c-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"b4ed9f4a-ed6c-4969-92eb-f310ac991b3c\") " pod="openstack/ceilometer-0" Nov 26 07:06:43 crc kubenswrapper[4492]: I1126 07:06:43.345665 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7sc86\" (UniqueName: \"kubernetes.io/projected/b4ed9f4a-ed6c-4969-92eb-f310ac991b3c-kube-api-access-7sc86\") pod \"ceilometer-0\" (UID: \"b4ed9f4a-ed6c-4969-92eb-f310ac991b3c\") " pod="openstack/ceilometer-0" Nov 26 07:06:43 crc kubenswrapper[4492]: I1126 07:06:43.345722 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b4ed9f4a-ed6c-4969-92eb-f310ac991b3c-run-httpd\") pod \"ceilometer-0\" (UID: \"b4ed9f4a-ed6c-4969-92eb-f310ac991b3c\") " pod="openstack/ceilometer-0" Nov 26 07:06:43 crc kubenswrapper[4492]: I1126 07:06:43.345755 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b4ed9f4a-ed6c-4969-92eb-f310ac991b3c-scripts\") pod \"ceilometer-0\" (UID: 
\"b4ed9f4a-ed6c-4969-92eb-f310ac991b3c\") " pod="openstack/ceilometer-0" Nov 26 07:06:43 crc kubenswrapper[4492]: I1126 07:06:43.446683 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b4ed9f4a-ed6c-4969-92eb-f310ac991b3c-scripts\") pod \"ceilometer-0\" (UID: \"b4ed9f4a-ed6c-4969-92eb-f310ac991b3c\") " pod="openstack/ceilometer-0" Nov 26 07:06:43 crc kubenswrapper[4492]: I1126 07:06:43.446759 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b4ed9f4a-ed6c-4969-92eb-f310ac991b3c-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"b4ed9f4a-ed6c-4969-92eb-f310ac991b3c\") " pod="openstack/ceilometer-0" Nov 26 07:06:43 crc kubenswrapper[4492]: I1126 07:06:43.446794 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b4ed9f4a-ed6c-4969-92eb-f310ac991b3c-config-data\") pod \"ceilometer-0\" (UID: \"b4ed9f4a-ed6c-4969-92eb-f310ac991b3c\") " pod="openstack/ceilometer-0" Nov 26 07:06:43 crc kubenswrapper[4492]: I1126 07:06:43.446828 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b4ed9f4a-ed6c-4969-92eb-f310ac991b3c-log-httpd\") pod \"ceilometer-0\" (UID: \"b4ed9f4a-ed6c-4969-92eb-f310ac991b3c\") " pod="openstack/ceilometer-0" Nov 26 07:06:43 crc kubenswrapper[4492]: I1126 07:06:43.446844 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4ed9f4a-ed6c-4969-92eb-f310ac991b3c-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"b4ed9f4a-ed6c-4969-92eb-f310ac991b3c\") " pod="openstack/ceilometer-0" Nov 26 07:06:43 crc kubenswrapper[4492]: I1126 07:06:43.446865 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7sc86\" (UniqueName: \"kubernetes.io/projected/b4ed9f4a-ed6c-4969-92eb-f310ac991b3c-kube-api-access-7sc86\") pod \"ceilometer-0\" (UID: \"b4ed9f4a-ed6c-4969-92eb-f310ac991b3c\") " pod="openstack/ceilometer-0" Nov 26 07:06:43 crc kubenswrapper[4492]: I1126 07:06:43.446899 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b4ed9f4a-ed6c-4969-92eb-f310ac991b3c-run-httpd\") pod \"ceilometer-0\" (UID: \"b4ed9f4a-ed6c-4969-92eb-f310ac991b3c\") " pod="openstack/ceilometer-0" Nov 26 07:06:43 crc kubenswrapper[4492]: I1126 07:06:43.447334 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b4ed9f4a-ed6c-4969-92eb-f310ac991b3c-run-httpd\") pod \"ceilometer-0\" (UID: \"b4ed9f4a-ed6c-4969-92eb-f310ac991b3c\") " pod="openstack/ceilometer-0" Nov 26 07:06:43 crc kubenswrapper[4492]: I1126 07:06:43.448243 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b4ed9f4a-ed6c-4969-92eb-f310ac991b3c-log-httpd\") pod \"ceilometer-0\" (UID: \"b4ed9f4a-ed6c-4969-92eb-f310ac991b3c\") " pod="openstack/ceilometer-0" Nov 26 07:06:43 crc kubenswrapper[4492]: I1126 07:06:43.454593 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b4ed9f4a-ed6c-4969-92eb-f310ac991b3c-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"b4ed9f4a-ed6c-4969-92eb-f310ac991b3c\") " 
pod="openstack/ceilometer-0" Nov 26 07:06:43 crc kubenswrapper[4492]: I1126 07:06:43.454996 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4ed9f4a-ed6c-4969-92eb-f310ac991b3c-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"b4ed9f4a-ed6c-4969-92eb-f310ac991b3c\") " pod="openstack/ceilometer-0" Nov 26 07:06:43 crc kubenswrapper[4492]: I1126 07:06:43.455637 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b4ed9f4a-ed6c-4969-92eb-f310ac991b3c-scripts\") pod \"ceilometer-0\" (UID: \"b4ed9f4a-ed6c-4969-92eb-f310ac991b3c\") " pod="openstack/ceilometer-0" Nov 26 07:06:43 crc kubenswrapper[4492]: I1126 07:06:43.456066 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b4ed9f4a-ed6c-4969-92eb-f310ac991b3c-config-data\") pod \"ceilometer-0\" (UID: \"b4ed9f4a-ed6c-4969-92eb-f310ac991b3c\") " pod="openstack/ceilometer-0" Nov 26 07:06:43 crc kubenswrapper[4492]: I1126 07:06:43.473823 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7sc86\" (UniqueName: \"kubernetes.io/projected/b4ed9f4a-ed6c-4969-92eb-f310ac991b3c-kube-api-access-7sc86\") pod \"ceilometer-0\" (UID: \"b4ed9f4a-ed6c-4969-92eb-f310ac991b3c\") " pod="openstack/ceilometer-0" Nov 26 07:06:43 crc kubenswrapper[4492]: I1126 07:06:43.562579 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 26 07:06:43 crc kubenswrapper[4492]: I1126 07:06:43.656191 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0" Nov 26 07:06:43 crc kubenswrapper[4492]: I1126 07:06:43.903923 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"15799fff-2977-41be-bf39-d2cb2c360212","Type":"ContainerStarted","Data":"9cd81588350c7e5687c30f063e560556bc207881f174475c423083fb7dab6041"} Nov 26 07:06:43 crc kubenswrapper[4492]: I1126 07:06:43.904192 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"15799fff-2977-41be-bf39-d2cb2c360212","Type":"ContainerStarted","Data":"4b985ddf2ee64aead3d7d5684c304164516c05bb3c445a398ad427385fe94fde"} Nov 26 07:06:44 crc kubenswrapper[4492]: I1126 07:06:44.168035 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 26 07:06:44 crc kubenswrapper[4492]: I1126 07:06:44.368003 4492 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-7bb6557f96-rgc7g" podUID="a15ec528-9195-4dfe-95b7-e30a44f74b44" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.153:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.153:8443: connect: connection refused" Nov 26 07:06:44 crc kubenswrapper[4492]: I1126 07:06:44.462392 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f353995e-4082-4459-9591-efae7915aed4" path="/var/lib/kubelet/pods/f353995e-4082-4459-9591-efae7915aed4/volumes" Nov 26 07:06:44 crc kubenswrapper[4492]: I1126 07:06:44.923058 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b4ed9f4a-ed6c-4969-92eb-f310ac991b3c","Type":"ContainerStarted","Data":"738525c19872e087d3461349360f85f5ff91917a09d82a2c751dd3a6428df53d"} Nov 26 07:06:44 crc kubenswrapper[4492]: I1126 07:06:44.937428 4492 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"15799fff-2977-41be-bf39-d2cb2c360212","Type":"ContainerStarted","Data":"1e4397fe2b3811f28fab18b8cf886cdc6ab455d4d2c414980a699e274c9a3dc9"} Nov 26 07:06:45 crc kubenswrapper[4492]: I1126 07:06:45.976013 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b4ed9f4a-ed6c-4969-92eb-f310ac991b3c","Type":"ContainerStarted","Data":"a166002503826650914992b89339d93a57e75f93a6f59f250df8d1fa81c30f6b"} Nov 26 07:06:47 crc kubenswrapper[4492]: I1126 07:06:47.012059 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b4ed9f4a-ed6c-4969-92eb-f310ac991b3c","Type":"ContainerStarted","Data":"1b7f88bcb5a84e94dbf763c9a278cbb8e8133d86ec8f275095b9491bd1219ba0"} Nov 26 07:06:47 crc kubenswrapper[4492]: I1126 07:06:47.012605 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b4ed9f4a-ed6c-4969-92eb-f310ac991b3c","Type":"ContainerStarted","Data":"c8a20e95cd2fee56a3f531a7377800bd3f0f3649ac6083bfbfd6a27edb2bb83d"} Nov 26 07:06:47 crc kubenswrapper[4492]: I1126 07:06:47.243956 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Nov 26 07:06:49 crc kubenswrapper[4492]: I1126 07:06:49.031835 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b4ed9f4a-ed6c-4969-92eb-f310ac991b3c","Type":"ContainerStarted","Data":"5042798291dfaba242f39acea6a2b43f278fef7f91f163a0a289f6cea3a71b88"} Nov 26 07:06:49 crc kubenswrapper[4492]: I1126 07:06:49.032599 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 26 07:06:49 crc kubenswrapper[4492]: I1126 07:06:49.055063 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.139652851 podStartE2EDuration="6.055043661s" podCreationTimestamp="2025-11-26 07:06:43 +0000 UTC" firstStartedPulling="2025-11-26 07:06:44.202639434 +0000 UTC m=+1100.086527721" lastFinishedPulling="2025-11-26 07:06:48.118030232 +0000 UTC m=+1104.001918531" observedRunningTime="2025-11-26 07:06:49.053855196 +0000 UTC m=+1104.937743495" watchObservedRunningTime="2025-11-26 07:06:49.055043661 +0000 UTC m=+1104.938931959" Nov 26 07:06:49 crc kubenswrapper[4492]: I1126 07:06:49.058643 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=8.058634683 podStartE2EDuration="8.058634683s" podCreationTimestamp="2025-11-26 07:06:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:06:44.964847919 +0000 UTC m=+1100.848736217" watchObservedRunningTime="2025-11-26 07:06:49.058634683 +0000 UTC m=+1104.942522981" Nov 26 07:06:49 crc kubenswrapper[4492]: I1126 07:06:49.445016 4492 patch_prober.go:28] interesting pod/machine-config-daemon-6blv7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 07:06:49 crc kubenswrapper[4492]: I1126 07:06:49.445136 4492 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" probeResult="failure" 
output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 07:06:50 crc kubenswrapper[4492]: I1126 07:06:50.128209 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 26 07:06:50 crc kubenswrapper[4492]: I1126 07:06:50.631682 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-645d6d85d7-cvr9h" Nov 26 07:06:50 crc kubenswrapper[4492]: I1126 07:06:50.632764 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-645d6d85d7-cvr9h" Nov 26 07:06:51 crc kubenswrapper[4492]: I1126 07:06:51.056135 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b4ed9f4a-ed6c-4969-92eb-f310ac991b3c" containerName="ceilometer-central-agent" containerID="cri-o://a166002503826650914992b89339d93a57e75f93a6f59f250df8d1fa81c30f6b" gracePeriod=30 Nov 26 07:06:51 crc kubenswrapper[4492]: I1126 07:06:51.056247 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b4ed9f4a-ed6c-4969-92eb-f310ac991b3c" containerName="proxy-httpd" containerID="cri-o://5042798291dfaba242f39acea6a2b43f278fef7f91f163a0a289f6cea3a71b88" gracePeriod=30 Nov 26 07:06:51 crc kubenswrapper[4492]: I1126 07:06:51.056290 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b4ed9f4a-ed6c-4969-92eb-f310ac991b3c" containerName="ceilometer-notification-agent" containerID="cri-o://c8a20e95cd2fee56a3f531a7377800bd3f0f3649ac6083bfbfd6a27edb2bb83d" gracePeriod=30 Nov 26 07:06:51 crc kubenswrapper[4492]: I1126 07:06:51.056263 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b4ed9f4a-ed6c-4969-92eb-f310ac991b3c" containerName="sg-core" containerID="cri-o://1b7f88bcb5a84e94dbf763c9a278cbb8e8133d86ec8f275095b9491bd1219ba0" gracePeriod=30 Nov 26 07:06:52 crc kubenswrapper[4492]: I1126 07:06:52.081444 4492 generic.go:334] "Generic (PLEG): container finished" podID="b4ed9f4a-ed6c-4969-92eb-f310ac991b3c" containerID="5042798291dfaba242f39acea6a2b43f278fef7f91f163a0a289f6cea3a71b88" exitCode=0 Nov 26 07:06:52 crc kubenswrapper[4492]: I1126 07:06:52.081700 4492 generic.go:334] "Generic (PLEG): container finished" podID="b4ed9f4a-ed6c-4969-92eb-f310ac991b3c" containerID="1b7f88bcb5a84e94dbf763c9a278cbb8e8133d86ec8f275095b9491bd1219ba0" exitCode=2 Nov 26 07:06:52 crc kubenswrapper[4492]: I1126 07:06:52.081711 4492 generic.go:334] "Generic (PLEG): container finished" podID="b4ed9f4a-ed6c-4969-92eb-f310ac991b3c" containerID="c8a20e95cd2fee56a3f531a7377800bd3f0f3649ac6083bfbfd6a27edb2bb83d" exitCode=0 Nov 26 07:06:52 crc kubenswrapper[4492]: I1126 07:06:52.081720 4492 generic.go:334] "Generic (PLEG): container finished" podID="b4ed9f4a-ed6c-4969-92eb-f310ac991b3c" containerID="a166002503826650914992b89339d93a57e75f93a6f59f250df8d1fa81c30f6b" exitCode=0 Nov 26 07:06:52 crc kubenswrapper[4492]: I1126 07:06:52.081627 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b4ed9f4a-ed6c-4969-92eb-f310ac991b3c","Type":"ContainerDied","Data":"5042798291dfaba242f39acea6a2b43f278fef7f91f163a0a289f6cea3a71b88"} Nov 26 07:06:52 crc kubenswrapper[4492]: I1126 07:06:52.081770 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"b4ed9f4a-ed6c-4969-92eb-f310ac991b3c","Type":"ContainerDied","Data":"1b7f88bcb5a84e94dbf763c9a278cbb8e8133d86ec8f275095b9491bd1219ba0"} Nov 26 07:06:52 crc kubenswrapper[4492]: I1126 07:06:52.081788 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b4ed9f4a-ed6c-4969-92eb-f310ac991b3c","Type":"ContainerDied","Data":"c8a20e95cd2fee56a3f531a7377800bd3f0f3649ac6083bfbfd6a27edb2bb83d"} Nov 26 07:06:52 crc kubenswrapper[4492]: I1126 07:06:52.081799 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b4ed9f4a-ed6c-4969-92eb-f310ac991b3c","Type":"ContainerDied","Data":"a166002503826650914992b89339d93a57e75f93a6f59f250df8d1fa81c30f6b"} Nov 26 07:06:52 crc kubenswrapper[4492]: I1126 07:06:52.473940 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Nov 26 07:06:54 crc kubenswrapper[4492]: I1126 07:06:54.368734 4492 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-7bb6557f96-rgc7g" podUID="a15ec528-9195-4dfe-95b7-e30a44f74b44" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.153:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.153:8443: connect: connection refused" Nov 26 07:06:54 crc kubenswrapper[4492]: I1126 07:06:54.369355 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-7bb6557f96-rgc7g" Nov 26 07:06:55 crc kubenswrapper[4492]: I1126 07:06:55.941308 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 26 07:06:56 crc kubenswrapper[4492]: I1126 07:06:56.009190 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7sc86\" (UniqueName: \"kubernetes.io/projected/b4ed9f4a-ed6c-4969-92eb-f310ac991b3c-kube-api-access-7sc86\") pod \"b4ed9f4a-ed6c-4969-92eb-f310ac991b3c\" (UID: \"b4ed9f4a-ed6c-4969-92eb-f310ac991b3c\") " Nov 26 07:06:56 crc kubenswrapper[4492]: I1126 07:06:56.009256 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b4ed9f4a-ed6c-4969-92eb-f310ac991b3c-run-httpd\") pod \"b4ed9f4a-ed6c-4969-92eb-f310ac991b3c\" (UID: \"b4ed9f4a-ed6c-4969-92eb-f310ac991b3c\") " Nov 26 07:06:56 crc kubenswrapper[4492]: I1126 07:06:56.009372 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b4ed9f4a-ed6c-4969-92eb-f310ac991b3c-config-data\") pod \"b4ed9f4a-ed6c-4969-92eb-f310ac991b3c\" (UID: \"b4ed9f4a-ed6c-4969-92eb-f310ac991b3c\") " Nov 26 07:06:56 crc kubenswrapper[4492]: I1126 07:06:56.009813 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b4ed9f4a-ed6c-4969-92eb-f310ac991b3c-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "b4ed9f4a-ed6c-4969-92eb-f310ac991b3c" (UID: "b4ed9f4a-ed6c-4969-92eb-f310ac991b3c"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:06:56 crc kubenswrapper[4492]: I1126 07:06:56.009919 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b4ed9f4a-ed6c-4969-92eb-f310ac991b3c-sg-core-conf-yaml\") pod \"b4ed9f4a-ed6c-4969-92eb-f310ac991b3c\" (UID: \"b4ed9f4a-ed6c-4969-92eb-f310ac991b3c\") " Nov 26 07:06:56 crc kubenswrapper[4492]: I1126 07:06:56.009977 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b4ed9f4a-ed6c-4969-92eb-f310ac991b3c-scripts\") pod \"b4ed9f4a-ed6c-4969-92eb-f310ac991b3c\" (UID: \"b4ed9f4a-ed6c-4969-92eb-f310ac991b3c\") " Nov 26 07:06:56 crc kubenswrapper[4492]: I1126 07:06:56.010013 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b4ed9f4a-ed6c-4969-92eb-f310ac991b3c-log-httpd\") pod \"b4ed9f4a-ed6c-4969-92eb-f310ac991b3c\" (UID: \"b4ed9f4a-ed6c-4969-92eb-f310ac991b3c\") " Nov 26 07:06:56 crc kubenswrapper[4492]: I1126 07:06:56.010030 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4ed9f4a-ed6c-4969-92eb-f310ac991b3c-combined-ca-bundle\") pod \"b4ed9f4a-ed6c-4969-92eb-f310ac991b3c\" (UID: \"b4ed9f4a-ed6c-4969-92eb-f310ac991b3c\") " Nov 26 07:06:56 crc kubenswrapper[4492]: I1126 07:06:56.010287 4492 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b4ed9f4a-ed6c-4969-92eb-f310ac991b3c-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 26 07:06:56 crc kubenswrapper[4492]: I1126 07:06:56.010818 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b4ed9f4a-ed6c-4969-92eb-f310ac991b3c-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "b4ed9f4a-ed6c-4969-92eb-f310ac991b3c" (UID: "b4ed9f4a-ed6c-4969-92eb-f310ac991b3c"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:06:56 crc kubenswrapper[4492]: I1126 07:06:56.015661 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b4ed9f4a-ed6c-4969-92eb-f310ac991b3c-scripts" (OuterVolumeSpecName: "scripts") pod "b4ed9f4a-ed6c-4969-92eb-f310ac991b3c" (UID: "b4ed9f4a-ed6c-4969-92eb-f310ac991b3c"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:06:56 crc kubenswrapper[4492]: I1126 07:06:56.016370 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b4ed9f4a-ed6c-4969-92eb-f310ac991b3c-kube-api-access-7sc86" (OuterVolumeSpecName: "kube-api-access-7sc86") pod "b4ed9f4a-ed6c-4969-92eb-f310ac991b3c" (UID: "b4ed9f4a-ed6c-4969-92eb-f310ac991b3c"). InnerVolumeSpecName "kube-api-access-7sc86". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:06:56 crc kubenswrapper[4492]: I1126 07:06:56.034107 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b4ed9f4a-ed6c-4969-92eb-f310ac991b3c-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "b4ed9f4a-ed6c-4969-92eb-f310ac991b3c" (UID: "b4ed9f4a-ed6c-4969-92eb-f310ac991b3c"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:06:56 crc kubenswrapper[4492]: I1126 07:06:56.084610 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b4ed9f4a-ed6c-4969-92eb-f310ac991b3c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b4ed9f4a-ed6c-4969-92eb-f310ac991b3c" (UID: "b4ed9f4a-ed6c-4969-92eb-f310ac991b3c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:06:56 crc kubenswrapper[4492]: I1126 07:06:56.100342 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b4ed9f4a-ed6c-4969-92eb-f310ac991b3c-config-data" (OuterVolumeSpecName: "config-data") pod "b4ed9f4a-ed6c-4969-92eb-f310ac991b3c" (UID: "b4ed9f4a-ed6c-4969-92eb-f310ac991b3c"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:06:56 crc kubenswrapper[4492]: I1126 07:06:56.112849 4492 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b4ed9f4a-ed6c-4969-92eb-f310ac991b3c-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 07:06:56 crc kubenswrapper[4492]: I1126 07:06:56.112890 4492 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b4ed9f4a-ed6c-4969-92eb-f310ac991b3c-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 26 07:06:56 crc kubenswrapper[4492]: I1126 07:06:56.112907 4492 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b4ed9f4a-ed6c-4969-92eb-f310ac991b3c-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 07:06:56 crc kubenswrapper[4492]: I1126 07:06:56.112922 4492 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b4ed9f4a-ed6c-4969-92eb-f310ac991b3c-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 26 07:06:56 crc kubenswrapper[4492]: I1126 07:06:56.112939 4492 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4ed9f4a-ed6c-4969-92eb-f310ac991b3c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:06:56 crc kubenswrapper[4492]: I1126 07:06:56.112951 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7sc86\" (UniqueName: \"kubernetes.io/projected/b4ed9f4a-ed6c-4969-92eb-f310ac991b3c-kube-api-access-7sc86\") on node \"crc\" DevicePath \"\"" Nov 26 07:06:56 crc kubenswrapper[4492]: I1126 07:06:56.140898 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"7d2be4f4-9426-4905-b141-a93775673428","Type":"ContainerStarted","Data":"1eedc56280b7ba0828e286e732b00d2af51e421e94e81a22377c2a5f1d9ae181"} Nov 26 07:06:56 crc kubenswrapper[4492]: I1126 07:06:56.144635 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b4ed9f4a-ed6c-4969-92eb-f310ac991b3c","Type":"ContainerDied","Data":"738525c19872e087d3461349360f85f5ff91917a09d82a2c751dd3a6428df53d"} Nov 26 07:06:56 crc kubenswrapper[4492]: I1126 07:06:56.144691 4492 scope.go:117] "RemoveContainer" containerID="5042798291dfaba242f39acea6a2b43f278fef7f91f163a0a289f6cea3a71b88" Nov 26 07:06:56 crc kubenswrapper[4492]: I1126 07:06:56.144851 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 26 07:06:56 crc kubenswrapper[4492]: I1126 07:06:56.165827 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstackclient" podStartSLOduration=1.644236014 podStartE2EDuration="16.165794973s" podCreationTimestamp="2025-11-26 07:06:40 +0000 UTC" firstStartedPulling="2025-11-26 07:06:41.17289685 +0000 UTC m=+1097.056785149" lastFinishedPulling="2025-11-26 07:06:55.69445581 +0000 UTC m=+1111.578344108" observedRunningTime="2025-11-26 07:06:56.161985852 +0000 UTC m=+1112.045874150" watchObservedRunningTime="2025-11-26 07:06:56.165794973 +0000 UTC m=+1112.049683272" Nov 26 07:06:56 crc kubenswrapper[4492]: I1126 07:06:56.188364 4492 scope.go:117] "RemoveContainer" containerID="1b7f88bcb5a84e94dbf763c9a278cbb8e8133d86ec8f275095b9491bd1219ba0" Nov 26 07:06:56 crc kubenswrapper[4492]: I1126 07:06:56.208281 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 26 07:06:56 crc kubenswrapper[4492]: I1126 07:06:56.220331 4492 scope.go:117] "RemoveContainer" containerID="c8a20e95cd2fee56a3f531a7377800bd3f0f3649ac6083bfbfd6a27edb2bb83d" Nov 26 07:06:56 crc kubenswrapper[4492]: I1126 07:06:56.220568 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 26 07:06:56 crc kubenswrapper[4492]: I1126 07:06:56.243603 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 26 07:06:56 crc kubenswrapper[4492]: E1126 07:06:56.245536 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b4ed9f4a-ed6c-4969-92eb-f310ac991b3c" containerName="ceilometer-notification-agent" Nov 26 07:06:56 crc kubenswrapper[4492]: I1126 07:06:56.245568 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="b4ed9f4a-ed6c-4969-92eb-f310ac991b3c" containerName="ceilometer-notification-agent" Nov 26 07:06:56 crc kubenswrapper[4492]: E1126 07:06:56.245609 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b4ed9f4a-ed6c-4969-92eb-f310ac991b3c" containerName="sg-core" Nov 26 07:06:56 crc kubenswrapper[4492]: I1126 07:06:56.245616 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="b4ed9f4a-ed6c-4969-92eb-f310ac991b3c" containerName="sg-core" Nov 26 07:06:56 crc kubenswrapper[4492]: E1126 07:06:56.245647 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b4ed9f4a-ed6c-4969-92eb-f310ac991b3c" containerName="ceilometer-central-agent" Nov 26 07:06:56 crc kubenswrapper[4492]: I1126 07:06:56.245653 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="b4ed9f4a-ed6c-4969-92eb-f310ac991b3c" containerName="ceilometer-central-agent" Nov 26 07:06:56 crc kubenswrapper[4492]: E1126 07:06:56.245664 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b4ed9f4a-ed6c-4969-92eb-f310ac991b3c" containerName="proxy-httpd" Nov 26 07:06:56 crc kubenswrapper[4492]: I1126 07:06:56.245671 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="b4ed9f4a-ed6c-4969-92eb-f310ac991b3c" containerName="proxy-httpd" Nov 26 07:06:56 crc kubenswrapper[4492]: I1126 07:06:56.247065 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="b4ed9f4a-ed6c-4969-92eb-f310ac991b3c" containerName="ceilometer-notification-agent" Nov 26 07:06:56 crc kubenswrapper[4492]: I1126 07:06:56.247096 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="b4ed9f4a-ed6c-4969-92eb-f310ac991b3c" containerName="proxy-httpd" Nov 26 07:06:56 crc kubenswrapper[4492]: I1126 07:06:56.247104 
4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="b4ed9f4a-ed6c-4969-92eb-f310ac991b3c" containerName="ceilometer-central-agent" Nov 26 07:06:56 crc kubenswrapper[4492]: I1126 07:06:56.247121 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="b4ed9f4a-ed6c-4969-92eb-f310ac991b3c" containerName="sg-core" Nov 26 07:06:56 crc kubenswrapper[4492]: I1126 07:06:56.258283 4492 scope.go:117] "RemoveContainer" containerID="a166002503826650914992b89339d93a57e75f93a6f59f250df8d1fa81c30f6b" Nov 26 07:06:56 crc kubenswrapper[4492]: I1126 07:06:56.260413 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 26 07:06:56 crc kubenswrapper[4492]: I1126 07:06:56.262578 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 26 07:06:56 crc kubenswrapper[4492]: I1126 07:06:56.263049 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 26 07:06:56 crc kubenswrapper[4492]: I1126 07:06:56.270386 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 26 07:06:56 crc kubenswrapper[4492]: I1126 07:06:56.317449 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b3d44e0c-7e22-4a5d-8e2d-b946e676417d-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"b3d44e0c-7e22-4a5d-8e2d-b946e676417d\") " pod="openstack/ceilometer-0" Nov 26 07:06:56 crc kubenswrapper[4492]: I1126 07:06:56.317616 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b3d44e0c-7e22-4a5d-8e2d-b946e676417d-config-data\") pod \"ceilometer-0\" (UID: \"b3d44e0c-7e22-4a5d-8e2d-b946e676417d\") " pod="openstack/ceilometer-0" Nov 26 07:06:56 crc kubenswrapper[4492]: I1126 07:06:56.317733 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b3d44e0c-7e22-4a5d-8e2d-b946e676417d-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"b3d44e0c-7e22-4a5d-8e2d-b946e676417d\") " pod="openstack/ceilometer-0" Nov 26 07:06:56 crc kubenswrapper[4492]: I1126 07:06:56.317798 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b3d44e0c-7e22-4a5d-8e2d-b946e676417d-log-httpd\") pod \"ceilometer-0\" (UID: \"b3d44e0c-7e22-4a5d-8e2d-b946e676417d\") " pod="openstack/ceilometer-0" Nov 26 07:06:56 crc kubenswrapper[4492]: I1126 07:06:56.317868 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7b2k8\" (UniqueName: \"kubernetes.io/projected/b3d44e0c-7e22-4a5d-8e2d-b946e676417d-kube-api-access-7b2k8\") pod \"ceilometer-0\" (UID: \"b3d44e0c-7e22-4a5d-8e2d-b946e676417d\") " pod="openstack/ceilometer-0" Nov 26 07:06:56 crc kubenswrapper[4492]: I1126 07:06:56.317964 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b3d44e0c-7e22-4a5d-8e2d-b946e676417d-run-httpd\") pod \"ceilometer-0\" (UID: \"b3d44e0c-7e22-4a5d-8e2d-b946e676417d\") " pod="openstack/ceilometer-0" Nov 26 07:06:56 crc kubenswrapper[4492]: I1126 07:06:56.318029 4492 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b3d44e0c-7e22-4a5d-8e2d-b946e676417d-scripts\") pod \"ceilometer-0\" (UID: \"b3d44e0c-7e22-4a5d-8e2d-b946e676417d\") " pod="openstack/ceilometer-0" Nov 26 07:06:56 crc kubenswrapper[4492]: I1126 07:06:56.420247 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b3d44e0c-7e22-4a5d-8e2d-b946e676417d-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"b3d44e0c-7e22-4a5d-8e2d-b946e676417d\") " pod="openstack/ceilometer-0" Nov 26 07:06:56 crc kubenswrapper[4492]: I1126 07:06:56.420308 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b3d44e0c-7e22-4a5d-8e2d-b946e676417d-log-httpd\") pod \"ceilometer-0\" (UID: \"b3d44e0c-7e22-4a5d-8e2d-b946e676417d\") " pod="openstack/ceilometer-0" Nov 26 07:06:56 crc kubenswrapper[4492]: I1126 07:06:56.420342 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7b2k8\" (UniqueName: \"kubernetes.io/projected/b3d44e0c-7e22-4a5d-8e2d-b946e676417d-kube-api-access-7b2k8\") pod \"ceilometer-0\" (UID: \"b3d44e0c-7e22-4a5d-8e2d-b946e676417d\") " pod="openstack/ceilometer-0" Nov 26 07:06:56 crc kubenswrapper[4492]: I1126 07:06:56.420405 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b3d44e0c-7e22-4a5d-8e2d-b946e676417d-run-httpd\") pod \"ceilometer-0\" (UID: \"b3d44e0c-7e22-4a5d-8e2d-b946e676417d\") " pod="openstack/ceilometer-0" Nov 26 07:06:56 crc kubenswrapper[4492]: I1126 07:06:56.420431 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b3d44e0c-7e22-4a5d-8e2d-b946e676417d-scripts\") pod \"ceilometer-0\" (UID: \"b3d44e0c-7e22-4a5d-8e2d-b946e676417d\") " pod="openstack/ceilometer-0" Nov 26 07:06:56 crc kubenswrapper[4492]: I1126 07:06:56.420451 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b3d44e0c-7e22-4a5d-8e2d-b946e676417d-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"b3d44e0c-7e22-4a5d-8e2d-b946e676417d\") " pod="openstack/ceilometer-0" Nov 26 07:06:56 crc kubenswrapper[4492]: I1126 07:06:56.420506 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b3d44e0c-7e22-4a5d-8e2d-b946e676417d-config-data\") pod \"ceilometer-0\" (UID: \"b3d44e0c-7e22-4a5d-8e2d-b946e676417d\") " pod="openstack/ceilometer-0" Nov 26 07:06:56 crc kubenswrapper[4492]: I1126 07:06:56.421889 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b3d44e0c-7e22-4a5d-8e2d-b946e676417d-run-httpd\") pod \"ceilometer-0\" (UID: \"b3d44e0c-7e22-4a5d-8e2d-b946e676417d\") " pod="openstack/ceilometer-0" Nov 26 07:06:56 crc kubenswrapper[4492]: I1126 07:06:56.422199 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b3d44e0c-7e22-4a5d-8e2d-b946e676417d-log-httpd\") pod \"ceilometer-0\" (UID: \"b3d44e0c-7e22-4a5d-8e2d-b946e676417d\") " pod="openstack/ceilometer-0" Nov 26 07:06:56 crc kubenswrapper[4492]: I1126 07:06:56.428753 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"scripts\" (UniqueName: \"kubernetes.io/secret/b3d44e0c-7e22-4a5d-8e2d-b946e676417d-scripts\") pod \"ceilometer-0\" (UID: \"b3d44e0c-7e22-4a5d-8e2d-b946e676417d\") " pod="openstack/ceilometer-0" Nov 26 07:06:56 crc kubenswrapper[4492]: I1126 07:06:56.429581 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b3d44e0c-7e22-4a5d-8e2d-b946e676417d-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"b3d44e0c-7e22-4a5d-8e2d-b946e676417d\") " pod="openstack/ceilometer-0" Nov 26 07:06:56 crc kubenswrapper[4492]: I1126 07:06:56.430240 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b3d44e0c-7e22-4a5d-8e2d-b946e676417d-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"b3d44e0c-7e22-4a5d-8e2d-b946e676417d\") " pod="openstack/ceilometer-0" Nov 26 07:06:56 crc kubenswrapper[4492]: I1126 07:06:56.434977 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b3d44e0c-7e22-4a5d-8e2d-b946e676417d-config-data\") pod \"ceilometer-0\" (UID: \"b3d44e0c-7e22-4a5d-8e2d-b946e676417d\") " pod="openstack/ceilometer-0" Nov 26 07:06:56 crc kubenswrapper[4492]: I1126 07:06:56.442241 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7b2k8\" (UniqueName: \"kubernetes.io/projected/b3d44e0c-7e22-4a5d-8e2d-b946e676417d-kube-api-access-7b2k8\") pod \"ceilometer-0\" (UID: \"b3d44e0c-7e22-4a5d-8e2d-b946e676417d\") " pod="openstack/ceilometer-0" Nov 26 07:06:56 crc kubenswrapper[4492]: I1126 07:06:56.474108 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b4ed9f4a-ed6c-4969-92eb-f310ac991b3c" path="/var/lib/kubelet/pods/b4ed9f4a-ed6c-4969-92eb-f310ac991b3c/volumes" Nov 26 07:06:56 crc kubenswrapper[4492]: I1126 07:06:56.580998 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 26 07:06:57 crc kubenswrapper[4492]: I1126 07:06:57.033412 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-engine-85bd8ddcc5-m7gs8"] Nov 26 07:06:57 crc kubenswrapper[4492]: I1126 07:06:57.035612 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-engine-85bd8ddcc5-m7gs8" Nov 26 07:06:57 crc kubenswrapper[4492]: I1126 07:06:57.052568 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-engine-config-data" Nov 26 07:06:57 crc kubenswrapper[4492]: I1126 07:06:57.052997 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-config-data" Nov 26 07:06:57 crc kubenswrapper[4492]: I1126 07:06:57.053269 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-heat-dockercfg-b2k58" Nov 26 07:06:57 crc kubenswrapper[4492]: I1126 07:06:57.118611 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-85bd8ddcc5-m7gs8"] Nov 26 07:06:57 crc kubenswrapper[4492]: I1126 07:06:57.135129 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-smnzp\" (UniqueName: \"kubernetes.io/projected/27be14bd-76c4-41ab-bef2-0b1bcc13e0df-kube-api-access-smnzp\") pod \"heat-engine-85bd8ddcc5-m7gs8\" (UID: \"27be14bd-76c4-41ab-bef2-0b1bcc13e0df\") " pod="openstack/heat-engine-85bd8ddcc5-m7gs8" Nov 26 07:06:57 crc kubenswrapper[4492]: I1126 07:06:57.135438 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/27be14bd-76c4-41ab-bef2-0b1bcc13e0df-config-data-custom\") pod \"heat-engine-85bd8ddcc5-m7gs8\" (UID: \"27be14bd-76c4-41ab-bef2-0b1bcc13e0df\") " pod="openstack/heat-engine-85bd8ddcc5-m7gs8" Nov 26 07:06:57 crc kubenswrapper[4492]: I1126 07:06:57.135524 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/27be14bd-76c4-41ab-bef2-0b1bcc13e0df-config-data\") pod \"heat-engine-85bd8ddcc5-m7gs8\" (UID: \"27be14bd-76c4-41ab-bef2-0b1bcc13e0df\") " pod="openstack/heat-engine-85bd8ddcc5-m7gs8" Nov 26 07:06:57 crc kubenswrapper[4492]: I1126 07:06:57.135631 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/27be14bd-76c4-41ab-bef2-0b1bcc13e0df-combined-ca-bundle\") pod \"heat-engine-85bd8ddcc5-m7gs8\" (UID: \"27be14bd-76c4-41ab-bef2-0b1bcc13e0df\") " pod="openstack/heat-engine-85bd8ddcc5-m7gs8" Nov 26 07:06:57 crc kubenswrapper[4492]: I1126 07:06:57.165675 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b3d44e0c-7e22-4a5d-8e2d-b946e676417d","Type":"ContainerStarted","Data":"dc7d3e0298bc07798ea319e3ec79686e6aa63f3cd49cf535e64bb1a446dcf5e3"} Nov 26 07:06:57 crc kubenswrapper[4492]: I1126 07:06:57.170280 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 26 07:06:57 crc kubenswrapper[4492]: I1126 07:06:57.195118 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-74d7dcb8f-sgmxq"] Nov 26 07:06:57 crc kubenswrapper[4492]: I1126 07:06:57.196813 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-74d7dcb8f-sgmxq" Nov 26 07:06:57 crc kubenswrapper[4492]: I1126 07:06:57.208439 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-74d7dcb8f-sgmxq"] Nov 26 07:06:57 crc kubenswrapper[4492]: I1126 07:06:57.239136 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/27be14bd-76c4-41ab-bef2-0b1bcc13e0df-config-data\") pod \"heat-engine-85bd8ddcc5-m7gs8\" (UID: \"27be14bd-76c4-41ab-bef2-0b1bcc13e0df\") " pod="openstack/heat-engine-85bd8ddcc5-m7gs8" Nov 26 07:06:57 crc kubenswrapper[4492]: I1126 07:06:57.239214 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/27be14bd-76c4-41ab-bef2-0b1bcc13e0df-combined-ca-bundle\") pod \"heat-engine-85bd8ddcc5-m7gs8\" (UID: \"27be14bd-76c4-41ab-bef2-0b1bcc13e0df\") " pod="openstack/heat-engine-85bd8ddcc5-m7gs8" Nov 26 07:06:57 crc kubenswrapper[4492]: I1126 07:06:57.239245 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d0edf899-a089-4aca-8ad3-d8e6f8b8cc14-dns-svc\") pod \"dnsmasq-dns-74d7dcb8f-sgmxq\" (UID: \"d0edf899-a089-4aca-8ad3-d8e6f8b8cc14\") " pod="openstack/dnsmasq-dns-74d7dcb8f-sgmxq" Nov 26 07:06:57 crc kubenswrapper[4492]: I1126 07:06:57.239328 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-smnzp\" (UniqueName: \"kubernetes.io/projected/27be14bd-76c4-41ab-bef2-0b1bcc13e0df-kube-api-access-smnzp\") pod \"heat-engine-85bd8ddcc5-m7gs8\" (UID: \"27be14bd-76c4-41ab-bef2-0b1bcc13e0df\") " pod="openstack/heat-engine-85bd8ddcc5-m7gs8" Nov 26 07:06:57 crc kubenswrapper[4492]: I1126 07:06:57.239368 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d0edf899-a089-4aca-8ad3-d8e6f8b8cc14-ovsdbserver-nb\") pod \"dnsmasq-dns-74d7dcb8f-sgmxq\" (UID: \"d0edf899-a089-4aca-8ad3-d8e6f8b8cc14\") " pod="openstack/dnsmasq-dns-74d7dcb8f-sgmxq" Nov 26 07:06:57 crc kubenswrapper[4492]: I1126 07:06:57.239396 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d0edf899-a089-4aca-8ad3-d8e6f8b8cc14-dns-swift-storage-0\") pod \"dnsmasq-dns-74d7dcb8f-sgmxq\" (UID: \"d0edf899-a089-4aca-8ad3-d8e6f8b8cc14\") " pod="openstack/dnsmasq-dns-74d7dcb8f-sgmxq" Nov 26 07:06:57 crc kubenswrapper[4492]: I1126 07:06:57.239430 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d0edf899-a089-4aca-8ad3-d8e6f8b8cc14-config\") pod \"dnsmasq-dns-74d7dcb8f-sgmxq\" (UID: \"d0edf899-a089-4aca-8ad3-d8e6f8b8cc14\") " pod="openstack/dnsmasq-dns-74d7dcb8f-sgmxq" Nov 26 07:06:57 crc kubenswrapper[4492]: I1126 07:06:57.239449 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nxv5q\" (UniqueName: \"kubernetes.io/projected/d0edf899-a089-4aca-8ad3-d8e6f8b8cc14-kube-api-access-nxv5q\") pod \"dnsmasq-dns-74d7dcb8f-sgmxq\" (UID: \"d0edf899-a089-4aca-8ad3-d8e6f8b8cc14\") " pod="openstack/dnsmasq-dns-74d7dcb8f-sgmxq" Nov 26 07:06:57 crc kubenswrapper[4492]: I1126 07:06:57.239481 4492 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d0edf899-a089-4aca-8ad3-d8e6f8b8cc14-ovsdbserver-sb\") pod \"dnsmasq-dns-74d7dcb8f-sgmxq\" (UID: \"d0edf899-a089-4aca-8ad3-d8e6f8b8cc14\") " pod="openstack/dnsmasq-dns-74d7dcb8f-sgmxq" Nov 26 07:06:57 crc kubenswrapper[4492]: I1126 07:06:57.239507 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/27be14bd-76c4-41ab-bef2-0b1bcc13e0df-config-data-custom\") pod \"heat-engine-85bd8ddcc5-m7gs8\" (UID: \"27be14bd-76c4-41ab-bef2-0b1bcc13e0df\") " pod="openstack/heat-engine-85bd8ddcc5-m7gs8" Nov 26 07:06:57 crc kubenswrapper[4492]: I1126 07:06:57.267607 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/27be14bd-76c4-41ab-bef2-0b1bcc13e0df-config-data\") pod \"heat-engine-85bd8ddcc5-m7gs8\" (UID: \"27be14bd-76c4-41ab-bef2-0b1bcc13e0df\") " pod="openstack/heat-engine-85bd8ddcc5-m7gs8" Nov 26 07:06:57 crc kubenswrapper[4492]: I1126 07:06:57.268022 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/27be14bd-76c4-41ab-bef2-0b1bcc13e0df-combined-ca-bundle\") pod \"heat-engine-85bd8ddcc5-m7gs8\" (UID: \"27be14bd-76c4-41ab-bef2-0b1bcc13e0df\") " pod="openstack/heat-engine-85bd8ddcc5-m7gs8" Nov 26 07:06:57 crc kubenswrapper[4492]: I1126 07:06:57.272254 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-cfnapi-7d7c7fc74d-977pm"] Nov 26 07:06:57 crc kubenswrapper[4492]: I1126 07:06:57.272778 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-smnzp\" (UniqueName: \"kubernetes.io/projected/27be14bd-76c4-41ab-bef2-0b1bcc13e0df-kube-api-access-smnzp\") pod \"heat-engine-85bd8ddcc5-m7gs8\" (UID: \"27be14bd-76c4-41ab-bef2-0b1bcc13e0df\") " pod="openstack/heat-engine-85bd8ddcc5-m7gs8" Nov 26 07:06:57 crc kubenswrapper[4492]: I1126 07:06:57.273551 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/27be14bd-76c4-41ab-bef2-0b1bcc13e0df-config-data-custom\") pod \"heat-engine-85bd8ddcc5-m7gs8\" (UID: \"27be14bd-76c4-41ab-bef2-0b1bcc13e0df\") " pod="openstack/heat-engine-85bd8ddcc5-m7gs8" Nov 26 07:06:57 crc kubenswrapper[4492]: I1126 07:06:57.273967 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-7d7c7fc74d-977pm" Nov 26 07:06:57 crc kubenswrapper[4492]: I1126 07:06:57.281811 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-cfnapi-config-data" Nov 26 07:06:57 crc kubenswrapper[4492]: I1126 07:06:57.288456 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-7d7c7fc74d-977pm"] Nov 26 07:06:57 crc kubenswrapper[4492]: I1126 07:06:57.335479 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-api-56748689fb-22p2v"] Nov 26 07:06:57 crc kubenswrapper[4492]: I1126 07:06:57.336845 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-api-56748689fb-22p2v" Nov 26 07:06:57 crc kubenswrapper[4492]: I1126 07:06:57.339164 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-api-config-data" Nov 26 07:06:57 crc kubenswrapper[4492]: I1126 07:06:57.342522 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5d9c5649-030f-45ea-9683-0570ed0435ab-config-data\") pod \"heat-cfnapi-7d7c7fc74d-977pm\" (UID: \"5d9c5649-030f-45ea-9683-0570ed0435ab\") " pod="openstack/heat-cfnapi-7d7c7fc74d-977pm" Nov 26 07:06:57 crc kubenswrapper[4492]: I1126 07:06:57.342595 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d0edf899-a089-4aca-8ad3-d8e6f8b8cc14-ovsdbserver-nb\") pod \"dnsmasq-dns-74d7dcb8f-sgmxq\" (UID: \"d0edf899-a089-4aca-8ad3-d8e6f8b8cc14\") " pod="openstack/dnsmasq-dns-74d7dcb8f-sgmxq" Nov 26 07:06:57 crc kubenswrapper[4492]: I1126 07:06:57.342620 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d0edf899-a089-4aca-8ad3-d8e6f8b8cc14-dns-swift-storage-0\") pod \"dnsmasq-dns-74d7dcb8f-sgmxq\" (UID: \"d0edf899-a089-4aca-8ad3-d8e6f8b8cc14\") " pod="openstack/dnsmasq-dns-74d7dcb8f-sgmxq" Nov 26 07:06:57 crc kubenswrapper[4492]: I1126 07:06:57.342654 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5a2cfab6-77d9-4d88-aee3-c0b10ead6ff6-combined-ca-bundle\") pod \"heat-api-56748689fb-22p2v\" (UID: \"5a2cfab6-77d9-4d88-aee3-c0b10ead6ff6\") " pod="openstack/heat-api-56748689fb-22p2v" Nov 26 07:06:57 crc kubenswrapper[4492]: I1126 07:06:57.342681 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d0edf899-a089-4aca-8ad3-d8e6f8b8cc14-config\") pod \"dnsmasq-dns-74d7dcb8f-sgmxq\" (UID: \"d0edf899-a089-4aca-8ad3-d8e6f8b8cc14\") " pod="openstack/dnsmasq-dns-74d7dcb8f-sgmxq" Nov 26 07:06:57 crc kubenswrapper[4492]: I1126 07:06:57.342712 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nxv5q\" (UniqueName: \"kubernetes.io/projected/d0edf899-a089-4aca-8ad3-d8e6f8b8cc14-kube-api-access-nxv5q\") pod \"dnsmasq-dns-74d7dcb8f-sgmxq\" (UID: \"d0edf899-a089-4aca-8ad3-d8e6f8b8cc14\") " pod="openstack/dnsmasq-dns-74d7dcb8f-sgmxq" Nov 26 07:06:57 crc kubenswrapper[4492]: I1126 07:06:57.342747 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d0edf899-a089-4aca-8ad3-d8e6f8b8cc14-ovsdbserver-sb\") pod \"dnsmasq-dns-74d7dcb8f-sgmxq\" (UID: \"d0edf899-a089-4aca-8ad3-d8e6f8b8cc14\") " pod="openstack/dnsmasq-dns-74d7dcb8f-sgmxq" Nov 26 07:06:57 crc kubenswrapper[4492]: I1126 07:06:57.342770 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5d9c5649-030f-45ea-9683-0570ed0435ab-combined-ca-bundle\") pod \"heat-cfnapi-7d7c7fc74d-977pm\" (UID: \"5d9c5649-030f-45ea-9683-0570ed0435ab\") " pod="openstack/heat-cfnapi-7d7c7fc74d-977pm" Nov 26 07:06:57 crc kubenswrapper[4492]: I1126 07:06:57.342787 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"kube-api-access-dd46d\" (UniqueName: \"kubernetes.io/projected/5d9c5649-030f-45ea-9683-0570ed0435ab-kube-api-access-dd46d\") pod \"heat-cfnapi-7d7c7fc74d-977pm\" (UID: \"5d9c5649-030f-45ea-9683-0570ed0435ab\") " pod="openstack/heat-cfnapi-7d7c7fc74d-977pm" Nov 26 07:06:57 crc kubenswrapper[4492]: I1126 07:06:57.342819 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5d9c5649-030f-45ea-9683-0570ed0435ab-config-data-custom\") pod \"heat-cfnapi-7d7c7fc74d-977pm\" (UID: \"5d9c5649-030f-45ea-9683-0570ed0435ab\") " pod="openstack/heat-cfnapi-7d7c7fc74d-977pm" Nov 26 07:06:57 crc kubenswrapper[4492]: I1126 07:06:57.342838 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d0edf899-a089-4aca-8ad3-d8e6f8b8cc14-dns-svc\") pod \"dnsmasq-dns-74d7dcb8f-sgmxq\" (UID: \"d0edf899-a089-4aca-8ad3-d8e6f8b8cc14\") " pod="openstack/dnsmasq-dns-74d7dcb8f-sgmxq" Nov 26 07:06:57 crc kubenswrapper[4492]: I1126 07:06:57.342855 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hww7r\" (UniqueName: \"kubernetes.io/projected/5a2cfab6-77d9-4d88-aee3-c0b10ead6ff6-kube-api-access-hww7r\") pod \"heat-api-56748689fb-22p2v\" (UID: \"5a2cfab6-77d9-4d88-aee3-c0b10ead6ff6\") " pod="openstack/heat-api-56748689fb-22p2v" Nov 26 07:06:57 crc kubenswrapper[4492]: I1126 07:06:57.342875 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5a2cfab6-77d9-4d88-aee3-c0b10ead6ff6-config-data-custom\") pod \"heat-api-56748689fb-22p2v\" (UID: \"5a2cfab6-77d9-4d88-aee3-c0b10ead6ff6\") " pod="openstack/heat-api-56748689fb-22p2v" Nov 26 07:06:57 crc kubenswrapper[4492]: I1126 07:06:57.342905 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5a2cfab6-77d9-4d88-aee3-c0b10ead6ff6-config-data\") pod \"heat-api-56748689fb-22p2v\" (UID: \"5a2cfab6-77d9-4d88-aee3-c0b10ead6ff6\") " pod="openstack/heat-api-56748689fb-22p2v" Nov 26 07:06:57 crc kubenswrapper[4492]: I1126 07:06:57.343739 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d0edf899-a089-4aca-8ad3-d8e6f8b8cc14-ovsdbserver-nb\") pod \"dnsmasq-dns-74d7dcb8f-sgmxq\" (UID: \"d0edf899-a089-4aca-8ad3-d8e6f8b8cc14\") " pod="openstack/dnsmasq-dns-74d7dcb8f-sgmxq" Nov 26 07:06:57 crc kubenswrapper[4492]: I1126 07:06:57.345507 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d0edf899-a089-4aca-8ad3-d8e6f8b8cc14-config\") pod \"dnsmasq-dns-74d7dcb8f-sgmxq\" (UID: \"d0edf899-a089-4aca-8ad3-d8e6f8b8cc14\") " pod="openstack/dnsmasq-dns-74d7dcb8f-sgmxq" Nov 26 07:06:57 crc kubenswrapper[4492]: I1126 07:06:57.346267 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d0edf899-a089-4aca-8ad3-d8e6f8b8cc14-dns-svc\") pod \"dnsmasq-dns-74d7dcb8f-sgmxq\" (UID: \"d0edf899-a089-4aca-8ad3-d8e6f8b8cc14\") " pod="openstack/dnsmasq-dns-74d7dcb8f-sgmxq" Nov 26 07:06:57 crc kubenswrapper[4492]: I1126 07:06:57.348395 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: 
\"kubernetes.io/configmap/d0edf899-a089-4aca-8ad3-d8e6f8b8cc14-dns-swift-storage-0\") pod \"dnsmasq-dns-74d7dcb8f-sgmxq\" (UID: \"d0edf899-a089-4aca-8ad3-d8e6f8b8cc14\") " pod="openstack/dnsmasq-dns-74d7dcb8f-sgmxq" Nov 26 07:06:57 crc kubenswrapper[4492]: I1126 07:06:57.348449 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d0edf899-a089-4aca-8ad3-d8e6f8b8cc14-ovsdbserver-sb\") pod \"dnsmasq-dns-74d7dcb8f-sgmxq\" (UID: \"d0edf899-a089-4aca-8ad3-d8e6f8b8cc14\") " pod="openstack/dnsmasq-dns-74d7dcb8f-sgmxq" Nov 26 07:06:57 crc kubenswrapper[4492]: I1126 07:06:57.354510 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-85bd8ddcc5-m7gs8" Nov 26 07:06:57 crc kubenswrapper[4492]: I1126 07:06:57.370864 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nxv5q\" (UniqueName: \"kubernetes.io/projected/d0edf899-a089-4aca-8ad3-d8e6f8b8cc14-kube-api-access-nxv5q\") pod \"dnsmasq-dns-74d7dcb8f-sgmxq\" (UID: \"d0edf899-a089-4aca-8ad3-d8e6f8b8cc14\") " pod="openstack/dnsmasq-dns-74d7dcb8f-sgmxq" Nov 26 07:06:57 crc kubenswrapper[4492]: I1126 07:06:57.401609 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-56748689fb-22p2v"] Nov 26 07:06:57 crc kubenswrapper[4492]: I1126 07:06:57.446873 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5d9c5649-030f-45ea-9683-0570ed0435ab-combined-ca-bundle\") pod \"heat-cfnapi-7d7c7fc74d-977pm\" (UID: \"5d9c5649-030f-45ea-9683-0570ed0435ab\") " pod="openstack/heat-cfnapi-7d7c7fc74d-977pm" Nov 26 07:06:57 crc kubenswrapper[4492]: I1126 07:06:57.447491 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dd46d\" (UniqueName: \"kubernetes.io/projected/5d9c5649-030f-45ea-9683-0570ed0435ab-kube-api-access-dd46d\") pod \"heat-cfnapi-7d7c7fc74d-977pm\" (UID: \"5d9c5649-030f-45ea-9683-0570ed0435ab\") " pod="openstack/heat-cfnapi-7d7c7fc74d-977pm" Nov 26 07:06:57 crc kubenswrapper[4492]: I1126 07:06:57.447548 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5d9c5649-030f-45ea-9683-0570ed0435ab-config-data-custom\") pod \"heat-cfnapi-7d7c7fc74d-977pm\" (UID: \"5d9c5649-030f-45ea-9683-0570ed0435ab\") " pod="openstack/heat-cfnapi-7d7c7fc74d-977pm" Nov 26 07:06:57 crc kubenswrapper[4492]: I1126 07:06:57.447566 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hww7r\" (UniqueName: \"kubernetes.io/projected/5a2cfab6-77d9-4d88-aee3-c0b10ead6ff6-kube-api-access-hww7r\") pod \"heat-api-56748689fb-22p2v\" (UID: \"5a2cfab6-77d9-4d88-aee3-c0b10ead6ff6\") " pod="openstack/heat-api-56748689fb-22p2v" Nov 26 07:06:57 crc kubenswrapper[4492]: I1126 07:06:57.447584 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5a2cfab6-77d9-4d88-aee3-c0b10ead6ff6-config-data-custom\") pod \"heat-api-56748689fb-22p2v\" (UID: \"5a2cfab6-77d9-4d88-aee3-c0b10ead6ff6\") " pod="openstack/heat-api-56748689fb-22p2v" Nov 26 07:06:57 crc kubenswrapper[4492]: I1126 07:06:57.447609 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/5a2cfab6-77d9-4d88-aee3-c0b10ead6ff6-config-data\") pod \"heat-api-56748689fb-22p2v\" (UID: \"5a2cfab6-77d9-4d88-aee3-c0b10ead6ff6\") " pod="openstack/heat-api-56748689fb-22p2v" Nov 26 07:06:57 crc kubenswrapper[4492]: I1126 07:06:57.447648 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5d9c5649-030f-45ea-9683-0570ed0435ab-config-data\") pod \"heat-cfnapi-7d7c7fc74d-977pm\" (UID: \"5d9c5649-030f-45ea-9683-0570ed0435ab\") " pod="openstack/heat-cfnapi-7d7c7fc74d-977pm" Nov 26 07:06:57 crc kubenswrapper[4492]: I1126 07:06:57.447718 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5a2cfab6-77d9-4d88-aee3-c0b10ead6ff6-combined-ca-bundle\") pod \"heat-api-56748689fb-22p2v\" (UID: \"5a2cfab6-77d9-4d88-aee3-c0b10ead6ff6\") " pod="openstack/heat-api-56748689fb-22p2v" Nov 26 07:06:57 crc kubenswrapper[4492]: I1126 07:06:57.451334 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5d9c5649-030f-45ea-9683-0570ed0435ab-combined-ca-bundle\") pod \"heat-cfnapi-7d7c7fc74d-977pm\" (UID: \"5d9c5649-030f-45ea-9683-0570ed0435ab\") " pod="openstack/heat-cfnapi-7d7c7fc74d-977pm" Nov 26 07:06:57 crc kubenswrapper[4492]: I1126 07:06:57.455295 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5d9c5649-030f-45ea-9683-0570ed0435ab-config-data-custom\") pod \"heat-cfnapi-7d7c7fc74d-977pm\" (UID: \"5d9c5649-030f-45ea-9683-0570ed0435ab\") " pod="openstack/heat-cfnapi-7d7c7fc74d-977pm" Nov 26 07:06:57 crc kubenswrapper[4492]: I1126 07:06:57.457714 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5a2cfab6-77d9-4d88-aee3-c0b10ead6ff6-combined-ca-bundle\") pod \"heat-api-56748689fb-22p2v\" (UID: \"5a2cfab6-77d9-4d88-aee3-c0b10ead6ff6\") " pod="openstack/heat-api-56748689fb-22p2v" Nov 26 07:06:57 crc kubenswrapper[4492]: I1126 07:06:57.459125 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5a2cfab6-77d9-4d88-aee3-c0b10ead6ff6-config-data-custom\") pod \"heat-api-56748689fb-22p2v\" (UID: \"5a2cfab6-77d9-4d88-aee3-c0b10ead6ff6\") " pod="openstack/heat-api-56748689fb-22p2v" Nov 26 07:06:57 crc kubenswrapper[4492]: I1126 07:06:57.461864 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5a2cfab6-77d9-4d88-aee3-c0b10ead6ff6-config-data\") pod \"heat-api-56748689fb-22p2v\" (UID: \"5a2cfab6-77d9-4d88-aee3-c0b10ead6ff6\") " pod="openstack/heat-api-56748689fb-22p2v" Nov 26 07:06:57 crc kubenswrapper[4492]: I1126 07:06:57.467711 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5d9c5649-030f-45ea-9683-0570ed0435ab-config-data\") pod \"heat-cfnapi-7d7c7fc74d-977pm\" (UID: \"5d9c5649-030f-45ea-9683-0570ed0435ab\") " pod="openstack/heat-cfnapi-7d7c7fc74d-977pm" Nov 26 07:06:57 crc kubenswrapper[4492]: I1126 07:06:57.470782 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hww7r\" (UniqueName: \"kubernetes.io/projected/5a2cfab6-77d9-4d88-aee3-c0b10ead6ff6-kube-api-access-hww7r\") pod \"heat-api-56748689fb-22p2v\" (UID: 
\"5a2cfab6-77d9-4d88-aee3-c0b10ead6ff6\") " pod="openstack/heat-api-56748689fb-22p2v" Nov 26 07:06:57 crc kubenswrapper[4492]: I1126 07:06:57.470835 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dd46d\" (UniqueName: \"kubernetes.io/projected/5d9c5649-030f-45ea-9683-0570ed0435ab-kube-api-access-dd46d\") pod \"heat-cfnapi-7d7c7fc74d-977pm\" (UID: \"5d9c5649-030f-45ea-9683-0570ed0435ab\") " pod="openstack/heat-cfnapi-7d7c7fc74d-977pm" Nov 26 07:06:57 crc kubenswrapper[4492]: I1126 07:06:57.518294 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-74d7dcb8f-sgmxq" Nov 26 07:06:57 crc kubenswrapper[4492]: I1126 07:06:57.666843 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-7d7c7fc74d-977pm" Nov 26 07:06:57 crc kubenswrapper[4492]: I1126 07:06:57.685893 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-56748689fb-22p2v" Nov 26 07:06:57 crc kubenswrapper[4492]: I1126 07:06:57.961062 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-85bd8ddcc5-m7gs8"] Nov 26 07:06:58 crc kubenswrapper[4492]: I1126 07:06:58.186433 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b3d44e0c-7e22-4a5d-8e2d-b946e676417d","Type":"ContainerStarted","Data":"668b2bb36cb1b17711057eca172a0ad78a9bdd91556a97cb0b80daff59f82706"} Nov 26 07:06:58 crc kubenswrapper[4492]: I1126 07:06:58.195536 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-85bd8ddcc5-m7gs8" event={"ID":"27be14bd-76c4-41ab-bef2-0b1bcc13e0df","Type":"ContainerStarted","Data":"f0fd6a44937d8fd3b20a558da35f27d8a52d11fe50191816135fc613bf955950"} Nov 26 07:06:58 crc kubenswrapper[4492]: I1126 07:06:58.232901 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-74d7dcb8f-sgmxq"] Nov 26 07:06:58 crc kubenswrapper[4492]: W1126 07:06:58.242187 4492 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd0edf899_a089_4aca_8ad3_d8e6f8b8cc14.slice/crio-d29b4479f6f3b9325622d7e437409b90c32911b70f6add57b8711428951cf6b3 WatchSource:0}: Error finding container d29b4479f6f3b9325622d7e437409b90c32911b70f6add57b8711428951cf6b3: Status 404 returned error can't find the container with id d29b4479f6f3b9325622d7e437409b90c32911b70f6add57b8711428951cf6b3 Nov 26 07:06:58 crc kubenswrapper[4492]: I1126 07:06:58.407901 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-56748689fb-22p2v"] Nov 26 07:06:58 crc kubenswrapper[4492]: I1126 07:06:58.520285 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-7d7c7fc74d-977pm"] Nov 26 07:06:58 crc kubenswrapper[4492]: W1126 07:06:58.554956 4492 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5d9c5649_030f_45ea_9683_0570ed0435ab.slice/crio-571a156be39fe51f0b7f3be32d420a94c1310f0be117d19b0fe987b1dc15501d WatchSource:0}: Error finding container 571a156be39fe51f0b7f3be32d420a94c1310f0be117d19b0fe987b1dc15501d: Status 404 returned error can't find the container with id 571a156be39fe51f0b7f3be32d420a94c1310f0be117d19b0fe987b1dc15501d Nov 26 07:06:59 crc kubenswrapper[4492]: I1126 07:06:59.204015 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-85bd8ddcc5-m7gs8" 
event={"ID":"27be14bd-76c4-41ab-bef2-0b1bcc13e0df","Type":"ContainerStarted","Data":"040c72d181fef2f419d296b4226ad9596759b979def4f787fe5e843dcafb65e6"} Nov 26 07:06:59 crc kubenswrapper[4492]: I1126 07:06:59.204343 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-engine-85bd8ddcc5-m7gs8" Nov 26 07:06:59 crc kubenswrapper[4492]: I1126 07:06:59.204924 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-7d7c7fc74d-977pm" event={"ID":"5d9c5649-030f-45ea-9683-0570ed0435ab","Type":"ContainerStarted","Data":"571a156be39fe51f0b7f3be32d420a94c1310f0be117d19b0fe987b1dc15501d"} Nov 26 07:06:59 crc kubenswrapper[4492]: I1126 07:06:59.206314 4492 generic.go:334] "Generic (PLEG): container finished" podID="d0edf899-a089-4aca-8ad3-d8e6f8b8cc14" containerID="f02aa7f846834abe0c9f8f3dd208d46c5ced272ebc593add3e506378268ec9da" exitCode=0 Nov 26 07:06:59 crc kubenswrapper[4492]: I1126 07:06:59.206355 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-74d7dcb8f-sgmxq" event={"ID":"d0edf899-a089-4aca-8ad3-d8e6f8b8cc14","Type":"ContainerDied","Data":"f02aa7f846834abe0c9f8f3dd208d46c5ced272ebc593add3e506378268ec9da"} Nov 26 07:06:59 crc kubenswrapper[4492]: I1126 07:06:59.206373 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-74d7dcb8f-sgmxq" event={"ID":"d0edf899-a089-4aca-8ad3-d8e6f8b8cc14","Type":"ContainerStarted","Data":"d29b4479f6f3b9325622d7e437409b90c32911b70f6add57b8711428951cf6b3"} Nov 26 07:06:59 crc kubenswrapper[4492]: I1126 07:06:59.209016 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-56748689fb-22p2v" event={"ID":"5a2cfab6-77d9-4d88-aee3-c0b10ead6ff6","Type":"ContainerStarted","Data":"583f527977da4112889ccb26766da6d0355da5c45d726fb0df0b2a7ab2f33a46"} Nov 26 07:06:59 crc kubenswrapper[4492]: I1126 07:06:59.211069 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b3d44e0c-7e22-4a5d-8e2d-b946e676417d","Type":"ContainerStarted","Data":"d67947dcdc5f696f6f41a22fb29282515796fecda0296cb403bc65ebbf15cf00"} Nov 26 07:06:59 crc kubenswrapper[4492]: I1126 07:06:59.228469 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-engine-85bd8ddcc5-m7gs8" podStartSLOduration=2.228452794 podStartE2EDuration="2.228452794s" podCreationTimestamp="2025-11-26 07:06:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:06:59.223146668 +0000 UTC m=+1115.107034966" watchObservedRunningTime="2025-11-26 07:06:59.228452794 +0000 UTC m=+1115.112341093" Nov 26 07:07:00 crc kubenswrapper[4492]: I1126 07:07:00.233037 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-74d7dcb8f-sgmxq" event={"ID":"d0edf899-a089-4aca-8ad3-d8e6f8b8cc14","Type":"ContainerStarted","Data":"3195d622466a8c8147de6e39591bbec307d6b9523bbcfc2ea741d0b8bcd7dc3a"} Nov 26 07:07:00 crc kubenswrapper[4492]: I1126 07:07:00.233886 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-74d7dcb8f-sgmxq" Nov 26 07:07:00 crc kubenswrapper[4492]: I1126 07:07:00.242651 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b3d44e0c-7e22-4a5d-8e2d-b946e676417d","Type":"ContainerStarted","Data":"bdec316493e10dfa4453c2100c9906ac89eab1d8422ceaec9088e4c01199ac09"} Nov 26 07:07:00 crc kubenswrapper[4492]: I1126 
07:07:00.264895 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-74d7dcb8f-sgmxq" podStartSLOduration=3.264885757 podStartE2EDuration="3.264885757s" podCreationTimestamp="2025-11-26 07:06:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:07:00.256835061 +0000 UTC m=+1116.140723359" watchObservedRunningTime="2025-11-26 07:07:00.264885757 +0000 UTC m=+1116.148774055" Nov 26 07:07:01 crc kubenswrapper[4492]: I1126 07:07:01.261096 4492 generic.go:334] "Generic (PLEG): container finished" podID="a15ec528-9195-4dfe-95b7-e30a44f74b44" containerID="1ae7aee7de996ae15f21a63313f4987ac66b6771692515d2a9f4fbf55b2e1331" exitCode=137 Nov 26 07:07:01 crc kubenswrapper[4492]: I1126 07:07:01.263193 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-7bb6557f96-rgc7g" event={"ID":"a15ec528-9195-4dfe-95b7-e30a44f74b44","Type":"ContainerDied","Data":"1ae7aee7de996ae15f21a63313f4987ac66b6771692515d2a9f4fbf55b2e1331"} Nov 26 07:07:02 crc kubenswrapper[4492]: I1126 07:07:02.808707 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-7bb6557f96-rgc7g" Nov 26 07:07:02 crc kubenswrapper[4492]: I1126 07:07:02.928048 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/a15ec528-9195-4dfe-95b7-e30a44f74b44-horizon-tls-certs\") pod \"a15ec528-9195-4dfe-95b7-e30a44f74b44\" (UID: \"a15ec528-9195-4dfe-95b7-e30a44f74b44\") " Nov 26 07:07:02 crc kubenswrapper[4492]: I1126 07:07:02.928127 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/a15ec528-9195-4dfe-95b7-e30a44f74b44-config-data\") pod \"a15ec528-9195-4dfe-95b7-e30a44f74b44\" (UID: \"a15ec528-9195-4dfe-95b7-e30a44f74b44\") " Nov 26 07:07:02 crc kubenswrapper[4492]: I1126 07:07:02.928159 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/a15ec528-9195-4dfe-95b7-e30a44f74b44-horizon-secret-key\") pod \"a15ec528-9195-4dfe-95b7-e30a44f74b44\" (UID: \"a15ec528-9195-4dfe-95b7-e30a44f74b44\") " Nov 26 07:07:02 crc kubenswrapper[4492]: I1126 07:07:02.928302 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a15ec528-9195-4dfe-95b7-e30a44f74b44-combined-ca-bundle\") pod \"a15ec528-9195-4dfe-95b7-e30a44f74b44\" (UID: \"a15ec528-9195-4dfe-95b7-e30a44f74b44\") " Nov 26 07:07:02 crc kubenswrapper[4492]: I1126 07:07:02.928352 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a15ec528-9195-4dfe-95b7-e30a44f74b44-scripts\") pod \"a15ec528-9195-4dfe-95b7-e30a44f74b44\" (UID: \"a15ec528-9195-4dfe-95b7-e30a44f74b44\") " Nov 26 07:07:02 crc kubenswrapper[4492]: I1126 07:07:02.928390 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a15ec528-9195-4dfe-95b7-e30a44f74b44-logs\") pod \"a15ec528-9195-4dfe-95b7-e30a44f74b44\" (UID: \"a15ec528-9195-4dfe-95b7-e30a44f74b44\") " Nov 26 07:07:02 crc kubenswrapper[4492]: I1126 07:07:02.928430 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z25fk\" 
(UniqueName: \"kubernetes.io/projected/a15ec528-9195-4dfe-95b7-e30a44f74b44-kube-api-access-z25fk\") pod \"a15ec528-9195-4dfe-95b7-e30a44f74b44\" (UID: \"a15ec528-9195-4dfe-95b7-e30a44f74b44\") " Nov 26 07:07:02 crc kubenswrapper[4492]: I1126 07:07:02.931588 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a15ec528-9195-4dfe-95b7-e30a44f74b44-logs" (OuterVolumeSpecName: "logs") pod "a15ec528-9195-4dfe-95b7-e30a44f74b44" (UID: "a15ec528-9195-4dfe-95b7-e30a44f74b44"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:07:02 crc kubenswrapper[4492]: I1126 07:07:02.956327 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a15ec528-9195-4dfe-95b7-e30a44f74b44-kube-api-access-z25fk" (OuterVolumeSpecName: "kube-api-access-z25fk") pod "a15ec528-9195-4dfe-95b7-e30a44f74b44" (UID: "a15ec528-9195-4dfe-95b7-e30a44f74b44"). InnerVolumeSpecName "kube-api-access-z25fk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:07:02 crc kubenswrapper[4492]: I1126 07:07:02.966978 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a15ec528-9195-4dfe-95b7-e30a44f74b44-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "a15ec528-9195-4dfe-95b7-e30a44f74b44" (UID: "a15ec528-9195-4dfe-95b7-e30a44f74b44"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:07:03 crc kubenswrapper[4492]: I1126 07:07:03.014746 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a15ec528-9195-4dfe-95b7-e30a44f74b44-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a15ec528-9195-4dfe-95b7-e30a44f74b44" (UID: "a15ec528-9195-4dfe-95b7-e30a44f74b44"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:07:03 crc kubenswrapper[4492]: I1126 07:07:03.019788 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a15ec528-9195-4dfe-95b7-e30a44f74b44-config-data" (OuterVolumeSpecName: "config-data") pod "a15ec528-9195-4dfe-95b7-e30a44f74b44" (UID: "a15ec528-9195-4dfe-95b7-e30a44f74b44"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:07:03 crc kubenswrapper[4492]: I1126 07:07:03.032691 4492 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a15ec528-9195-4dfe-95b7-e30a44f74b44-logs\") on node \"crc\" DevicePath \"\"" Nov 26 07:07:03 crc kubenswrapper[4492]: I1126 07:07:03.032722 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z25fk\" (UniqueName: \"kubernetes.io/projected/a15ec528-9195-4dfe-95b7-e30a44f74b44-kube-api-access-z25fk\") on node \"crc\" DevicePath \"\"" Nov 26 07:07:03 crc kubenswrapper[4492]: I1126 07:07:03.032736 4492 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/a15ec528-9195-4dfe-95b7-e30a44f74b44-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 07:07:03 crc kubenswrapper[4492]: I1126 07:07:03.032745 4492 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/a15ec528-9195-4dfe-95b7-e30a44f74b44-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Nov 26 07:07:03 crc kubenswrapper[4492]: I1126 07:07:03.032782 4492 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a15ec528-9195-4dfe-95b7-e30a44f74b44-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:07:03 crc kubenswrapper[4492]: I1126 07:07:03.035325 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a15ec528-9195-4dfe-95b7-e30a44f74b44-scripts" (OuterVolumeSpecName: "scripts") pod "a15ec528-9195-4dfe-95b7-e30a44f74b44" (UID: "a15ec528-9195-4dfe-95b7-e30a44f74b44"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:07:03 crc kubenswrapper[4492]: I1126 07:07:03.066703 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a15ec528-9195-4dfe-95b7-e30a44f74b44-horizon-tls-certs" (OuterVolumeSpecName: "horizon-tls-certs") pod "a15ec528-9195-4dfe-95b7-e30a44f74b44" (UID: "a15ec528-9195-4dfe-95b7-e30a44f74b44"). InnerVolumeSpecName "horizon-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:07:03 crc kubenswrapper[4492]: I1126 07:07:03.136062 4492 reconciler_common.go:293] "Volume detached for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/a15ec528-9195-4dfe-95b7-e30a44f74b44-horizon-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 07:07:03 crc kubenswrapper[4492]: I1126 07:07:03.136099 4492 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a15ec528-9195-4dfe-95b7-e30a44f74b44-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 07:07:03 crc kubenswrapper[4492]: I1126 07:07:03.283366 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-7d7c7fc74d-977pm" event={"ID":"5d9c5649-030f-45ea-9683-0570ed0435ab","Type":"ContainerStarted","Data":"99b27bf3d24b7dc4a90e43417ff1417d55f643665789ebe59138925bcec0a081"} Nov 26 07:07:03 crc kubenswrapper[4492]: I1126 07:07:03.284307 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-cfnapi-7d7c7fc74d-977pm" Nov 26 07:07:03 crc kubenswrapper[4492]: I1126 07:07:03.285517 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-56748689fb-22p2v" event={"ID":"5a2cfab6-77d9-4d88-aee3-c0b10ead6ff6","Type":"ContainerStarted","Data":"4d8c2ce102cfea313d89a6074e18790072033b9124492ddc9d1c8b32cb2db770"} Nov 26 07:07:03 crc kubenswrapper[4492]: I1126 07:07:03.285916 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-api-56748689fb-22p2v" Nov 26 07:07:03 crc kubenswrapper[4492]: I1126 07:07:03.287772 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b3d44e0c-7e22-4a5d-8e2d-b946e676417d","Type":"ContainerStarted","Data":"8ecf28d6b52e685c2180672cf54db50c38cfa7a629469233361c43d6f8be3b7a"} Nov 26 07:07:03 crc kubenswrapper[4492]: I1126 07:07:03.288253 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 26 07:07:03 crc kubenswrapper[4492]: I1126 07:07:03.289844 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-7bb6557f96-rgc7g" event={"ID":"a15ec528-9195-4dfe-95b7-e30a44f74b44","Type":"ContainerDied","Data":"85e4d5fcd357f218da5d70be2a09539ee24903ed48cfbf53ab2597b6149b8bf4"} Nov 26 07:07:03 crc kubenswrapper[4492]: I1126 07:07:03.289875 4492 scope.go:117] "RemoveContainer" containerID="43fafd6a195677c69054be8121016986713c956dc138b464b122d51ce9a8af53" Nov 26 07:07:03 crc kubenswrapper[4492]: I1126 07:07:03.289920 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-7bb6557f96-rgc7g" Nov 26 07:07:03 crc kubenswrapper[4492]: I1126 07:07:03.326839 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-cfnapi-7d7c7fc74d-977pm" podStartSLOduration=2.228636679 podStartE2EDuration="6.326821528s" podCreationTimestamp="2025-11-26 07:06:57 +0000 UTC" firstStartedPulling="2025-11-26 07:06:58.562216229 +0000 UTC m=+1114.446104527" lastFinishedPulling="2025-11-26 07:07:02.660401078 +0000 UTC m=+1118.544289376" observedRunningTime="2025-11-26 07:07:03.312612925 +0000 UTC m=+1119.196501213" watchObservedRunningTime="2025-11-26 07:07:03.326821528 +0000 UTC m=+1119.210709827" Nov 26 07:07:03 crc kubenswrapper[4492]: I1126 07:07:03.408756 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-api-56748689fb-22p2v" podStartSLOduration=2.267866034 podStartE2EDuration="6.40872879s" podCreationTimestamp="2025-11-26 07:06:57 +0000 UTC" firstStartedPulling="2025-11-26 07:06:58.504772149 +0000 UTC m=+1114.388660447" lastFinishedPulling="2025-11-26 07:07:02.645634906 +0000 UTC m=+1118.529523203" observedRunningTime="2025-11-26 07:07:03.353809609 +0000 UTC m=+1119.237697906" watchObservedRunningTime="2025-11-26 07:07:03.40872879 +0000 UTC m=+1119.292617078" Nov 26 07:07:03 crc kubenswrapper[4492]: I1126 07:07:03.432968 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.9200608780000001 podStartE2EDuration="7.43295076s" podCreationTimestamp="2025-11-26 07:06:56 +0000 UTC" firstStartedPulling="2025-11-26 07:06:57.151208376 +0000 UTC m=+1113.035096674" lastFinishedPulling="2025-11-26 07:07:02.664098258 +0000 UTC m=+1118.547986556" observedRunningTime="2025-11-26 07:07:03.393541767 +0000 UTC m=+1119.277430065" watchObservedRunningTime="2025-11-26 07:07:03.43295076 +0000 UTC m=+1119.316839048" Nov 26 07:07:03 crc kubenswrapper[4492]: I1126 07:07:03.454247 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-7bb6557f96-rgc7g"] Nov 26 07:07:03 crc kubenswrapper[4492]: I1126 07:07:03.477419 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-7bb6557f96-rgc7g"] Nov 26 07:07:03 crc kubenswrapper[4492]: I1126 07:07:03.493897 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 26 07:07:03 crc kubenswrapper[4492]: I1126 07:07:03.595224 4492 scope.go:117] "RemoveContainer" containerID="1ae7aee7de996ae15f21a63313f4987ac66b6771692515d2a9f4fbf55b2e1331" Nov 26 07:07:04 crc kubenswrapper[4492]: I1126 07:07:04.297264 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-engine-c57f564f4-75zkc"] Nov 26 07:07:04 crc kubenswrapper[4492]: E1126 07:07:04.298090 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a15ec528-9195-4dfe-95b7-e30a44f74b44" containerName="horizon" Nov 26 07:07:04 crc kubenswrapper[4492]: I1126 07:07:04.298113 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="a15ec528-9195-4dfe-95b7-e30a44f74b44" containerName="horizon" Nov 26 07:07:04 crc kubenswrapper[4492]: E1126 07:07:04.298151 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a15ec528-9195-4dfe-95b7-e30a44f74b44" containerName="horizon-log" Nov 26 07:07:04 crc kubenswrapper[4492]: I1126 07:07:04.298158 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="a15ec528-9195-4dfe-95b7-e30a44f74b44" containerName="horizon-log" Nov 26 07:07:04 crc kubenswrapper[4492]: I1126 
07:07:04.298511 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="a15ec528-9195-4dfe-95b7-e30a44f74b44" containerName="horizon" Nov 26 07:07:04 crc kubenswrapper[4492]: I1126 07:07:04.298535 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="a15ec528-9195-4dfe-95b7-e30a44f74b44" containerName="horizon-log" Nov 26 07:07:04 crc kubenswrapper[4492]: I1126 07:07:04.299664 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-c57f564f4-75zkc" Nov 26 07:07:04 crc kubenswrapper[4492]: I1126 07:07:04.321232 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-cfnapi-65c87b98b5-7kjlx"] Nov 26 07:07:04 crc kubenswrapper[4492]: I1126 07:07:04.322714 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-65c87b98b5-7kjlx" Nov 26 07:07:04 crc kubenswrapper[4492]: I1126 07:07:04.330603 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-c57f564f4-75zkc"] Nov 26 07:07:04 crc kubenswrapper[4492]: I1126 07:07:04.341807 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-65c87b98b5-7kjlx"] Nov 26 07:07:04 crc kubenswrapper[4492]: I1126 07:07:04.382605 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-api-75f4bc59f-8qztx"] Nov 26 07:07:04 crc kubenswrapper[4492]: I1126 07:07:04.382641 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c7817ef7-6407-49f2-8f5b-3357945f0ec0-combined-ca-bundle\") pod \"heat-cfnapi-65c87b98b5-7kjlx\" (UID: \"c7817ef7-6407-49f2-8f5b-3357945f0ec0\") " pod="openstack/heat-cfnapi-65c87b98b5-7kjlx" Nov 26 07:07:04 crc kubenswrapper[4492]: I1126 07:07:04.382684 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hzp98\" (UniqueName: \"kubernetes.io/projected/c7817ef7-6407-49f2-8f5b-3357945f0ec0-kube-api-access-hzp98\") pod \"heat-cfnapi-65c87b98b5-7kjlx\" (UID: \"c7817ef7-6407-49f2-8f5b-3357945f0ec0\") " pod="openstack/heat-cfnapi-65c87b98b5-7kjlx" Nov 26 07:07:04 crc kubenswrapper[4492]: I1126 07:07:04.382772 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c7817ef7-6407-49f2-8f5b-3357945f0ec0-config-data-custom\") pod \"heat-cfnapi-65c87b98b5-7kjlx\" (UID: \"c7817ef7-6407-49f2-8f5b-3357945f0ec0\") " pod="openstack/heat-cfnapi-65c87b98b5-7kjlx" Nov 26 07:07:04 crc kubenswrapper[4492]: I1126 07:07:04.382809 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c7817ef7-6407-49f2-8f5b-3357945f0ec0-config-data\") pod \"heat-cfnapi-65c87b98b5-7kjlx\" (UID: \"c7817ef7-6407-49f2-8f5b-3357945f0ec0\") " pod="openstack/heat-cfnapi-65c87b98b5-7kjlx" Nov 26 07:07:04 crc kubenswrapper[4492]: I1126 07:07:04.382844 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5907e115-0ab5-43b1-8173-8b5bc030137d-config-data\") pod \"heat-engine-c57f564f4-75zkc\" (UID: \"5907e115-0ab5-43b1-8173-8b5bc030137d\") " pod="openstack/heat-engine-c57f564f4-75zkc" Nov 26 07:07:04 crc kubenswrapper[4492]: I1126 07:07:04.382890 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started 
for volume \"kube-api-access-k7fq7\" (UniqueName: \"kubernetes.io/projected/5907e115-0ab5-43b1-8173-8b5bc030137d-kube-api-access-k7fq7\") pod \"heat-engine-c57f564f4-75zkc\" (UID: \"5907e115-0ab5-43b1-8173-8b5bc030137d\") " pod="openstack/heat-engine-c57f564f4-75zkc" Nov 26 07:07:04 crc kubenswrapper[4492]: I1126 07:07:04.382921 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5907e115-0ab5-43b1-8173-8b5bc030137d-config-data-custom\") pod \"heat-engine-c57f564f4-75zkc\" (UID: \"5907e115-0ab5-43b1-8173-8b5bc030137d\") " pod="openstack/heat-engine-c57f564f4-75zkc" Nov 26 07:07:04 crc kubenswrapper[4492]: I1126 07:07:04.382959 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5907e115-0ab5-43b1-8173-8b5bc030137d-combined-ca-bundle\") pod \"heat-engine-c57f564f4-75zkc\" (UID: \"5907e115-0ab5-43b1-8173-8b5bc030137d\") " pod="openstack/heat-engine-c57f564f4-75zkc" Nov 26 07:07:04 crc kubenswrapper[4492]: I1126 07:07:04.384123 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-75f4bc59f-8qztx" Nov 26 07:07:04 crc kubenswrapper[4492]: I1126 07:07:04.402949 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-75f4bc59f-8qztx"] Nov 26 07:07:04 crc kubenswrapper[4492]: I1126 07:07:04.457117 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a15ec528-9195-4dfe-95b7-e30a44f74b44" path="/var/lib/kubelet/pods/a15ec528-9195-4dfe-95b7-e30a44f74b44/volumes" Nov 26 07:07:04 crc kubenswrapper[4492]: I1126 07:07:04.484925 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c7817ef7-6407-49f2-8f5b-3357945f0ec0-config-data-custom\") pod \"heat-cfnapi-65c87b98b5-7kjlx\" (UID: \"c7817ef7-6407-49f2-8f5b-3357945f0ec0\") " pod="openstack/heat-cfnapi-65c87b98b5-7kjlx" Nov 26 07:07:04 crc kubenswrapper[4492]: I1126 07:07:04.485017 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c7817ef7-6407-49f2-8f5b-3357945f0ec0-config-data\") pod \"heat-cfnapi-65c87b98b5-7kjlx\" (UID: \"c7817ef7-6407-49f2-8f5b-3357945f0ec0\") " pod="openstack/heat-cfnapi-65c87b98b5-7kjlx" Nov 26 07:07:04 crc kubenswrapper[4492]: I1126 07:07:04.485057 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5907e115-0ab5-43b1-8173-8b5bc030137d-config-data\") pod \"heat-engine-c57f564f4-75zkc\" (UID: \"5907e115-0ab5-43b1-8173-8b5bc030137d\") " pod="openstack/heat-engine-c57f564f4-75zkc" Nov 26 07:07:04 crc kubenswrapper[4492]: I1126 07:07:04.485097 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k7fq7\" (UniqueName: \"kubernetes.io/projected/5907e115-0ab5-43b1-8173-8b5bc030137d-kube-api-access-k7fq7\") pod \"heat-engine-c57f564f4-75zkc\" (UID: \"5907e115-0ab5-43b1-8173-8b5bc030137d\") " pod="openstack/heat-engine-c57f564f4-75zkc" Nov 26 07:07:04 crc kubenswrapper[4492]: I1126 07:07:04.485127 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5907e115-0ab5-43b1-8173-8b5bc030137d-config-data-custom\") pod \"heat-engine-c57f564f4-75zkc\" (UID: 
\"5907e115-0ab5-43b1-8173-8b5bc030137d\") " pod="openstack/heat-engine-c57f564f4-75zkc" Nov 26 07:07:04 crc kubenswrapper[4492]: I1126 07:07:04.485152 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5907e115-0ab5-43b1-8173-8b5bc030137d-combined-ca-bundle\") pod \"heat-engine-c57f564f4-75zkc\" (UID: \"5907e115-0ab5-43b1-8173-8b5bc030137d\") " pod="openstack/heat-engine-c57f564f4-75zkc" Nov 26 07:07:04 crc kubenswrapper[4492]: I1126 07:07:04.485207 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-98srx\" (UniqueName: \"kubernetes.io/projected/3e950c7f-2241-4475-a891-d97102b54b9b-kube-api-access-98srx\") pod \"heat-api-75f4bc59f-8qztx\" (UID: \"3e950c7f-2241-4475-a891-d97102b54b9b\") " pod="openstack/heat-api-75f4bc59f-8qztx" Nov 26 07:07:04 crc kubenswrapper[4492]: I1126 07:07:04.485250 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c7817ef7-6407-49f2-8f5b-3357945f0ec0-combined-ca-bundle\") pod \"heat-cfnapi-65c87b98b5-7kjlx\" (UID: \"c7817ef7-6407-49f2-8f5b-3357945f0ec0\") " pod="openstack/heat-cfnapi-65c87b98b5-7kjlx" Nov 26 07:07:04 crc kubenswrapper[4492]: I1126 07:07:04.485270 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3e950c7f-2241-4475-a891-d97102b54b9b-combined-ca-bundle\") pod \"heat-api-75f4bc59f-8qztx\" (UID: \"3e950c7f-2241-4475-a891-d97102b54b9b\") " pod="openstack/heat-api-75f4bc59f-8qztx" Nov 26 07:07:04 crc kubenswrapper[4492]: I1126 07:07:04.485289 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hzp98\" (UniqueName: \"kubernetes.io/projected/c7817ef7-6407-49f2-8f5b-3357945f0ec0-kube-api-access-hzp98\") pod \"heat-cfnapi-65c87b98b5-7kjlx\" (UID: \"c7817ef7-6407-49f2-8f5b-3357945f0ec0\") " pod="openstack/heat-cfnapi-65c87b98b5-7kjlx" Nov 26 07:07:04 crc kubenswrapper[4492]: I1126 07:07:04.485312 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3e950c7f-2241-4475-a891-d97102b54b9b-config-data\") pod \"heat-api-75f4bc59f-8qztx\" (UID: \"3e950c7f-2241-4475-a891-d97102b54b9b\") " pod="openstack/heat-api-75f4bc59f-8qztx" Nov 26 07:07:04 crc kubenswrapper[4492]: I1126 07:07:04.485374 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3e950c7f-2241-4475-a891-d97102b54b9b-config-data-custom\") pod \"heat-api-75f4bc59f-8qztx\" (UID: \"3e950c7f-2241-4475-a891-d97102b54b9b\") " pod="openstack/heat-api-75f4bc59f-8qztx" Nov 26 07:07:04 crc kubenswrapper[4492]: I1126 07:07:04.491269 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c7817ef7-6407-49f2-8f5b-3357945f0ec0-config-data\") pod \"heat-cfnapi-65c87b98b5-7kjlx\" (UID: \"c7817ef7-6407-49f2-8f5b-3357945f0ec0\") " pod="openstack/heat-cfnapi-65c87b98b5-7kjlx" Nov 26 07:07:04 crc kubenswrapper[4492]: I1126 07:07:04.491929 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5907e115-0ab5-43b1-8173-8b5bc030137d-combined-ca-bundle\") pod \"heat-engine-c57f564f4-75zkc\" 
(UID: \"5907e115-0ab5-43b1-8173-8b5bc030137d\") " pod="openstack/heat-engine-c57f564f4-75zkc" Nov 26 07:07:04 crc kubenswrapper[4492]: I1126 07:07:04.495723 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5907e115-0ab5-43b1-8173-8b5bc030137d-config-data-custom\") pod \"heat-engine-c57f564f4-75zkc\" (UID: \"5907e115-0ab5-43b1-8173-8b5bc030137d\") " pod="openstack/heat-engine-c57f564f4-75zkc" Nov 26 07:07:04 crc kubenswrapper[4492]: I1126 07:07:04.497990 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5907e115-0ab5-43b1-8173-8b5bc030137d-config-data\") pod \"heat-engine-c57f564f4-75zkc\" (UID: \"5907e115-0ab5-43b1-8173-8b5bc030137d\") " pod="openstack/heat-engine-c57f564f4-75zkc" Nov 26 07:07:04 crc kubenswrapper[4492]: I1126 07:07:04.501840 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c7817ef7-6407-49f2-8f5b-3357945f0ec0-config-data-custom\") pod \"heat-cfnapi-65c87b98b5-7kjlx\" (UID: \"c7817ef7-6407-49f2-8f5b-3357945f0ec0\") " pod="openstack/heat-cfnapi-65c87b98b5-7kjlx" Nov 26 07:07:04 crc kubenswrapper[4492]: I1126 07:07:04.504055 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k7fq7\" (UniqueName: \"kubernetes.io/projected/5907e115-0ab5-43b1-8173-8b5bc030137d-kube-api-access-k7fq7\") pod \"heat-engine-c57f564f4-75zkc\" (UID: \"5907e115-0ab5-43b1-8173-8b5bc030137d\") " pod="openstack/heat-engine-c57f564f4-75zkc" Nov 26 07:07:04 crc kubenswrapper[4492]: I1126 07:07:04.504720 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c7817ef7-6407-49f2-8f5b-3357945f0ec0-combined-ca-bundle\") pod \"heat-cfnapi-65c87b98b5-7kjlx\" (UID: \"c7817ef7-6407-49f2-8f5b-3357945f0ec0\") " pod="openstack/heat-cfnapi-65c87b98b5-7kjlx" Nov 26 07:07:04 crc kubenswrapper[4492]: I1126 07:07:04.518809 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hzp98\" (UniqueName: \"kubernetes.io/projected/c7817ef7-6407-49f2-8f5b-3357945f0ec0-kube-api-access-hzp98\") pod \"heat-cfnapi-65c87b98b5-7kjlx\" (UID: \"c7817ef7-6407-49f2-8f5b-3357945f0ec0\") " pod="openstack/heat-cfnapi-65c87b98b5-7kjlx" Nov 26 07:07:04 crc kubenswrapper[4492]: I1126 07:07:04.588545 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-98srx\" (UniqueName: \"kubernetes.io/projected/3e950c7f-2241-4475-a891-d97102b54b9b-kube-api-access-98srx\") pod \"heat-api-75f4bc59f-8qztx\" (UID: \"3e950c7f-2241-4475-a891-d97102b54b9b\") " pod="openstack/heat-api-75f4bc59f-8qztx" Nov 26 07:07:04 crc kubenswrapper[4492]: I1126 07:07:04.588629 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3e950c7f-2241-4475-a891-d97102b54b9b-combined-ca-bundle\") pod \"heat-api-75f4bc59f-8qztx\" (UID: \"3e950c7f-2241-4475-a891-d97102b54b9b\") " pod="openstack/heat-api-75f4bc59f-8qztx" Nov 26 07:07:04 crc kubenswrapper[4492]: I1126 07:07:04.588656 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3e950c7f-2241-4475-a891-d97102b54b9b-config-data\") pod \"heat-api-75f4bc59f-8qztx\" (UID: \"3e950c7f-2241-4475-a891-d97102b54b9b\") " 
pod="openstack/heat-api-75f4bc59f-8qztx" Nov 26 07:07:04 crc kubenswrapper[4492]: I1126 07:07:04.588725 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3e950c7f-2241-4475-a891-d97102b54b9b-config-data-custom\") pod \"heat-api-75f4bc59f-8qztx\" (UID: \"3e950c7f-2241-4475-a891-d97102b54b9b\") " pod="openstack/heat-api-75f4bc59f-8qztx" Nov 26 07:07:04 crc kubenswrapper[4492]: I1126 07:07:04.594346 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3e950c7f-2241-4475-a891-d97102b54b9b-config-data\") pod \"heat-api-75f4bc59f-8qztx\" (UID: \"3e950c7f-2241-4475-a891-d97102b54b9b\") " pod="openstack/heat-api-75f4bc59f-8qztx" Nov 26 07:07:04 crc kubenswrapper[4492]: I1126 07:07:04.594848 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3e950c7f-2241-4475-a891-d97102b54b9b-combined-ca-bundle\") pod \"heat-api-75f4bc59f-8qztx\" (UID: \"3e950c7f-2241-4475-a891-d97102b54b9b\") " pod="openstack/heat-api-75f4bc59f-8qztx" Nov 26 07:07:04 crc kubenswrapper[4492]: I1126 07:07:04.595617 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3e950c7f-2241-4475-a891-d97102b54b9b-config-data-custom\") pod \"heat-api-75f4bc59f-8qztx\" (UID: \"3e950c7f-2241-4475-a891-d97102b54b9b\") " pod="openstack/heat-api-75f4bc59f-8qztx" Nov 26 07:07:04 crc kubenswrapper[4492]: I1126 07:07:04.611788 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-98srx\" (UniqueName: \"kubernetes.io/projected/3e950c7f-2241-4475-a891-d97102b54b9b-kube-api-access-98srx\") pod \"heat-api-75f4bc59f-8qztx\" (UID: \"3e950c7f-2241-4475-a891-d97102b54b9b\") " pod="openstack/heat-api-75f4bc59f-8qztx" Nov 26 07:07:04 crc kubenswrapper[4492]: I1126 07:07:04.631756 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-c57f564f4-75zkc" Nov 26 07:07:04 crc kubenswrapper[4492]: I1126 07:07:04.670591 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-65c87b98b5-7kjlx" Nov 26 07:07:04 crc kubenswrapper[4492]: I1126 07:07:04.706694 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-api-75f4bc59f-8qztx" Nov 26 07:07:05 crc kubenswrapper[4492]: I1126 07:07:05.107117 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-c57f564f4-75zkc"] Nov 26 07:07:05 crc kubenswrapper[4492]: I1126 07:07:05.249411 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-75f4bc59f-8qztx"] Nov 26 07:07:05 crc kubenswrapper[4492]: W1126 07:07:05.264074 4492 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3e950c7f_2241_4475_a891_d97102b54b9b.slice/crio-e07f88b8a42caa8558b73b18d59d7a961e27d39b2719d3aca4a9bfa88972abdc WatchSource:0}: Error finding container e07f88b8a42caa8558b73b18d59d7a961e27d39b2719d3aca4a9bfa88972abdc: Status 404 returned error can't find the container with id e07f88b8a42caa8558b73b18d59d7a961e27d39b2719d3aca4a9bfa88972abdc Nov 26 07:07:05 crc kubenswrapper[4492]: I1126 07:07:05.272871 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-65c87b98b5-7kjlx"] Nov 26 07:07:05 crc kubenswrapper[4492]: I1126 07:07:05.331792 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-75f4bc59f-8qztx" event={"ID":"3e950c7f-2241-4475-a891-d97102b54b9b","Type":"ContainerStarted","Data":"e07f88b8a42caa8558b73b18d59d7a961e27d39b2719d3aca4a9bfa88972abdc"} Nov 26 07:07:05 crc kubenswrapper[4492]: I1126 07:07:05.339126 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-65c87b98b5-7kjlx" event={"ID":"c7817ef7-6407-49f2-8f5b-3357945f0ec0","Type":"ContainerStarted","Data":"dfe55b887b861af9b7f4abe0123b6da00e6621af7fa9aa657a41cbf3198b57b0"} Nov 26 07:07:05 crc kubenswrapper[4492]: I1126 07:07:05.347998 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-c57f564f4-75zkc" event={"ID":"5907e115-0ab5-43b1-8173-8b5bc030137d","Type":"ContainerStarted","Data":"e10d81013cc15d567e192f46da0b9e300f09bcc22cb81bf395fbdf6362e5e604"} Nov 26 07:07:05 crc kubenswrapper[4492]: I1126 07:07:05.348221 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b3d44e0c-7e22-4a5d-8e2d-b946e676417d" containerName="ceilometer-central-agent" containerID="cri-o://668b2bb36cb1b17711057eca172a0ad78a9bdd91556a97cb0b80daff59f82706" gracePeriod=30 Nov 26 07:07:05 crc kubenswrapper[4492]: I1126 07:07:05.348849 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b3d44e0c-7e22-4a5d-8e2d-b946e676417d" containerName="sg-core" containerID="cri-o://bdec316493e10dfa4453c2100c9906ac89eab1d8422ceaec9088e4c01199ac09" gracePeriod=30 Nov 26 07:07:05 crc kubenswrapper[4492]: I1126 07:07:05.348919 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b3d44e0c-7e22-4a5d-8e2d-b946e676417d" containerName="proxy-httpd" containerID="cri-o://8ecf28d6b52e685c2180672cf54db50c38cfa7a629469233361c43d6f8be3b7a" gracePeriod=30 Nov 26 07:07:05 crc kubenswrapper[4492]: I1126 07:07:05.348991 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b3d44e0c-7e22-4a5d-8e2d-b946e676417d" containerName="ceilometer-notification-agent" containerID="cri-o://d67947dcdc5f696f6f41a22fb29282515796fecda0296cb403bc65ebbf15cf00" gracePeriod=30 Nov 26 07:07:06 crc kubenswrapper[4492]: I1126 07:07:06.196487 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openstack/heat-api-56748689fb-22p2v"] Nov 26 07:07:06 crc kubenswrapper[4492]: I1126 07:07:06.201425 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-cfnapi-7d7c7fc74d-977pm"] Nov 26 07:07:06 crc kubenswrapper[4492]: I1126 07:07:06.236337 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-cfnapi-cc797855b-f4zsl"] Nov 26 07:07:06 crc kubenswrapper[4492]: I1126 07:07:06.237835 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-cc797855b-f4zsl" Nov 26 07:07:06 crc kubenswrapper[4492]: I1126 07:07:06.245882 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-api-564669c98c-fvvts"] Nov 26 07:07:06 crc kubenswrapper[4492]: I1126 07:07:06.247305 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-564669c98c-fvvts" Nov 26 07:07:06 crc kubenswrapper[4492]: I1126 07:07:06.247765 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-heat-cfnapi-public-svc" Nov 26 07:07:06 crc kubenswrapper[4492]: I1126 07:07:06.249313 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-heat-api-internal-svc" Nov 26 07:07:06 crc kubenswrapper[4492]: I1126 07:07:06.249348 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-heat-api-public-svc" Nov 26 07:07:06 crc kubenswrapper[4492]: I1126 07:07:06.249502 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-heat-cfnapi-internal-svc" Nov 26 07:07:06 crc kubenswrapper[4492]: I1126 07:07:06.273057 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 07:07:06 crc kubenswrapper[4492]: I1126 07:07:06.273452 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="37ed6b48-37b7-479f-837f-d49432778c49" containerName="glance-log" containerID="cri-o://56f8eb6952e555b8950f54a9952d93a5f0c9484e60f683a0a4cfaa23ea23f274" gracePeriod=30 Nov 26 07:07:06 crc kubenswrapper[4492]: I1126 07:07:06.273709 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="37ed6b48-37b7-479f-837f-d49432778c49" containerName="glance-httpd" containerID="cri-o://f0bcd12fa4afc6ff4c40910d74c5855c7cc966cd3bd6cc0b480334ee5744bacd" gracePeriod=30 Nov 26 07:07:06 crc kubenswrapper[4492]: I1126 07:07:06.284531 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-cc797855b-f4zsl"] Nov 26 07:07:06 crc kubenswrapper[4492]: I1126 07:07:06.297117 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-564669c98c-fvvts"] Nov 26 07:07:06 crc kubenswrapper[4492]: I1126 07:07:06.332590 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0b009a36-769a-4f9e-a2ac-a188dd0c5dd1-combined-ca-bundle\") pod \"heat-api-564669c98c-fvvts\" (UID: \"0b009a36-769a-4f9e-a2ac-a188dd0c5dd1\") " pod="openstack/heat-api-564669c98c-fvvts" Nov 26 07:07:06 crc kubenswrapper[4492]: I1126 07:07:06.332921 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0b009a36-769a-4f9e-a2ac-a188dd0c5dd1-public-tls-certs\") pod \"heat-api-564669c98c-fvvts\" (UID: \"0b009a36-769a-4f9e-a2ac-a188dd0c5dd1\") " 
pod="openstack/heat-api-564669c98c-fvvts" Nov 26 07:07:06 crc kubenswrapper[4492]: I1126 07:07:06.332986 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0b009a36-769a-4f9e-a2ac-a188dd0c5dd1-internal-tls-certs\") pod \"heat-api-564669c98c-fvvts\" (UID: \"0b009a36-769a-4f9e-a2ac-a188dd0c5dd1\") " pod="openstack/heat-api-564669c98c-fvvts" Nov 26 07:07:06 crc kubenswrapper[4492]: I1126 07:07:06.333046 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0b009a36-769a-4f9e-a2ac-a188dd0c5dd1-config-data-custom\") pod \"heat-api-564669c98c-fvvts\" (UID: \"0b009a36-769a-4f9e-a2ac-a188dd0c5dd1\") " pod="openstack/heat-api-564669c98c-fvvts" Nov 26 07:07:06 crc kubenswrapper[4492]: I1126 07:07:06.333091 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dbs6g\" (UniqueName: \"kubernetes.io/projected/0b009a36-769a-4f9e-a2ac-a188dd0c5dd1-kube-api-access-dbs6g\") pod \"heat-api-564669c98c-fvvts\" (UID: \"0b009a36-769a-4f9e-a2ac-a188dd0c5dd1\") " pod="openstack/heat-api-564669c98c-fvvts" Nov 26 07:07:06 crc kubenswrapper[4492]: I1126 07:07:06.333158 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b4e68b8f-9411-48b7-85b4-a0d5dcae77e1-config-data\") pod \"heat-cfnapi-cc797855b-f4zsl\" (UID: \"b4e68b8f-9411-48b7-85b4-a0d5dcae77e1\") " pod="openstack/heat-cfnapi-cc797855b-f4zsl" Nov 26 07:07:06 crc kubenswrapper[4492]: I1126 07:07:06.333196 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ncjkn\" (UniqueName: \"kubernetes.io/projected/b4e68b8f-9411-48b7-85b4-a0d5dcae77e1-kube-api-access-ncjkn\") pod \"heat-cfnapi-cc797855b-f4zsl\" (UID: \"b4e68b8f-9411-48b7-85b4-a0d5dcae77e1\") " pod="openstack/heat-cfnapi-cc797855b-f4zsl" Nov 26 07:07:06 crc kubenswrapper[4492]: I1126 07:07:06.333227 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b4e68b8f-9411-48b7-85b4-a0d5dcae77e1-internal-tls-certs\") pod \"heat-cfnapi-cc797855b-f4zsl\" (UID: \"b4e68b8f-9411-48b7-85b4-a0d5dcae77e1\") " pod="openstack/heat-cfnapi-cc797855b-f4zsl" Nov 26 07:07:06 crc kubenswrapper[4492]: I1126 07:07:06.333259 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b4e68b8f-9411-48b7-85b4-a0d5dcae77e1-config-data-custom\") pod \"heat-cfnapi-cc797855b-f4zsl\" (UID: \"b4e68b8f-9411-48b7-85b4-a0d5dcae77e1\") " pod="openstack/heat-cfnapi-cc797855b-f4zsl" Nov 26 07:07:06 crc kubenswrapper[4492]: I1126 07:07:06.333278 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0b009a36-769a-4f9e-a2ac-a188dd0c5dd1-config-data\") pod \"heat-api-564669c98c-fvvts\" (UID: \"0b009a36-769a-4f9e-a2ac-a188dd0c5dd1\") " pod="openstack/heat-api-564669c98c-fvvts" Nov 26 07:07:06 crc kubenswrapper[4492]: I1126 07:07:06.333398 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/b4e68b8f-9411-48b7-85b4-a0d5dcae77e1-combined-ca-bundle\") pod \"heat-cfnapi-cc797855b-f4zsl\" (UID: \"b4e68b8f-9411-48b7-85b4-a0d5dcae77e1\") " pod="openstack/heat-cfnapi-cc797855b-f4zsl" Nov 26 07:07:06 crc kubenswrapper[4492]: I1126 07:07:06.333422 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b4e68b8f-9411-48b7-85b4-a0d5dcae77e1-public-tls-certs\") pod \"heat-cfnapi-cc797855b-f4zsl\" (UID: \"b4e68b8f-9411-48b7-85b4-a0d5dcae77e1\") " pod="openstack/heat-cfnapi-cc797855b-f4zsl" Nov 26 07:07:06 crc kubenswrapper[4492]: I1126 07:07:06.373973 4492 generic.go:334] "Generic (PLEG): container finished" podID="3e950c7f-2241-4475-a891-d97102b54b9b" containerID="30c9e46507ae307e57f6c78265f36ccf1e6051442d252ab0b0bf5310326ff4f9" exitCode=1 Nov 26 07:07:06 crc kubenswrapper[4492]: I1126 07:07:06.374033 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-75f4bc59f-8qztx" event={"ID":"3e950c7f-2241-4475-a891-d97102b54b9b","Type":"ContainerDied","Data":"30c9e46507ae307e57f6c78265f36ccf1e6051442d252ab0b0bf5310326ff4f9"} Nov 26 07:07:06 crc kubenswrapper[4492]: I1126 07:07:06.374843 4492 scope.go:117] "RemoveContainer" containerID="30c9e46507ae307e57f6c78265f36ccf1e6051442d252ab0b0bf5310326ff4f9" Nov 26 07:07:06 crc kubenswrapper[4492]: I1126 07:07:06.379033 4492 generic.go:334] "Generic (PLEG): container finished" podID="c7817ef7-6407-49f2-8f5b-3357945f0ec0" containerID="18cbb279cf481eaa5e2b0c5d654d9e17548561ba519536b900089d8c0bf14865" exitCode=1 Nov 26 07:07:06 crc kubenswrapper[4492]: I1126 07:07:06.379435 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-65c87b98b5-7kjlx" event={"ID":"c7817ef7-6407-49f2-8f5b-3357945f0ec0","Type":"ContainerDied","Data":"18cbb279cf481eaa5e2b0c5d654d9e17548561ba519536b900089d8c0bf14865"} Nov 26 07:07:06 crc kubenswrapper[4492]: I1126 07:07:06.379800 4492 scope.go:117] "RemoveContainer" containerID="18cbb279cf481eaa5e2b0c5d654d9e17548561ba519536b900089d8c0bf14865" Nov 26 07:07:06 crc kubenswrapper[4492]: I1126 07:07:06.383922 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-c57f564f4-75zkc" event={"ID":"5907e115-0ab5-43b1-8173-8b5bc030137d","Type":"ContainerStarted","Data":"ad73ad2bd4d176784432675b9c52102bfd69284669a767b96caf9928e8877a3e"} Nov 26 07:07:06 crc kubenswrapper[4492]: I1126 07:07:06.384403 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-engine-c57f564f4-75zkc" Nov 26 07:07:06 crc kubenswrapper[4492]: I1126 07:07:06.395042 4492 generic.go:334] "Generic (PLEG): container finished" podID="b3d44e0c-7e22-4a5d-8e2d-b946e676417d" containerID="8ecf28d6b52e685c2180672cf54db50c38cfa7a629469233361c43d6f8be3b7a" exitCode=0 Nov 26 07:07:06 crc kubenswrapper[4492]: I1126 07:07:06.395072 4492 generic.go:334] "Generic (PLEG): container finished" podID="b3d44e0c-7e22-4a5d-8e2d-b946e676417d" containerID="bdec316493e10dfa4453c2100c9906ac89eab1d8422ceaec9088e4c01199ac09" exitCode=2 Nov 26 07:07:06 crc kubenswrapper[4492]: I1126 07:07:06.395081 4492 generic.go:334] "Generic (PLEG): container finished" podID="b3d44e0c-7e22-4a5d-8e2d-b946e676417d" containerID="d67947dcdc5f696f6f41a22fb29282515796fecda0296cb403bc65ebbf15cf00" exitCode=0 Nov 26 07:07:06 crc kubenswrapper[4492]: I1126 07:07:06.395262 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/heat-cfnapi-7d7c7fc74d-977pm" 
podUID="5d9c5649-030f-45ea-9683-0570ed0435ab" containerName="heat-cfnapi" containerID="cri-o://99b27bf3d24b7dc4a90e43417ff1417d55f643665789ebe59138925bcec0a081" gracePeriod=60 Nov 26 07:07:06 crc kubenswrapper[4492]: I1126 07:07:06.395352 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b3d44e0c-7e22-4a5d-8e2d-b946e676417d","Type":"ContainerDied","Data":"8ecf28d6b52e685c2180672cf54db50c38cfa7a629469233361c43d6f8be3b7a"} Nov 26 07:07:06 crc kubenswrapper[4492]: I1126 07:07:06.395380 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b3d44e0c-7e22-4a5d-8e2d-b946e676417d","Type":"ContainerDied","Data":"bdec316493e10dfa4453c2100c9906ac89eab1d8422ceaec9088e4c01199ac09"} Nov 26 07:07:06 crc kubenswrapper[4492]: I1126 07:07:06.395391 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b3d44e0c-7e22-4a5d-8e2d-b946e676417d","Type":"ContainerDied","Data":"d67947dcdc5f696f6f41a22fb29282515796fecda0296cb403bc65ebbf15cf00"} Nov 26 07:07:06 crc kubenswrapper[4492]: I1126 07:07:06.395487 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/heat-api-56748689fb-22p2v" podUID="5a2cfab6-77d9-4d88-aee3-c0b10ead6ff6" containerName="heat-api" containerID="cri-o://4d8c2ce102cfea313d89a6074e18790072033b9124492ddc9d1c8b32cb2db770" gracePeriod=60 Nov 26 07:07:06 crc kubenswrapper[4492]: I1126 07:07:06.436518 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0b009a36-769a-4f9e-a2ac-a188dd0c5dd1-combined-ca-bundle\") pod \"heat-api-564669c98c-fvvts\" (UID: \"0b009a36-769a-4f9e-a2ac-a188dd0c5dd1\") " pod="openstack/heat-api-564669c98c-fvvts" Nov 26 07:07:06 crc kubenswrapper[4492]: I1126 07:07:06.436598 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0b009a36-769a-4f9e-a2ac-a188dd0c5dd1-public-tls-certs\") pod \"heat-api-564669c98c-fvvts\" (UID: \"0b009a36-769a-4f9e-a2ac-a188dd0c5dd1\") " pod="openstack/heat-api-564669c98c-fvvts" Nov 26 07:07:06 crc kubenswrapper[4492]: I1126 07:07:06.436644 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0b009a36-769a-4f9e-a2ac-a188dd0c5dd1-internal-tls-certs\") pod \"heat-api-564669c98c-fvvts\" (UID: \"0b009a36-769a-4f9e-a2ac-a188dd0c5dd1\") " pod="openstack/heat-api-564669c98c-fvvts" Nov 26 07:07:06 crc kubenswrapper[4492]: I1126 07:07:06.436687 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0b009a36-769a-4f9e-a2ac-a188dd0c5dd1-config-data-custom\") pod \"heat-api-564669c98c-fvvts\" (UID: \"0b009a36-769a-4f9e-a2ac-a188dd0c5dd1\") " pod="openstack/heat-api-564669c98c-fvvts" Nov 26 07:07:06 crc kubenswrapper[4492]: I1126 07:07:06.436721 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dbs6g\" (UniqueName: \"kubernetes.io/projected/0b009a36-769a-4f9e-a2ac-a188dd0c5dd1-kube-api-access-dbs6g\") pod \"heat-api-564669c98c-fvvts\" (UID: \"0b009a36-769a-4f9e-a2ac-a188dd0c5dd1\") " pod="openstack/heat-api-564669c98c-fvvts" Nov 26 07:07:06 crc kubenswrapper[4492]: I1126 07:07:06.436759 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/b4e68b8f-9411-48b7-85b4-a0d5dcae77e1-config-data\") pod \"heat-cfnapi-cc797855b-f4zsl\" (UID: \"b4e68b8f-9411-48b7-85b4-a0d5dcae77e1\") " pod="openstack/heat-cfnapi-cc797855b-f4zsl" Nov 26 07:07:06 crc kubenswrapper[4492]: I1126 07:07:06.436785 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ncjkn\" (UniqueName: \"kubernetes.io/projected/b4e68b8f-9411-48b7-85b4-a0d5dcae77e1-kube-api-access-ncjkn\") pod \"heat-cfnapi-cc797855b-f4zsl\" (UID: \"b4e68b8f-9411-48b7-85b4-a0d5dcae77e1\") " pod="openstack/heat-cfnapi-cc797855b-f4zsl" Nov 26 07:07:06 crc kubenswrapper[4492]: I1126 07:07:06.436805 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b4e68b8f-9411-48b7-85b4-a0d5dcae77e1-internal-tls-certs\") pod \"heat-cfnapi-cc797855b-f4zsl\" (UID: \"b4e68b8f-9411-48b7-85b4-a0d5dcae77e1\") " pod="openstack/heat-cfnapi-cc797855b-f4zsl" Nov 26 07:07:06 crc kubenswrapper[4492]: I1126 07:07:06.436830 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b4e68b8f-9411-48b7-85b4-a0d5dcae77e1-config-data-custom\") pod \"heat-cfnapi-cc797855b-f4zsl\" (UID: \"b4e68b8f-9411-48b7-85b4-a0d5dcae77e1\") " pod="openstack/heat-cfnapi-cc797855b-f4zsl" Nov 26 07:07:06 crc kubenswrapper[4492]: I1126 07:07:06.436845 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0b009a36-769a-4f9e-a2ac-a188dd0c5dd1-config-data\") pod \"heat-api-564669c98c-fvvts\" (UID: \"0b009a36-769a-4f9e-a2ac-a188dd0c5dd1\") " pod="openstack/heat-api-564669c98c-fvvts" Nov 26 07:07:06 crc kubenswrapper[4492]: I1126 07:07:06.436928 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4e68b8f-9411-48b7-85b4-a0d5dcae77e1-combined-ca-bundle\") pod \"heat-cfnapi-cc797855b-f4zsl\" (UID: \"b4e68b8f-9411-48b7-85b4-a0d5dcae77e1\") " pod="openstack/heat-cfnapi-cc797855b-f4zsl" Nov 26 07:07:06 crc kubenswrapper[4492]: I1126 07:07:06.436954 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b4e68b8f-9411-48b7-85b4-a0d5dcae77e1-public-tls-certs\") pod \"heat-cfnapi-cc797855b-f4zsl\" (UID: \"b4e68b8f-9411-48b7-85b4-a0d5dcae77e1\") " pod="openstack/heat-cfnapi-cc797855b-f4zsl" Nov 26 07:07:06 crc kubenswrapper[4492]: I1126 07:07:06.448527 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0b009a36-769a-4f9e-a2ac-a188dd0c5dd1-internal-tls-certs\") pod \"heat-api-564669c98c-fvvts\" (UID: \"0b009a36-769a-4f9e-a2ac-a188dd0c5dd1\") " pod="openstack/heat-api-564669c98c-fvvts" Nov 26 07:07:06 crc kubenswrapper[4492]: I1126 07:07:06.448871 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0b009a36-769a-4f9e-a2ac-a188dd0c5dd1-combined-ca-bundle\") pod \"heat-api-564669c98c-fvvts\" (UID: \"0b009a36-769a-4f9e-a2ac-a188dd0c5dd1\") " pod="openstack/heat-api-564669c98c-fvvts" Nov 26 07:07:06 crc kubenswrapper[4492]: I1126 07:07:06.449226 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b4e68b8f-9411-48b7-85b4-a0d5dcae77e1-public-tls-certs\") 
pod \"heat-cfnapi-cc797855b-f4zsl\" (UID: \"b4e68b8f-9411-48b7-85b4-a0d5dcae77e1\") " pod="openstack/heat-cfnapi-cc797855b-f4zsl" Nov 26 07:07:06 crc kubenswrapper[4492]: I1126 07:07:06.453411 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0b009a36-769a-4f9e-a2ac-a188dd0c5dd1-public-tls-certs\") pod \"heat-api-564669c98c-fvvts\" (UID: \"0b009a36-769a-4f9e-a2ac-a188dd0c5dd1\") " pod="openstack/heat-api-564669c98c-fvvts" Nov 26 07:07:06 crc kubenswrapper[4492]: I1126 07:07:06.462685 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-engine-c57f564f4-75zkc" podStartSLOduration=2.462675052 podStartE2EDuration="2.462675052s" podCreationTimestamp="2025-11-26 07:07:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:07:06.455610049 +0000 UTC m=+1122.339498348" watchObservedRunningTime="2025-11-26 07:07:06.462675052 +0000 UTC m=+1122.346563350" Nov 26 07:07:06 crc kubenswrapper[4492]: I1126 07:07:06.466407 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b4e68b8f-9411-48b7-85b4-a0d5dcae77e1-internal-tls-certs\") pod \"heat-cfnapi-cc797855b-f4zsl\" (UID: \"b4e68b8f-9411-48b7-85b4-a0d5dcae77e1\") " pod="openstack/heat-cfnapi-cc797855b-f4zsl" Nov 26 07:07:06 crc kubenswrapper[4492]: I1126 07:07:06.471808 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b4e68b8f-9411-48b7-85b4-a0d5dcae77e1-config-data-custom\") pod \"heat-cfnapi-cc797855b-f4zsl\" (UID: \"b4e68b8f-9411-48b7-85b4-a0d5dcae77e1\") " pod="openstack/heat-cfnapi-cc797855b-f4zsl" Nov 26 07:07:06 crc kubenswrapper[4492]: I1126 07:07:06.471896 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ncjkn\" (UniqueName: \"kubernetes.io/projected/b4e68b8f-9411-48b7-85b4-a0d5dcae77e1-kube-api-access-ncjkn\") pod \"heat-cfnapi-cc797855b-f4zsl\" (UID: \"b4e68b8f-9411-48b7-85b4-a0d5dcae77e1\") " pod="openstack/heat-cfnapi-cc797855b-f4zsl" Nov 26 07:07:06 crc kubenswrapper[4492]: I1126 07:07:06.471907 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0b009a36-769a-4f9e-a2ac-a188dd0c5dd1-config-data-custom\") pod \"heat-api-564669c98c-fvvts\" (UID: \"0b009a36-769a-4f9e-a2ac-a188dd0c5dd1\") " pod="openstack/heat-api-564669c98c-fvvts" Nov 26 07:07:06 crc kubenswrapper[4492]: I1126 07:07:06.472132 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0b009a36-769a-4f9e-a2ac-a188dd0c5dd1-config-data\") pod \"heat-api-564669c98c-fvvts\" (UID: \"0b009a36-769a-4f9e-a2ac-a188dd0c5dd1\") " pod="openstack/heat-api-564669c98c-fvvts" Nov 26 07:07:06 crc kubenswrapper[4492]: I1126 07:07:06.473974 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b4e68b8f-9411-48b7-85b4-a0d5dcae77e1-config-data\") pod \"heat-cfnapi-cc797855b-f4zsl\" (UID: \"b4e68b8f-9411-48b7-85b4-a0d5dcae77e1\") " pod="openstack/heat-cfnapi-cc797855b-f4zsl" Nov 26 07:07:06 crc kubenswrapper[4492]: I1126 07:07:06.480352 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dbs6g\" (UniqueName: 
\"kubernetes.io/projected/0b009a36-769a-4f9e-a2ac-a188dd0c5dd1-kube-api-access-dbs6g\") pod \"heat-api-564669c98c-fvvts\" (UID: \"0b009a36-769a-4f9e-a2ac-a188dd0c5dd1\") " pod="openstack/heat-api-564669c98c-fvvts" Nov 26 07:07:06 crc kubenswrapper[4492]: I1126 07:07:06.480990 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4e68b8f-9411-48b7-85b4-a0d5dcae77e1-combined-ca-bundle\") pod \"heat-cfnapi-cc797855b-f4zsl\" (UID: \"b4e68b8f-9411-48b7-85b4-a0d5dcae77e1\") " pod="openstack/heat-cfnapi-cc797855b-f4zsl" Nov 26 07:07:06 crc kubenswrapper[4492]: I1126 07:07:06.566999 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-cc797855b-f4zsl" Nov 26 07:07:06 crc kubenswrapper[4492]: I1126 07:07:06.575328 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-564669c98c-fvvts" Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.097022 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-db-create-2mcb9"] Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.098834 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-2mcb9" Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.101712 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-2mcb9"] Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.135848 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-cc797855b-f4zsl"] Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.161062 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9q28f\" (UniqueName: \"kubernetes.io/projected/66f5a9c1-c990-415e-b0e2-28d8ff866cf1-kube-api-access-9q28f\") pod \"nova-api-db-create-2mcb9\" (UID: \"66f5a9c1-c990-415e-b0e2-28d8ff866cf1\") " pod="openstack/nova-api-db-create-2mcb9" Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.161201 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/66f5a9c1-c990-415e-b0e2-28d8ff866cf1-operator-scripts\") pod \"nova-api-db-create-2mcb9\" (UID: \"66f5a9c1-c990-415e-b0e2-28d8ff866cf1\") " pod="openstack/nova-api-db-create-2mcb9" Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.189761 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-564669c98c-fvvts"] Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.265470 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/66f5a9c1-c990-415e-b0e2-28d8ff866cf1-operator-scripts\") pod \"nova-api-db-create-2mcb9\" (UID: \"66f5a9c1-c990-415e-b0e2-28d8ff866cf1\") " pod="openstack/nova-api-db-create-2mcb9" Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.265593 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9q28f\" (UniqueName: \"kubernetes.io/projected/66f5a9c1-c990-415e-b0e2-28d8ff866cf1-kube-api-access-9q28f\") pod \"nova-api-db-create-2mcb9\" (UID: \"66f5a9c1-c990-415e-b0e2-28d8ff866cf1\") " pod="openstack/nova-api-db-create-2mcb9" Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.266815 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: 
\"kubernetes.io/configmap/66f5a9c1-c990-415e-b0e2-28d8ff866cf1-operator-scripts\") pod \"nova-api-db-create-2mcb9\" (UID: \"66f5a9c1-c990-415e-b0e2-28d8ff866cf1\") " pod="openstack/nova-api-db-create-2mcb9" Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.274237 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-db-create-cfv7t"] Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.275565 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-cfv7t" Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.333014 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9q28f\" (UniqueName: \"kubernetes.io/projected/66f5a9c1-c990-415e-b0e2-28d8ff866cf1-kube-api-access-9q28f\") pod \"nova-api-db-create-2mcb9\" (UID: \"66f5a9c1-c990-415e-b0e2-28d8ff866cf1\") " pod="openstack/nova-api-db-create-2mcb9" Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.351030 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-7d7c7fc74d-977pm" Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.360831 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-56748689fb-22p2v" Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.363811 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-cfv7t"] Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.369498 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ae7dd4ac-dc69-4186-923f-40616d7fbea6-operator-scripts\") pod \"nova-cell0-db-create-cfv7t\" (UID: \"ae7dd4ac-dc69-4186-923f-40616d7fbea6\") " pod="openstack/nova-cell0-db-create-cfv7t" Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.390963 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t2dg5\" (UniqueName: \"kubernetes.io/projected/ae7dd4ac-dc69-4186-923f-40616d7fbea6-kube-api-access-t2dg5\") pod \"nova-cell0-db-create-cfv7t\" (UID: \"ae7dd4ac-dc69-4186-923f-40616d7fbea6\") " pod="openstack/nova-cell0-db-create-cfv7t" Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.413776 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-b365-account-create-update-4vpg6"] Nov 26 07:07:07 crc kubenswrapper[4492]: E1126 07:07:07.415013 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5d9c5649-030f-45ea-9683-0570ed0435ab" containerName="heat-cfnapi" Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.415035 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="5d9c5649-030f-45ea-9683-0570ed0435ab" containerName="heat-cfnapi" Nov 26 07:07:07 crc kubenswrapper[4492]: E1126 07:07:07.415101 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5a2cfab6-77d9-4d88-aee3-c0b10ead6ff6" containerName="heat-api" Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.415113 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="5a2cfab6-77d9-4d88-aee3-c0b10ead6ff6" containerName="heat-api" Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.415540 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="5d9c5649-030f-45ea-9683-0570ed0435ab" containerName="heat-cfnapi" Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.415720 4492 memory_manager.go:354] "RemoveStaleState 
removing state" podUID="5a2cfab6-77d9-4d88-aee3-c0b10ead6ff6" containerName="heat-api" Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.422888 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-b365-account-create-update-4vpg6" Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.431948 4492 generic.go:334] "Generic (PLEG): container finished" podID="5a2cfab6-77d9-4d88-aee3-c0b10ead6ff6" containerID="4d8c2ce102cfea313d89a6074e18790072033b9124492ddc9d1c8b32cb2db770" exitCode=0 Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.432122 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-56748689fb-22p2v" event={"ID":"5a2cfab6-77d9-4d88-aee3-c0b10ead6ff6","Type":"ContainerDied","Data":"4d8c2ce102cfea313d89a6074e18790072033b9124492ddc9d1c8b32cb2db770"} Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.432232 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-56748689fb-22p2v" event={"ID":"5a2cfab6-77d9-4d88-aee3-c0b10ead6ff6","Type":"ContainerDied","Data":"583f527977da4112889ccb26766da6d0355da5c45d726fb0df0b2a7ab2f33a46"} Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.432390 4492 scope.go:117] "RemoveContainer" containerID="4d8c2ce102cfea313d89a6074e18790072033b9124492ddc9d1c8b32cb2db770" Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.432722 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-56748689fb-22p2v" Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.433195 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-db-secret" Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.438078 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-cc797855b-f4zsl" event={"ID":"b4e68b8f-9411-48b7-85b4-a0d5dcae77e1","Type":"ContainerStarted","Data":"85264f03c186958e8fbebe734d9e9690316537d82d5c205cfade8fdd79f48d24"} Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.450514 4492 generic.go:334] "Generic (PLEG): container finished" podID="3e950c7f-2241-4475-a891-d97102b54b9b" containerID="aef0af39dc68fdb9a04bc4ec82267b35281b86211b9b0adf8c987803d81f21f4" exitCode=1 Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.450606 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-75f4bc59f-8qztx" event={"ID":"3e950c7f-2241-4475-a891-d97102b54b9b","Type":"ContainerDied","Data":"aef0af39dc68fdb9a04bc4ec82267b35281b86211b9b0adf8c987803d81f21f4"} Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.451155 4492 scope.go:117] "RemoveContainer" containerID="aef0af39dc68fdb9a04bc4ec82267b35281b86211b9b0adf8c987803d81f21f4" Nov 26 07:07:07 crc kubenswrapper[4492]: E1126 07:07:07.453279 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-api\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-api pod=heat-api-75f4bc59f-8qztx_openstack(3e950c7f-2241-4475-a891-d97102b54b9b)\"" pod="openstack/heat-api-75f4bc59f-8qztx" podUID="3e950c7f-2241-4475-a891-d97102b54b9b" Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.470613 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-db-create-6xr2x"] Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.472337 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-564669c98c-fvvts" 
event={"ID":"0b009a36-769a-4f9e-a2ac-a188dd0c5dd1","Type":"ContainerStarted","Data":"e1c6e027e348a36cca74bef0b39ebc7845432c2c3fa2f7de5e705d80e4d1098b"} Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.472462 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-6xr2x" Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.495624 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-b365-account-create-update-4vpg6"] Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.496216 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hww7r\" (UniqueName: \"kubernetes.io/projected/5a2cfab6-77d9-4d88-aee3-c0b10ead6ff6-kube-api-access-hww7r\") pod \"5a2cfab6-77d9-4d88-aee3-c0b10ead6ff6\" (UID: \"5a2cfab6-77d9-4d88-aee3-c0b10ead6ff6\") " Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.496326 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dd46d\" (UniqueName: \"kubernetes.io/projected/5d9c5649-030f-45ea-9683-0570ed0435ab-kube-api-access-dd46d\") pod \"5d9c5649-030f-45ea-9683-0570ed0435ab\" (UID: \"5d9c5649-030f-45ea-9683-0570ed0435ab\") " Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.496502 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5d9c5649-030f-45ea-9683-0570ed0435ab-config-data\") pod \"5d9c5649-030f-45ea-9683-0570ed0435ab\" (UID: \"5d9c5649-030f-45ea-9683-0570ed0435ab\") " Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.496556 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5a2cfab6-77d9-4d88-aee3-c0b10ead6ff6-config-data\") pod \"5a2cfab6-77d9-4d88-aee3-c0b10ead6ff6\" (UID: \"5a2cfab6-77d9-4d88-aee3-c0b10ead6ff6\") " Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.496572 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5a2cfab6-77d9-4d88-aee3-c0b10ead6ff6-combined-ca-bundle\") pod \"5a2cfab6-77d9-4d88-aee3-c0b10ead6ff6\" (UID: \"5a2cfab6-77d9-4d88-aee3-c0b10ead6ff6\") " Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.496652 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5d9c5649-030f-45ea-9683-0570ed0435ab-config-data-custom\") pod \"5d9c5649-030f-45ea-9683-0570ed0435ab\" (UID: \"5d9c5649-030f-45ea-9683-0570ed0435ab\") " Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.496674 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5d9c5649-030f-45ea-9683-0570ed0435ab-combined-ca-bundle\") pod \"5d9c5649-030f-45ea-9683-0570ed0435ab\" (UID: \"5d9c5649-030f-45ea-9683-0570ed0435ab\") " Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.496729 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5a2cfab6-77d9-4d88-aee3-c0b10ead6ff6-config-data-custom\") pod \"5a2cfab6-77d9-4d88-aee3-c0b10ead6ff6\" (UID: \"5a2cfab6-77d9-4d88-aee3-c0b10ead6ff6\") " Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.497073 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cb6703a6-97ba-4d95-8094-647d0baec33e-operator-scripts\") pod \"nova-api-b365-account-create-update-4vpg6\" (UID: \"cb6703a6-97ba-4d95-8094-647d0baec33e\") " pod="openstack/nova-api-b365-account-create-update-4vpg6" Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.497130 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v7xj8\" (UniqueName: \"kubernetes.io/projected/cb6703a6-97ba-4d95-8094-647d0baec33e-kube-api-access-v7xj8\") pod \"nova-api-b365-account-create-update-4vpg6\" (UID: \"cb6703a6-97ba-4d95-8094-647d0baec33e\") " pod="openstack/nova-api-b365-account-create-update-4vpg6" Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.497191 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ae7dd4ac-dc69-4186-923f-40616d7fbea6-operator-scripts\") pod \"nova-cell0-db-create-cfv7t\" (UID: \"ae7dd4ac-dc69-4186-923f-40616d7fbea6\") " pod="openstack/nova-cell0-db-create-cfv7t" Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.497220 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t2dg5\" (UniqueName: \"kubernetes.io/projected/ae7dd4ac-dc69-4186-923f-40616d7fbea6-kube-api-access-t2dg5\") pod \"nova-cell0-db-create-cfv7t\" (UID: \"ae7dd4ac-dc69-4186-923f-40616d7fbea6\") " pod="openstack/nova-cell0-db-create-cfv7t" Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.499373 4492 generic.go:334] "Generic (PLEG): container finished" podID="c7817ef7-6407-49f2-8f5b-3357945f0ec0" containerID="ad49995e2aecaba86e1a66b69a5c31ce8b84c3fcda1fb95b90b721c027caf77a" exitCode=1 Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.499425 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-db-create-2mcb9" Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.499745 4492 scope.go:117] "RemoveContainer" containerID="ad49995e2aecaba86e1a66b69a5c31ce8b84c3fcda1fb95b90b721c027caf77a" Nov 26 07:07:07 crc kubenswrapper[4492]: E1126 07:07:07.499955 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-cfnapi\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-cfnapi pod=heat-cfnapi-65c87b98b5-7kjlx_openstack(c7817ef7-6407-49f2-8f5b-3357945f0ec0)\"" pod="openstack/heat-cfnapi-65c87b98b5-7kjlx" podUID="c7817ef7-6407-49f2-8f5b-3357945f0ec0" Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.499435 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-65c87b98b5-7kjlx" event={"ID":"c7817ef7-6407-49f2-8f5b-3357945f0ec0","Type":"ContainerDied","Data":"ad49995e2aecaba86e1a66b69a5c31ce8b84c3fcda1fb95b90b721c027caf77a"} Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.518020 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-6xr2x"] Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.523045 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ae7dd4ac-dc69-4186-923f-40616d7fbea6-operator-scripts\") pod \"nova-cell0-db-create-cfv7t\" (UID: \"ae7dd4ac-dc69-4186-923f-40616d7fbea6\") " pod="openstack/nova-cell0-db-create-cfv7t" Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.523722 4492 scope.go:117] "RemoveContainer" containerID="4d8c2ce102cfea313d89a6074e18790072033b9124492ddc9d1c8b32cb2db770" Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.527549 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5a2cfab6-77d9-4d88-aee3-c0b10ead6ff6-kube-api-access-hww7r" (OuterVolumeSpecName: "kube-api-access-hww7r") pod "5a2cfab6-77d9-4d88-aee3-c0b10ead6ff6" (UID: "5a2cfab6-77d9-4d88-aee3-c0b10ead6ff6"). InnerVolumeSpecName "kube-api-access-hww7r". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.532167 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5a2cfab6-77d9-4d88-aee3-c0b10ead6ff6-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "5a2cfab6-77d9-4d88-aee3-c0b10ead6ff6" (UID: "5a2cfab6-77d9-4d88-aee3-c0b10ead6ff6"). InnerVolumeSpecName "config-data-custom". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.534493 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-74d7dcb8f-sgmxq" Nov 26 07:07:07 crc kubenswrapper[4492]: E1126 07:07:07.535108 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4d8c2ce102cfea313d89a6074e18790072033b9124492ddc9d1c8b32cb2db770\": container with ID starting with 4d8c2ce102cfea313d89a6074e18790072033b9124492ddc9d1c8b32cb2db770 not found: ID does not exist" containerID="4d8c2ce102cfea313d89a6074e18790072033b9124492ddc9d1c8b32cb2db770" Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.535216 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4d8c2ce102cfea313d89a6074e18790072033b9124492ddc9d1c8b32cb2db770"} err="failed to get container status \"4d8c2ce102cfea313d89a6074e18790072033b9124492ddc9d1c8b32cb2db770\": rpc error: code = NotFound desc = could not find container \"4d8c2ce102cfea313d89a6074e18790072033b9124492ddc9d1c8b32cb2db770\": container with ID starting with 4d8c2ce102cfea313d89a6074e18790072033b9124492ddc9d1c8b32cb2db770 not found: ID does not exist" Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.535307 4492 scope.go:117] "RemoveContainer" containerID="30c9e46507ae307e57f6c78265f36ccf1e6051442d252ab0b0bf5310326ff4f9" Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.539452 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t2dg5\" (UniqueName: \"kubernetes.io/projected/ae7dd4ac-dc69-4186-923f-40616d7fbea6-kube-api-access-t2dg5\") pod \"nova-cell0-db-create-cfv7t\" (UID: \"ae7dd4ac-dc69-4186-923f-40616d7fbea6\") " pod="openstack/nova-cell0-db-create-cfv7t" Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.544443 4492 generic.go:334] "Generic (PLEG): container finished" podID="37ed6b48-37b7-479f-837f-d49432778c49" containerID="56f8eb6952e555b8950f54a9952d93a5f0c9484e60f683a0a4cfaa23ea23f274" exitCode=143 Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.544500 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"37ed6b48-37b7-479f-837f-d49432778c49","Type":"ContainerDied","Data":"56f8eb6952e555b8950f54a9952d93a5f0c9484e60f683a0a4cfaa23ea23f274"} Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.547281 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5d9c5649-030f-45ea-9683-0570ed0435ab-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "5d9c5649-030f-45ea-9683-0570ed0435ab" (UID: "5d9c5649-030f-45ea-9683-0570ed0435ab"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.549212 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5d9c5649-030f-45ea-9683-0570ed0435ab-kube-api-access-dd46d" (OuterVolumeSpecName: "kube-api-access-dd46d") pod "5d9c5649-030f-45ea-9683-0570ed0435ab" (UID: "5d9c5649-030f-45ea-9683-0570ed0435ab"). InnerVolumeSpecName "kube-api-access-dd46d". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.581691 4492 generic.go:334] "Generic (PLEG): container finished" podID="5d9c5649-030f-45ea-9683-0570ed0435ab" containerID="99b27bf3d24b7dc4a90e43417ff1417d55f643665789ebe59138925bcec0a081" exitCode=0 Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.586707 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-7d7c7fc74d-977pm" Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.587095 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-7d7c7fc74d-977pm" event={"ID":"5d9c5649-030f-45ea-9683-0570ed0435ab","Type":"ContainerDied","Data":"99b27bf3d24b7dc4a90e43417ff1417d55f643665789ebe59138925bcec0a081"} Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.587128 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-7d7c7fc74d-977pm" event={"ID":"5d9c5649-030f-45ea-9683-0570ed0435ab","Type":"ContainerDied","Data":"571a156be39fe51f0b7f3be32d420a94c1310f0be117d19b0fe987b1dc15501d"} Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.605305 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e9b50126-fe92-4e0e-a31a-e5c40823949c-operator-scripts\") pod \"nova-cell1-db-create-6xr2x\" (UID: \"e9b50126-fe92-4e0e-a31a-e5c40823949c\") " pod="openstack/nova-cell1-db-create-6xr2x" Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.605353 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cb6703a6-97ba-4d95-8094-647d0baec33e-operator-scripts\") pod \"nova-api-b365-account-create-update-4vpg6\" (UID: \"cb6703a6-97ba-4d95-8094-647d0baec33e\") " pod="openstack/nova-api-b365-account-create-update-4vpg6" Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.605537 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v7xj8\" (UniqueName: \"kubernetes.io/projected/cb6703a6-97ba-4d95-8094-647d0baec33e-kube-api-access-v7xj8\") pod \"nova-api-b365-account-create-update-4vpg6\" (UID: \"cb6703a6-97ba-4d95-8094-647d0baec33e\") " pod="openstack/nova-api-b365-account-create-update-4vpg6" Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.605712 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b6l82\" (UniqueName: \"kubernetes.io/projected/e9b50126-fe92-4e0e-a31a-e5c40823949c-kube-api-access-b6l82\") pod \"nova-cell1-db-create-6xr2x\" (UID: \"e9b50126-fe92-4e0e-a31a-e5c40823949c\") " pod="openstack/nova-cell1-db-create-6xr2x" Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.605898 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dd46d\" (UniqueName: \"kubernetes.io/projected/5d9c5649-030f-45ea-9683-0570ed0435ab-kube-api-access-dd46d\") on node \"crc\" DevicePath \"\"" Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.605915 4492 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5d9c5649-030f-45ea-9683-0570ed0435ab-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.605925 4492 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: 
\"kubernetes.io/secret/5a2cfab6-77d9-4d88-aee3-c0b10ead6ff6-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.605942 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hww7r\" (UniqueName: \"kubernetes.io/projected/5a2cfab6-77d9-4d88-aee3-c0b10ead6ff6-kube-api-access-hww7r\") on node \"crc\" DevicePath \"\"" Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.607380 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cb6703a6-97ba-4d95-8094-647d0baec33e-operator-scripts\") pod \"nova-api-b365-account-create-update-4vpg6\" (UID: \"cb6703a6-97ba-4d95-8094-647d0baec33e\") " pod="openstack/nova-api-b365-account-create-update-4vpg6" Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.607894 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-4548-account-create-update-7rfwr"] Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.614689 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-4548-account-create-update-7rfwr" Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.617431 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-db-secret" Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.617886 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-4548-account-create-update-7rfwr"] Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.628438 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-cfv7t" Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.648048 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v7xj8\" (UniqueName: \"kubernetes.io/projected/cb6703a6-97ba-4d95-8094-647d0baec33e-kube-api-access-v7xj8\") pod \"nova-api-b365-account-create-update-4vpg6\" (UID: \"cb6703a6-97ba-4d95-8094-647d0baec33e\") " pod="openstack/nova-api-b365-account-create-update-4vpg6" Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.671232 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-79cdbd64cc-bkvb2"] Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.671525 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-79cdbd64cc-bkvb2" podUID="d2ee530e-90d8-4bc3-874a-d42b55783ee8" containerName="dnsmasq-dns" containerID="cri-o://2d85f8298b8602e2feb7c10f7e331b8691efd5ed2c6c5ba4fc430de01605fa0d" gracePeriod=10 Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.705810 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-154f-account-create-update-n2256"] Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.707287 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-154f-account-create-update-n2256" Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.708134 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b6l82\" (UniqueName: \"kubernetes.io/projected/e9b50126-fe92-4e0e-a31a-e5c40823949c-kube-api-access-b6l82\") pod \"nova-cell1-db-create-6xr2x\" (UID: \"e9b50126-fe92-4e0e-a31a-e5c40823949c\") " pod="openstack/nova-cell1-db-create-6xr2x" Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.708278 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e9b50126-fe92-4e0e-a31a-e5c40823949c-operator-scripts\") pod \"nova-cell1-db-create-6xr2x\" (UID: \"e9b50126-fe92-4e0e-a31a-e5c40823949c\") " pod="openstack/nova-cell1-db-create-6xr2x" Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.708319 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/87ae35c0-5648-46cd-86df-3193cdbd748e-operator-scripts\") pod \"nova-cell0-4548-account-create-update-7rfwr\" (UID: \"87ae35c0-5648-46cd-86df-3193cdbd748e\") " pod="openstack/nova-cell0-4548-account-create-update-7rfwr" Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.708388 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sg6cl\" (UniqueName: \"kubernetes.io/projected/87ae35c0-5648-46cd-86df-3193cdbd748e-kube-api-access-sg6cl\") pod \"nova-cell0-4548-account-create-update-7rfwr\" (UID: \"87ae35c0-5648-46cd-86df-3193cdbd748e\") " pod="openstack/nova-cell0-4548-account-create-update-7rfwr" Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.708640 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-db-secret" Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.708850 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-154f-account-create-update-n2256"] Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.709344 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e9b50126-fe92-4e0e-a31a-e5c40823949c-operator-scripts\") pod \"nova-cell1-db-create-6xr2x\" (UID: \"e9b50126-fe92-4e0e-a31a-e5c40823949c\") " pod="openstack/nova-cell1-db-create-6xr2x" Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.732671 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b6l82\" (UniqueName: \"kubernetes.io/projected/e9b50126-fe92-4e0e-a31a-e5c40823949c-kube-api-access-b6l82\") pod \"nova-cell1-db-create-6xr2x\" (UID: \"e9b50126-fe92-4e0e-a31a-e5c40823949c\") " pod="openstack/nova-cell1-db-create-6xr2x" Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.749670 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-b365-account-create-update-4vpg6" Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.811964 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zr9z8\" (UniqueName: \"kubernetes.io/projected/88818f37-64b1-4583-a66f-f6fc347fed00-kube-api-access-zr9z8\") pod \"nova-cell1-154f-account-create-update-n2256\" (UID: \"88818f37-64b1-4583-a66f-f6fc347fed00\") " pod="openstack/nova-cell1-154f-account-create-update-n2256" Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.812091 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/87ae35c0-5648-46cd-86df-3193cdbd748e-operator-scripts\") pod \"nova-cell0-4548-account-create-update-7rfwr\" (UID: \"87ae35c0-5648-46cd-86df-3193cdbd748e\") " pod="openstack/nova-cell0-4548-account-create-update-7rfwr" Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.813012 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/87ae35c0-5648-46cd-86df-3193cdbd748e-operator-scripts\") pod \"nova-cell0-4548-account-create-update-7rfwr\" (UID: \"87ae35c0-5648-46cd-86df-3193cdbd748e\") " pod="openstack/nova-cell0-4548-account-create-update-7rfwr" Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.812254 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/88818f37-64b1-4583-a66f-f6fc347fed00-operator-scripts\") pod \"nova-cell1-154f-account-create-update-n2256\" (UID: \"88818f37-64b1-4583-a66f-f6fc347fed00\") " pod="openstack/nova-cell1-154f-account-create-update-n2256" Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.813424 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sg6cl\" (UniqueName: \"kubernetes.io/projected/87ae35c0-5648-46cd-86df-3193cdbd748e-kube-api-access-sg6cl\") pod \"nova-cell0-4548-account-create-update-7rfwr\" (UID: \"87ae35c0-5648-46cd-86df-3193cdbd748e\") " pod="openstack/nova-cell0-4548-account-create-update-7rfwr" Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.844307 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sg6cl\" (UniqueName: \"kubernetes.io/projected/87ae35c0-5648-46cd-86df-3193cdbd748e-kube-api-access-sg6cl\") pod \"nova-cell0-4548-account-create-update-7rfwr\" (UID: \"87ae35c0-5648-46cd-86df-3193cdbd748e\") " pod="openstack/nova-cell0-4548-account-create-update-7rfwr" Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.879543 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5a2cfab6-77d9-4d88-aee3-c0b10ead6ff6-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5a2cfab6-77d9-4d88-aee3-c0b10ead6ff6" (UID: "5a2cfab6-77d9-4d88-aee3-c0b10ead6ff6"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.883282 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5d9c5649-030f-45ea-9683-0570ed0435ab-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5d9c5649-030f-45ea-9683-0570ed0435ab" (UID: "5d9c5649-030f-45ea-9683-0570ed0435ab"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.924583 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/88818f37-64b1-4583-a66f-f6fc347fed00-operator-scripts\") pod \"nova-cell1-154f-account-create-update-n2256\" (UID: \"88818f37-64b1-4583-a66f-f6fc347fed00\") " pod="openstack/nova-cell1-154f-account-create-update-n2256" Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.924718 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zr9z8\" (UniqueName: \"kubernetes.io/projected/88818f37-64b1-4583-a66f-f6fc347fed00-kube-api-access-zr9z8\") pod \"nova-cell1-154f-account-create-update-n2256\" (UID: \"88818f37-64b1-4583-a66f-f6fc347fed00\") " pod="openstack/nova-cell1-154f-account-create-update-n2256" Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.924774 4492 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5d9c5649-030f-45ea-9683-0570ed0435ab-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.924785 4492 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5a2cfab6-77d9-4d88-aee3-c0b10ead6ff6-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.925816 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/88818f37-64b1-4583-a66f-f6fc347fed00-operator-scripts\") pod \"nova-cell1-154f-account-create-update-n2256\" (UID: \"88818f37-64b1-4583-a66f-f6fc347fed00\") " pod="openstack/nova-cell1-154f-account-create-update-n2256" Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.946604 4492 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-79cdbd64cc-bkvb2" podUID="d2ee530e-90d8-4bc3-874a-d42b55783ee8" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.169:5353: connect: connection refused" Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.953874 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zr9z8\" (UniqueName: \"kubernetes.io/projected/88818f37-64b1-4583-a66f-f6fc347fed00-kube-api-access-zr9z8\") pod \"nova-cell1-154f-account-create-update-n2256\" (UID: \"88818f37-64b1-4583-a66f-f6fc347fed00\") " pod="openstack/nova-cell1-154f-account-create-update-n2256" Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.969598 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5d9c5649-030f-45ea-9683-0570ed0435ab-config-data" (OuterVolumeSpecName: "config-data") pod "5d9c5649-030f-45ea-9683-0570ed0435ab" (UID: "5d9c5649-030f-45ea-9683-0570ed0435ab"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:07:07 crc kubenswrapper[4492]: I1126 07:07:07.972732 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5a2cfab6-77d9-4d88-aee3-c0b10ead6ff6-config-data" (OuterVolumeSpecName: "config-data") pod "5a2cfab6-77d9-4d88-aee3-c0b10ead6ff6" (UID: "5a2cfab6-77d9-4d88-aee3-c0b10ead6ff6"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:07:08 crc kubenswrapper[4492]: I1126 07:07:08.027749 4492 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5d9c5649-030f-45ea-9683-0570ed0435ab-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 07:07:08 crc kubenswrapper[4492]: I1126 07:07:08.027877 4492 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5a2cfab6-77d9-4d88-aee3-c0b10ead6ff6-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 07:07:08 crc kubenswrapper[4492]: I1126 07:07:08.087888 4492 scope.go:117] "RemoveContainer" containerID="18cbb279cf481eaa5e2b0c5d654d9e17548561ba519536b900089d8c0bf14865" Nov 26 07:07:08 crc kubenswrapper[4492]: I1126 07:07:08.127301 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-6xr2x" Nov 26 07:07:08 crc kubenswrapper[4492]: I1126 07:07:08.155093 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-api-56748689fb-22p2v"] Nov 26 07:07:08 crc kubenswrapper[4492]: I1126 07:07:08.158068 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-4548-account-create-update-7rfwr" Nov 26 07:07:08 crc kubenswrapper[4492]: I1126 07:07:08.162694 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-154f-account-create-update-n2256" Nov 26 07:07:08 crc kubenswrapper[4492]: I1126 07:07:08.175994 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-api-56748689fb-22p2v"] Nov 26 07:07:08 crc kubenswrapper[4492]: I1126 07:07:08.239669 4492 scope.go:117] "RemoveContainer" containerID="99b27bf3d24b7dc4a90e43417ff1417d55f643665789ebe59138925bcec0a081" Nov 26 07:07:08 crc kubenswrapper[4492]: I1126 07:07:08.247482 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-cfnapi-7d7c7fc74d-977pm"] Nov 26 07:07:08 crc kubenswrapper[4492]: I1126 07:07:08.264252 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-cfnapi-7d7c7fc74d-977pm"] Nov 26 07:07:08 crc kubenswrapper[4492]: I1126 07:07:08.269829 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-2mcb9"] Nov 26 07:07:08 crc kubenswrapper[4492]: I1126 07:07:08.354740 4492 scope.go:117] "RemoveContainer" containerID="99b27bf3d24b7dc4a90e43417ff1417d55f643665789ebe59138925bcec0a081" Nov 26 07:07:08 crc kubenswrapper[4492]: E1126 07:07:08.357541 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"99b27bf3d24b7dc4a90e43417ff1417d55f643665789ebe59138925bcec0a081\": container with ID starting with 99b27bf3d24b7dc4a90e43417ff1417d55f643665789ebe59138925bcec0a081 not found: ID does not exist" containerID="99b27bf3d24b7dc4a90e43417ff1417d55f643665789ebe59138925bcec0a081" Nov 26 07:07:08 crc kubenswrapper[4492]: I1126 07:07:08.357588 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"99b27bf3d24b7dc4a90e43417ff1417d55f643665789ebe59138925bcec0a081"} err="failed to get container status \"99b27bf3d24b7dc4a90e43417ff1417d55f643665789ebe59138925bcec0a081\": rpc error: code = NotFound desc = could not find container \"99b27bf3d24b7dc4a90e43417ff1417d55f643665789ebe59138925bcec0a081\": container with ID starting with 99b27bf3d24b7dc4a90e43417ff1417d55f643665789ebe59138925bcec0a081 not found: ID does not 
exist" Nov 26 07:07:08 crc kubenswrapper[4492]: I1126 07:07:08.587031 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5a2cfab6-77d9-4d88-aee3-c0b10ead6ff6" path="/var/lib/kubelet/pods/5a2cfab6-77d9-4d88-aee3-c0b10ead6ff6/volumes" Nov 26 07:07:08 crc kubenswrapper[4492]: I1126 07:07:08.587803 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5d9c5649-030f-45ea-9683-0570ed0435ab" path="/var/lib/kubelet/pods/5d9c5649-030f-45ea-9683-0570ed0435ab/volumes" Nov 26 07:07:08 crc kubenswrapper[4492]: I1126 07:07:08.588349 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-cfv7t"] Nov 26 07:07:08 crc kubenswrapper[4492]: I1126 07:07:08.754275 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-cc797855b-f4zsl" event={"ID":"b4e68b8f-9411-48b7-85b4-a0d5dcae77e1","Type":"ContainerStarted","Data":"43e83822b71ea9354a0ed6ab4684b1054e1cfdb7cb068fdcce25f691dd4fb2e4"} Nov 26 07:07:08 crc kubenswrapper[4492]: I1126 07:07:08.754597 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-cfnapi-cc797855b-f4zsl" Nov 26 07:07:08 crc kubenswrapper[4492]: I1126 07:07:08.770078 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-564669c98c-fvvts" event={"ID":"0b009a36-769a-4f9e-a2ac-a188dd0c5dd1","Type":"ContainerStarted","Data":"da75648325c81d7559de02bea0911635fce549f55e61b0c55c9a39a1b5f7518e"} Nov 26 07:07:08 crc kubenswrapper[4492]: I1126 07:07:08.770402 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-api-564669c98c-fvvts" Nov 26 07:07:08 crc kubenswrapper[4492]: I1126 07:07:08.807034 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-cfnapi-cc797855b-f4zsl" podStartSLOduration=2.807011623 podStartE2EDuration="2.807011623s" podCreationTimestamp="2025-11-26 07:07:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:07:08.794981065 +0000 UTC m=+1124.678869362" watchObservedRunningTime="2025-11-26 07:07:08.807011623 +0000 UTC m=+1124.690899921" Nov 26 07:07:08 crc kubenswrapper[4492]: I1126 07:07:08.815647 4492 scope.go:117] "RemoveContainer" containerID="ad49995e2aecaba86e1a66b69a5c31ce8b84c3fcda1fb95b90b721c027caf77a" Nov 26 07:07:08 crc kubenswrapper[4492]: E1126 07:07:08.818862 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-cfnapi\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-cfnapi pod=heat-cfnapi-65c87b98b5-7kjlx_openstack(c7817ef7-6407-49f2-8f5b-3357945f0ec0)\"" pod="openstack/heat-cfnapi-65c87b98b5-7kjlx" podUID="c7817ef7-6407-49f2-8f5b-3357945f0ec0" Nov 26 07:07:08 crc kubenswrapper[4492]: I1126 07:07:08.825738 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-79cdbd64cc-bkvb2" Nov 26 07:07:08 crc kubenswrapper[4492]: I1126 07:07:08.834004 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-2mcb9" event={"ID":"66f5a9c1-c990-415e-b0e2-28d8ff866cf1","Type":"ContainerStarted","Data":"830b48653b5edee2889d75c4367badf7ef459154d38a762094b84d1ae1b4b908"} Nov 26 07:07:08 crc kubenswrapper[4492]: I1126 07:07:08.835675 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-cfv7t" event={"ID":"ae7dd4ac-dc69-4186-923f-40616d7fbea6","Type":"ContainerStarted","Data":"91dd1a72b4646e7c641ed5b1b9228a9712722d3331f04c09cad08b6a1eb133ef"} Nov 26 07:07:08 crc kubenswrapper[4492]: I1126 07:07:08.845435 4492 scope.go:117] "RemoveContainer" containerID="aef0af39dc68fdb9a04bc4ec82267b35281b86211b9b0adf8c987803d81f21f4" Nov 26 07:07:08 crc kubenswrapper[4492]: I1126 07:07:08.857286 4492 generic.go:334] "Generic (PLEG): container finished" podID="d2ee530e-90d8-4bc3-874a-d42b55783ee8" containerID="2d85f8298b8602e2feb7c10f7e331b8691efd5ed2c6c5ba4fc430de01605fa0d" exitCode=0 Nov 26 07:07:08 crc kubenswrapper[4492]: E1126 07:07:08.858360 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-api\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-api pod=heat-api-75f4bc59f-8qztx_openstack(3e950c7f-2241-4475-a891-d97102b54b9b)\"" pod="openstack/heat-api-75f4bc59f-8qztx" podUID="3e950c7f-2241-4475-a891-d97102b54b9b" Nov 26 07:07:08 crc kubenswrapper[4492]: I1126 07:07:08.859052 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-79cdbd64cc-bkvb2" Nov 26 07:07:08 crc kubenswrapper[4492]: I1126 07:07:08.859366 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-79cdbd64cc-bkvb2" event={"ID":"d2ee530e-90d8-4bc3-874a-d42b55783ee8","Type":"ContainerDied","Data":"2d85f8298b8602e2feb7c10f7e331b8691efd5ed2c6c5ba4fc430de01605fa0d"} Nov 26 07:07:08 crc kubenswrapper[4492]: I1126 07:07:08.859409 4492 scope.go:117] "RemoveContainer" containerID="2d85f8298b8602e2feb7c10f7e331b8691efd5ed2c6c5ba4fc430de01605fa0d" Nov 26 07:07:08 crc kubenswrapper[4492]: I1126 07:07:08.895190 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d2ee530e-90d8-4bc3-874a-d42b55783ee8-dns-svc\") pod \"d2ee530e-90d8-4bc3-874a-d42b55783ee8\" (UID: \"d2ee530e-90d8-4bc3-874a-d42b55783ee8\") " Nov 26 07:07:08 crc kubenswrapper[4492]: I1126 07:07:08.895225 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d2ee530e-90d8-4bc3-874a-d42b55783ee8-ovsdbserver-nb\") pod \"d2ee530e-90d8-4bc3-874a-d42b55783ee8\" (UID: \"d2ee530e-90d8-4bc3-874a-d42b55783ee8\") " Nov 26 07:07:08 crc kubenswrapper[4492]: I1126 07:07:08.895327 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d2ee530e-90d8-4bc3-874a-d42b55783ee8-config\") pod \"d2ee530e-90d8-4bc3-874a-d42b55783ee8\" (UID: \"d2ee530e-90d8-4bc3-874a-d42b55783ee8\") " Nov 26 07:07:08 crc kubenswrapper[4492]: I1126 07:07:08.895450 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d2ee530e-90d8-4bc3-874a-d42b55783ee8-ovsdbserver-sb\") pod 
\"d2ee530e-90d8-4bc3-874a-d42b55783ee8\" (UID: \"d2ee530e-90d8-4bc3-874a-d42b55783ee8\") " Nov 26 07:07:08 crc kubenswrapper[4492]: I1126 07:07:08.895492 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p29bw\" (UniqueName: \"kubernetes.io/projected/d2ee530e-90d8-4bc3-874a-d42b55783ee8-kube-api-access-p29bw\") pod \"d2ee530e-90d8-4bc3-874a-d42b55783ee8\" (UID: \"d2ee530e-90d8-4bc3-874a-d42b55783ee8\") " Nov 26 07:07:08 crc kubenswrapper[4492]: I1126 07:07:08.899612 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d2ee530e-90d8-4bc3-874a-d42b55783ee8-dns-swift-storage-0\") pod \"d2ee530e-90d8-4bc3-874a-d42b55783ee8\" (UID: \"d2ee530e-90d8-4bc3-874a-d42b55783ee8\") " Nov 26 07:07:08 crc kubenswrapper[4492]: I1126 07:07:08.963435 4492 scope.go:117] "RemoveContainer" containerID="ab9765e78ad50dd8926861bae83f8adc9b5e1d55ec08346c62cdb48b5a0d5a6d" Nov 26 07:07:09 crc kubenswrapper[4492]: I1126 07:07:09.061550 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-api-564669c98c-fvvts" podStartSLOduration=3.061529916 podStartE2EDuration="3.061529916s" podCreationTimestamp="2025-11-26 07:07:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:07:08.832013909 +0000 UTC m=+1124.715902208" watchObservedRunningTime="2025-11-26 07:07:09.061529916 +0000 UTC m=+1124.945418214" Nov 26 07:07:09 crc kubenswrapper[4492]: I1126 07:07:09.090752 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d2ee530e-90d8-4bc3-874a-d42b55783ee8-kube-api-access-p29bw" (OuterVolumeSpecName: "kube-api-access-p29bw") pod "d2ee530e-90d8-4bc3-874a-d42b55783ee8" (UID: "d2ee530e-90d8-4bc3-874a-d42b55783ee8"). InnerVolumeSpecName "kube-api-access-p29bw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:07:09 crc kubenswrapper[4492]: I1126 07:07:09.159846 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p29bw\" (UniqueName: \"kubernetes.io/projected/d2ee530e-90d8-4bc3-874a-d42b55783ee8-kube-api-access-p29bw\") on node \"crc\" DevicePath \"\"" Nov 26 07:07:09 crc kubenswrapper[4492]: I1126 07:07:09.170439 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-b365-account-create-update-4vpg6"] Nov 26 07:07:09 crc kubenswrapper[4492]: I1126 07:07:09.224096 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-6xr2x"] Nov 26 07:07:09 crc kubenswrapper[4492]: I1126 07:07:09.398019 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d2ee530e-90d8-4bc3-874a-d42b55783ee8-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "d2ee530e-90d8-4bc3-874a-d42b55783ee8" (UID: "d2ee530e-90d8-4bc3-874a-d42b55783ee8"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:07:09 crc kubenswrapper[4492]: I1126 07:07:09.402292 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-154f-account-create-update-n2256"] Nov 26 07:07:09 crc kubenswrapper[4492]: I1126 07:07:09.433662 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-4548-account-create-update-7rfwr"] Nov 26 07:07:09 crc kubenswrapper[4492]: I1126 07:07:09.438678 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d2ee530e-90d8-4bc3-874a-d42b55783ee8-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "d2ee530e-90d8-4bc3-874a-d42b55783ee8" (UID: "d2ee530e-90d8-4bc3-874a-d42b55783ee8"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:07:09 crc kubenswrapper[4492]: I1126 07:07:09.468856 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d2ee530e-90d8-4bc3-874a-d42b55783ee8-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "d2ee530e-90d8-4bc3-874a-d42b55783ee8" (UID: "d2ee530e-90d8-4bc3-874a-d42b55783ee8"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:07:09 crc kubenswrapper[4492]: I1126 07:07:09.494401 4492 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d2ee530e-90d8-4bc3-874a-d42b55783ee8-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 26 07:07:09 crc kubenswrapper[4492]: I1126 07:07:09.494423 4492 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d2ee530e-90d8-4bc3-874a-d42b55783ee8-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 07:07:09 crc kubenswrapper[4492]: I1126 07:07:09.494434 4492 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d2ee530e-90d8-4bc3-874a-d42b55783ee8-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 26 07:07:09 crc kubenswrapper[4492]: I1126 07:07:09.496716 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d2ee530e-90d8-4bc3-874a-d42b55783ee8-config" (OuterVolumeSpecName: "config") pod "d2ee530e-90d8-4bc3-874a-d42b55783ee8" (UID: "d2ee530e-90d8-4bc3-874a-d42b55783ee8"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:07:09 crc kubenswrapper[4492]: I1126 07:07:09.596422 4492 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d2ee530e-90d8-4bc3-874a-d42b55783ee8-config\") on node \"crc\" DevicePath \"\"" Nov 26 07:07:09 crc kubenswrapper[4492]: I1126 07:07:09.670760 4492 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/heat-cfnapi-65c87b98b5-7kjlx" Nov 26 07:07:09 crc kubenswrapper[4492]: I1126 07:07:09.670807 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-cfnapi-65c87b98b5-7kjlx" Nov 26 07:07:09 crc kubenswrapper[4492]: I1126 07:07:09.671222 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d2ee530e-90d8-4bc3-874a-d42b55783ee8-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "d2ee530e-90d8-4bc3-874a-d42b55783ee8" (UID: "d2ee530e-90d8-4bc3-874a-d42b55783ee8"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:07:09 crc kubenswrapper[4492]: I1126 07:07:09.698287 4492 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d2ee530e-90d8-4bc3-874a-d42b55783ee8-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 26 07:07:09 crc kubenswrapper[4492]: I1126 07:07:09.707770 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-api-75f4bc59f-8qztx" Nov 26 07:07:09 crc kubenswrapper[4492]: I1126 07:07:09.707850 4492 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/heat-api-75f4bc59f-8qztx" Nov 26 07:07:09 crc kubenswrapper[4492]: I1126 07:07:09.836727 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-79cdbd64cc-bkvb2"] Nov 26 07:07:09 crc kubenswrapper[4492]: I1126 07:07:09.840288 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-79cdbd64cc-bkvb2"] Nov 26 07:07:09 crc kubenswrapper[4492]: I1126 07:07:09.888040 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-154f-account-create-update-n2256" event={"ID":"88818f37-64b1-4583-a66f-f6fc347fed00","Type":"ContainerStarted","Data":"c3b94e6b395ae2ca0c2d562232bce5750d87013be5302a6c1f153132c0bba72c"} Nov 26 07:07:09 crc kubenswrapper[4492]: I1126 07:07:09.898501 4492 generic.go:334] "Generic (PLEG): container finished" podID="66f5a9c1-c990-415e-b0e2-28d8ff866cf1" containerID="88e1fd74c66a6729340d4ae4796c21a0e05e2aa899fa2d2c3406ade5953b58d2" exitCode=0 Nov 26 07:07:09 crc kubenswrapper[4492]: I1126 07:07:09.898567 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-2mcb9" event={"ID":"66f5a9c1-c990-415e-b0e2-28d8ff866cf1","Type":"ContainerDied","Data":"88e1fd74c66a6729340d4ae4796c21a0e05e2aa899fa2d2c3406ade5953b58d2"} Nov 26 07:07:09 crc kubenswrapper[4492]: I1126 07:07:09.900121 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-b365-account-create-update-4vpg6" event={"ID":"cb6703a6-97ba-4d95-8094-647d0baec33e","Type":"ContainerStarted","Data":"8352c3846c3d7e947d57816268b126813ef01e587ab5126180b2fefeb04c7680"} Nov 26 07:07:09 crc kubenswrapper[4492]: I1126 07:07:09.900147 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-b365-account-create-update-4vpg6" event={"ID":"cb6703a6-97ba-4d95-8094-647d0baec33e","Type":"ContainerStarted","Data":"9ecab4ab2a484638df68bb365cdf62eae7ceab927ceb9055ac50f3e53b7ce89b"} Nov 26 07:07:09 crc kubenswrapper[4492]: I1126 07:07:09.923534 4492 generic.go:334] "Generic (PLEG): container finished" podID="ae7dd4ac-dc69-4186-923f-40616d7fbea6" containerID="2129a621888d0d70629f76ed5efc6c3a2b6445c1fde2499772f615b55bc844ff" exitCode=0 Nov 26 07:07:09 crc kubenswrapper[4492]: I1126 07:07:09.923772 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-cfv7t" event={"ID":"ae7dd4ac-dc69-4186-923f-40616d7fbea6","Type":"ContainerDied","Data":"2129a621888d0d70629f76ed5efc6c3a2b6445c1fde2499772f615b55bc844ff"} Nov 26 07:07:09 crc kubenswrapper[4492]: I1126 07:07:09.938867 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-6xr2x" event={"ID":"e9b50126-fe92-4e0e-a31a-e5c40823949c","Type":"ContainerStarted","Data":"095fc718a0ac32ea00f9dc11ae768c035b2263601f68d397e70e7f24ebcad089"} Nov 26 07:07:09 crc kubenswrapper[4492]: I1126 07:07:09.939047 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/nova-cell1-db-create-6xr2x" event={"ID":"e9b50126-fe92-4e0e-a31a-e5c40823949c","Type":"ContainerStarted","Data":"1ee2a8219b78ed5a68c0dc21542cbabfbe0cf84c753ccb4414722e9c5e8ac679"} Nov 26 07:07:09 crc kubenswrapper[4492]: I1126 07:07:09.945110 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-4548-account-create-update-7rfwr" event={"ID":"87ae35c0-5648-46cd-86df-3193cdbd748e","Type":"ContainerStarted","Data":"b2a7ba70f27f3907c1dd63011430b4dd2486841f430b0351a1beecd0900efe4e"} Nov 26 07:07:09 crc kubenswrapper[4492]: I1126 07:07:09.957880 4492 generic.go:334] "Generic (PLEG): container finished" podID="37ed6b48-37b7-479f-837f-d49432778c49" containerID="f0bcd12fa4afc6ff4c40910d74c5855c7cc966cd3bd6cc0b480334ee5744bacd" exitCode=0 Nov 26 07:07:09 crc kubenswrapper[4492]: I1126 07:07:09.958812 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"37ed6b48-37b7-479f-837f-d49432778c49","Type":"ContainerDied","Data":"f0bcd12fa4afc6ff4c40910d74c5855c7cc966cd3bd6cc0b480334ee5744bacd"} Nov 26 07:07:09 crc kubenswrapper[4492]: I1126 07:07:09.959128 4492 scope.go:117] "RemoveContainer" containerID="aef0af39dc68fdb9a04bc4ec82267b35281b86211b9b0adf8c987803d81f21f4" Nov 26 07:07:09 crc kubenswrapper[4492]: E1126 07:07:09.959314 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-api\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-api pod=heat-api-75f4bc59f-8qztx_openstack(3e950c7f-2241-4475-a891-d97102b54b9b)\"" pod="openstack/heat-api-75f4bc59f-8qztx" podUID="3e950c7f-2241-4475-a891-d97102b54b9b" Nov 26 07:07:09 crc kubenswrapper[4492]: I1126 07:07:09.959574 4492 scope.go:117] "RemoveContainer" containerID="ad49995e2aecaba86e1a66b69a5c31ce8b84c3fcda1fb95b90b721c027caf77a" Nov 26 07:07:09 crc kubenswrapper[4492]: E1126 07:07:09.959796 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-cfnapi\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-cfnapi pod=heat-cfnapi-65c87b98b5-7kjlx_openstack(c7817ef7-6407-49f2-8f5b-3357945f0ec0)\"" pod="openstack/heat-cfnapi-65c87b98b5-7kjlx" podUID="c7817ef7-6407-49f2-8f5b-3357945f0ec0" Nov 26 07:07:09 crc kubenswrapper[4492]: I1126 07:07:09.967133 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-b365-account-create-update-4vpg6" podStartSLOduration=2.967121538 podStartE2EDuration="2.967121538s" podCreationTimestamp="2025-11-26 07:07:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:07:09.954381546 +0000 UTC m=+1125.838269843" watchObservedRunningTime="2025-11-26 07:07:09.967121538 +0000 UTC m=+1125.851009835" Nov 26 07:07:09 crc kubenswrapper[4492]: I1126 07:07:09.992843 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-db-create-6xr2x" podStartSLOduration=2.992826174 podStartE2EDuration="2.992826174s" podCreationTimestamp="2025-11-26 07:07:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:07:09.980850329 +0000 UTC m=+1125.864738627" watchObservedRunningTime="2025-11-26 07:07:09.992826174 +0000 UTC m=+1125.876714473" Nov 26 07:07:10 crc kubenswrapper[4492]: I1126 07:07:10.224039 4492 util.go:48] "No ready sandbox for pod can be 
found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 26 07:07:10 crc kubenswrapper[4492]: I1126 07:07:10.320908 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/37ed6b48-37b7-479f-837f-d49432778c49-httpd-run\") pod \"37ed6b48-37b7-479f-837f-d49432778c49\" (UID: \"37ed6b48-37b7-479f-837f-d49432778c49\") " Nov 26 07:07:10 crc kubenswrapper[4492]: I1126 07:07:10.320972 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/37ed6b48-37b7-479f-837f-d49432778c49-public-tls-certs\") pod \"37ed6b48-37b7-479f-837f-d49432778c49\" (UID: \"37ed6b48-37b7-479f-837f-d49432778c49\") " Nov 26 07:07:10 crc kubenswrapper[4492]: I1126 07:07:10.321021 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"37ed6b48-37b7-479f-837f-d49432778c49\" (UID: \"37ed6b48-37b7-479f-837f-d49432778c49\") " Nov 26 07:07:10 crc kubenswrapper[4492]: I1126 07:07:10.321180 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/37ed6b48-37b7-479f-837f-d49432778c49-combined-ca-bundle\") pod \"37ed6b48-37b7-479f-837f-d49432778c49\" (UID: \"37ed6b48-37b7-479f-837f-d49432778c49\") " Nov 26 07:07:10 crc kubenswrapper[4492]: I1126 07:07:10.321214 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kxhr5\" (UniqueName: \"kubernetes.io/projected/37ed6b48-37b7-479f-837f-d49432778c49-kube-api-access-kxhr5\") pod \"37ed6b48-37b7-479f-837f-d49432778c49\" (UID: \"37ed6b48-37b7-479f-837f-d49432778c49\") " Nov 26 07:07:10 crc kubenswrapper[4492]: I1126 07:07:10.321251 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/37ed6b48-37b7-479f-837f-d49432778c49-scripts\") pod \"37ed6b48-37b7-479f-837f-d49432778c49\" (UID: \"37ed6b48-37b7-479f-837f-d49432778c49\") " Nov 26 07:07:10 crc kubenswrapper[4492]: I1126 07:07:10.321318 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/37ed6b48-37b7-479f-837f-d49432778c49-config-data\") pod \"37ed6b48-37b7-479f-837f-d49432778c49\" (UID: \"37ed6b48-37b7-479f-837f-d49432778c49\") " Nov 26 07:07:10 crc kubenswrapper[4492]: I1126 07:07:10.321428 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/37ed6b48-37b7-479f-837f-d49432778c49-logs\") pod \"37ed6b48-37b7-479f-837f-d49432778c49\" (UID: \"37ed6b48-37b7-479f-837f-d49432778c49\") " Nov 26 07:07:10 crc kubenswrapper[4492]: I1126 07:07:10.325352 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/37ed6b48-37b7-479f-837f-d49432778c49-logs" (OuterVolumeSpecName: "logs") pod "37ed6b48-37b7-479f-837f-d49432778c49" (UID: "37ed6b48-37b7-479f-837f-d49432778c49"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:07:10 crc kubenswrapper[4492]: I1126 07:07:10.332112 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/37ed6b48-37b7-479f-837f-d49432778c49-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "37ed6b48-37b7-479f-837f-d49432778c49" (UID: "37ed6b48-37b7-479f-837f-d49432778c49"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:07:10 crc kubenswrapper[4492]: I1126 07:07:10.345497 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage02-crc" (OuterVolumeSpecName: "glance") pod "37ed6b48-37b7-479f-837f-d49432778c49" (UID: "37ed6b48-37b7-479f-837f-d49432778c49"). InnerVolumeSpecName "local-storage02-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 26 07:07:10 crc kubenswrapper[4492]: I1126 07:07:10.352993 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/37ed6b48-37b7-479f-837f-d49432778c49-scripts" (OuterVolumeSpecName: "scripts") pod "37ed6b48-37b7-479f-837f-d49432778c49" (UID: "37ed6b48-37b7-479f-837f-d49432778c49"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:07:10 crc kubenswrapper[4492]: I1126 07:07:10.355375 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/37ed6b48-37b7-479f-837f-d49432778c49-kube-api-access-kxhr5" (OuterVolumeSpecName: "kube-api-access-kxhr5") pod "37ed6b48-37b7-479f-837f-d49432778c49" (UID: "37ed6b48-37b7-479f-837f-d49432778c49"). InnerVolumeSpecName "kube-api-access-kxhr5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:07:10 crc kubenswrapper[4492]: I1126 07:07:10.393545 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/37ed6b48-37b7-479f-837f-d49432778c49-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "37ed6b48-37b7-479f-837f-d49432778c49" (UID: "37ed6b48-37b7-479f-837f-d49432778c49"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:07:10 crc kubenswrapper[4492]: I1126 07:07:10.395359 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/37ed6b48-37b7-479f-837f-d49432778c49-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "37ed6b48-37b7-479f-837f-d49432778c49" (UID: "37ed6b48-37b7-479f-837f-d49432778c49"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:07:10 crc kubenswrapper[4492]: I1126 07:07:10.404894 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/37ed6b48-37b7-479f-837f-d49432778c49-config-data" (OuterVolumeSpecName: "config-data") pod "37ed6b48-37b7-479f-837f-d49432778c49" (UID: "37ed6b48-37b7-479f-837f-d49432778c49"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:07:10 crc kubenswrapper[4492]: I1126 07:07:10.423712 4492 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/37ed6b48-37b7-479f-837f-d49432778c49-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:07:10 crc kubenswrapper[4492]: I1126 07:07:10.423876 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kxhr5\" (UniqueName: \"kubernetes.io/projected/37ed6b48-37b7-479f-837f-d49432778c49-kube-api-access-kxhr5\") on node \"crc\" DevicePath \"\"" Nov 26 07:07:10 crc kubenswrapper[4492]: I1126 07:07:10.423955 4492 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/37ed6b48-37b7-479f-837f-d49432778c49-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 07:07:10 crc kubenswrapper[4492]: I1126 07:07:10.424008 4492 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/37ed6b48-37b7-479f-837f-d49432778c49-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 07:07:10 crc kubenswrapper[4492]: I1126 07:07:10.424056 4492 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/37ed6b48-37b7-479f-837f-d49432778c49-logs\") on node \"crc\" DevicePath \"\"" Nov 26 07:07:10 crc kubenswrapper[4492]: I1126 07:07:10.424103 4492 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/37ed6b48-37b7-479f-837f-d49432778c49-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 26 07:07:10 crc kubenswrapper[4492]: I1126 07:07:10.424155 4492 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/37ed6b48-37b7-479f-837f-d49432778c49-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 07:07:10 crc kubenswrapper[4492]: I1126 07:07:10.424266 4492 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" " Nov 26 07:07:10 crc kubenswrapper[4492]: I1126 07:07:10.451162 4492 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage02-crc" (UniqueName: "kubernetes.io/local-volume/local-storage02-crc") on node "crc" Nov 26 07:07:10 crc kubenswrapper[4492]: I1126 07:07:10.465366 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d2ee530e-90d8-4bc3-874a-d42b55783ee8" path="/var/lib/kubelet/pods/d2ee530e-90d8-4bc3-874a-d42b55783ee8/volumes" Nov 26 07:07:10 crc kubenswrapper[4492]: I1126 07:07:10.536826 4492 reconciler_common.go:293] "Volume detached for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" DevicePath \"\"" Nov 26 07:07:10 crc kubenswrapper[4492]: I1126 07:07:10.972662 4492 generic.go:334] "Generic (PLEG): container finished" podID="e9b50126-fe92-4e0e-a31a-e5c40823949c" containerID="095fc718a0ac32ea00f9dc11ae768c035b2263601f68d397e70e7f24ebcad089" exitCode=0 Nov 26 07:07:10 crc kubenswrapper[4492]: I1126 07:07:10.973335 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-6xr2x" event={"ID":"e9b50126-fe92-4e0e-a31a-e5c40823949c","Type":"ContainerDied","Data":"095fc718a0ac32ea00f9dc11ae768c035b2263601f68d397e70e7f24ebcad089"} Nov 26 07:07:10 crc kubenswrapper[4492]: I1126 07:07:10.975253 4492 generic.go:334] "Generic (PLEG): container 
finished" podID="87ae35c0-5648-46cd-86df-3193cdbd748e" containerID="63aec72331020afe59469e1a13758e8e0943f6f6bd27ed93dbf0e1d4b10a0058" exitCode=0 Nov 26 07:07:10 crc kubenswrapper[4492]: I1126 07:07:10.975306 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-4548-account-create-update-7rfwr" event={"ID":"87ae35c0-5648-46cd-86df-3193cdbd748e","Type":"ContainerDied","Data":"63aec72331020afe59469e1a13758e8e0943f6f6bd27ed93dbf0e1d4b10a0058"} Nov 26 07:07:10 crc kubenswrapper[4492]: I1126 07:07:10.976978 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"37ed6b48-37b7-479f-837f-d49432778c49","Type":"ContainerDied","Data":"8c3d663e6cefacc24b7ee290273dea6e676208d64b608e3d4fbc1e927295ac14"} Nov 26 07:07:10 crc kubenswrapper[4492]: I1126 07:07:10.977018 4492 scope.go:117] "RemoveContainer" containerID="f0bcd12fa4afc6ff4c40910d74c5855c7cc966cd3bd6cc0b480334ee5744bacd" Nov 26 07:07:10 crc kubenswrapper[4492]: I1126 07:07:10.977233 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 26 07:07:10 crc kubenswrapper[4492]: I1126 07:07:10.983044 4492 generic.go:334] "Generic (PLEG): container finished" podID="88818f37-64b1-4583-a66f-f6fc347fed00" containerID="160fdc4870abaef7485d921a8bbfef9c47e9aa6038e12d63f5edf617a263d7e5" exitCode=0 Nov 26 07:07:10 crc kubenswrapper[4492]: I1126 07:07:10.983101 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-154f-account-create-update-n2256" event={"ID":"88818f37-64b1-4583-a66f-f6fc347fed00","Type":"ContainerDied","Data":"160fdc4870abaef7485d921a8bbfef9c47e9aa6038e12d63f5edf617a263d7e5"} Nov 26 07:07:10 crc kubenswrapper[4492]: I1126 07:07:10.984548 4492 generic.go:334] "Generic (PLEG): container finished" podID="cb6703a6-97ba-4d95-8094-647d0baec33e" containerID="8352c3846c3d7e947d57816268b126813ef01e587ab5126180b2fefeb04c7680" exitCode=0 Nov 26 07:07:10 crc kubenswrapper[4492]: I1126 07:07:10.984714 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-b365-account-create-update-4vpg6" event={"ID":"cb6703a6-97ba-4d95-8094-647d0baec33e","Type":"ContainerDied","Data":"8352c3846c3d7e947d57816268b126813ef01e587ab5126180b2fefeb04c7680"} Nov 26 07:07:10 crc kubenswrapper[4492]: I1126 07:07:10.985093 4492 scope.go:117] "RemoveContainer" containerID="aef0af39dc68fdb9a04bc4ec82267b35281b86211b9b0adf8c987803d81f21f4" Nov 26 07:07:10 crc kubenswrapper[4492]: E1126 07:07:10.985429 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-api\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-api pod=heat-api-75f4bc59f-8qztx_openstack(3e950c7f-2241-4475-a891-d97102b54b9b)\"" pod="openstack/heat-api-75f4bc59f-8qztx" podUID="3e950c7f-2241-4475-a891-d97102b54b9b" Nov 26 07:07:11 crc kubenswrapper[4492]: I1126 07:07:11.017122 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 07:07:11 crc kubenswrapper[4492]: I1126 07:07:11.022849 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 07:07:11 crc kubenswrapper[4492]: I1126 07:07:11.065310 4492 scope.go:117] "RemoveContainer" containerID="56f8eb6952e555b8950f54a9952d93a5f0c9484e60f683a0a4cfaa23ea23f274" Nov 26 07:07:11 crc kubenswrapper[4492]: I1126 07:07:11.085614 4492 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openstack/glance-default-external-api-0"] Nov 26 07:07:11 crc kubenswrapper[4492]: E1126 07:07:11.086349 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="37ed6b48-37b7-479f-837f-d49432778c49" containerName="glance-log" Nov 26 07:07:11 crc kubenswrapper[4492]: I1126 07:07:11.086378 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="37ed6b48-37b7-479f-837f-d49432778c49" containerName="glance-log" Nov 26 07:07:11 crc kubenswrapper[4492]: E1126 07:07:11.086408 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d2ee530e-90d8-4bc3-874a-d42b55783ee8" containerName="dnsmasq-dns" Nov 26 07:07:11 crc kubenswrapper[4492]: I1126 07:07:11.086417 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="d2ee530e-90d8-4bc3-874a-d42b55783ee8" containerName="dnsmasq-dns" Nov 26 07:07:11 crc kubenswrapper[4492]: E1126 07:07:11.086434 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d2ee530e-90d8-4bc3-874a-d42b55783ee8" containerName="init" Nov 26 07:07:11 crc kubenswrapper[4492]: I1126 07:07:11.086440 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="d2ee530e-90d8-4bc3-874a-d42b55783ee8" containerName="init" Nov 26 07:07:11 crc kubenswrapper[4492]: E1126 07:07:11.086491 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="37ed6b48-37b7-479f-837f-d49432778c49" containerName="glance-httpd" Nov 26 07:07:11 crc kubenswrapper[4492]: I1126 07:07:11.086499 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="37ed6b48-37b7-479f-837f-d49432778c49" containerName="glance-httpd" Nov 26 07:07:11 crc kubenswrapper[4492]: I1126 07:07:11.086797 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="d2ee530e-90d8-4bc3-874a-d42b55783ee8" containerName="dnsmasq-dns" Nov 26 07:07:11 crc kubenswrapper[4492]: I1126 07:07:11.086834 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="37ed6b48-37b7-479f-837f-d49432778c49" containerName="glance-httpd" Nov 26 07:07:11 crc kubenswrapper[4492]: I1126 07:07:11.086849 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="37ed6b48-37b7-479f-837f-d49432778c49" containerName="glance-log" Nov 26 07:07:11 crc kubenswrapper[4492]: I1126 07:07:11.089525 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 26 07:07:11 crc kubenswrapper[4492]: I1126 07:07:11.094509 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 26 07:07:11 crc kubenswrapper[4492]: I1126 07:07:11.094716 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Nov 26 07:07:11 crc kubenswrapper[4492]: I1126 07:07:11.127222 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 07:07:11 crc kubenswrapper[4492]: I1126 07:07:11.260922 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6f99c970-42c7-46d7-bbc9-8d8426773700-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"6f99c970-42c7-46d7-bbc9-8d8426773700\") " pod="openstack/glance-default-external-api-0" Nov 26 07:07:11 crc kubenswrapper[4492]: I1126 07:07:11.261000 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6f99c970-42c7-46d7-bbc9-8d8426773700-logs\") pod \"glance-default-external-api-0\" (UID: \"6f99c970-42c7-46d7-bbc9-8d8426773700\") " pod="openstack/glance-default-external-api-0" Nov 26 07:07:11 crc kubenswrapper[4492]: I1126 07:07:11.261085 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-external-api-0\" (UID: \"6f99c970-42c7-46d7-bbc9-8d8426773700\") " pod="openstack/glance-default-external-api-0" Nov 26 07:07:11 crc kubenswrapper[4492]: I1126 07:07:11.261113 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6f99c970-42c7-46d7-bbc9-8d8426773700-config-data\") pod \"glance-default-external-api-0\" (UID: \"6f99c970-42c7-46d7-bbc9-8d8426773700\") " pod="openstack/glance-default-external-api-0" Nov 26 07:07:11 crc kubenswrapper[4492]: I1126 07:07:11.261135 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/6f99c970-42c7-46d7-bbc9-8d8426773700-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"6f99c970-42c7-46d7-bbc9-8d8426773700\") " pod="openstack/glance-default-external-api-0" Nov 26 07:07:11 crc kubenswrapper[4492]: I1126 07:07:11.261473 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/6f99c970-42c7-46d7-bbc9-8d8426773700-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"6f99c970-42c7-46d7-bbc9-8d8426773700\") " pod="openstack/glance-default-external-api-0" Nov 26 07:07:11 crc kubenswrapper[4492]: I1126 07:07:11.261542 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6f99c970-42c7-46d7-bbc9-8d8426773700-scripts\") pod \"glance-default-external-api-0\" (UID: \"6f99c970-42c7-46d7-bbc9-8d8426773700\") " pod="openstack/glance-default-external-api-0" Nov 26 07:07:11 crc kubenswrapper[4492]: I1126 07:07:11.261617 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"kube-api-access-q8wqp\" (UniqueName: \"kubernetes.io/projected/6f99c970-42c7-46d7-bbc9-8d8426773700-kube-api-access-q8wqp\") pod \"glance-default-external-api-0\" (UID: \"6f99c970-42c7-46d7-bbc9-8d8426773700\") " pod="openstack/glance-default-external-api-0" Nov 26 07:07:11 crc kubenswrapper[4492]: I1126 07:07:11.365608 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q8wqp\" (UniqueName: \"kubernetes.io/projected/6f99c970-42c7-46d7-bbc9-8d8426773700-kube-api-access-q8wqp\") pod \"glance-default-external-api-0\" (UID: \"6f99c970-42c7-46d7-bbc9-8d8426773700\") " pod="openstack/glance-default-external-api-0" Nov 26 07:07:11 crc kubenswrapper[4492]: I1126 07:07:11.365704 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6f99c970-42c7-46d7-bbc9-8d8426773700-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"6f99c970-42c7-46d7-bbc9-8d8426773700\") " pod="openstack/glance-default-external-api-0" Nov 26 07:07:11 crc kubenswrapper[4492]: I1126 07:07:11.365763 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6f99c970-42c7-46d7-bbc9-8d8426773700-logs\") pod \"glance-default-external-api-0\" (UID: \"6f99c970-42c7-46d7-bbc9-8d8426773700\") " pod="openstack/glance-default-external-api-0" Nov 26 07:07:11 crc kubenswrapper[4492]: I1126 07:07:11.365816 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-external-api-0\" (UID: \"6f99c970-42c7-46d7-bbc9-8d8426773700\") " pod="openstack/glance-default-external-api-0" Nov 26 07:07:11 crc kubenswrapper[4492]: I1126 07:07:11.365864 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6f99c970-42c7-46d7-bbc9-8d8426773700-config-data\") pod \"glance-default-external-api-0\" (UID: \"6f99c970-42c7-46d7-bbc9-8d8426773700\") " pod="openstack/glance-default-external-api-0" Nov 26 07:07:11 crc kubenswrapper[4492]: I1126 07:07:11.365885 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/6f99c970-42c7-46d7-bbc9-8d8426773700-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"6f99c970-42c7-46d7-bbc9-8d8426773700\") " pod="openstack/glance-default-external-api-0" Nov 26 07:07:11 crc kubenswrapper[4492]: I1126 07:07:11.365931 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/6f99c970-42c7-46d7-bbc9-8d8426773700-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"6f99c970-42c7-46d7-bbc9-8d8426773700\") " pod="openstack/glance-default-external-api-0" Nov 26 07:07:11 crc kubenswrapper[4492]: I1126 07:07:11.366008 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6f99c970-42c7-46d7-bbc9-8d8426773700-scripts\") pod \"glance-default-external-api-0\" (UID: \"6f99c970-42c7-46d7-bbc9-8d8426773700\") " pod="openstack/glance-default-external-api-0" Nov 26 07:07:11 crc kubenswrapper[4492]: I1126 07:07:11.373253 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/6f99c970-42c7-46d7-bbc9-8d8426773700-scripts\") pod \"glance-default-external-api-0\" (UID: \"6f99c970-42c7-46d7-bbc9-8d8426773700\") " pod="openstack/glance-default-external-api-0" Nov 26 07:07:11 crc kubenswrapper[4492]: I1126 07:07:11.378567 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6f99c970-42c7-46d7-bbc9-8d8426773700-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"6f99c970-42c7-46d7-bbc9-8d8426773700\") " pod="openstack/glance-default-external-api-0" Nov 26 07:07:11 crc kubenswrapper[4492]: I1126 07:07:11.378839 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6f99c970-42c7-46d7-bbc9-8d8426773700-logs\") pod \"glance-default-external-api-0\" (UID: \"6f99c970-42c7-46d7-bbc9-8d8426773700\") " pod="openstack/glance-default-external-api-0" Nov 26 07:07:11 crc kubenswrapper[4492]: I1126 07:07:11.379134 4492 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-external-api-0\" (UID: \"6f99c970-42c7-46d7-bbc9-8d8426773700\") device mount path \"/mnt/openstack/pv02\"" pod="openstack/glance-default-external-api-0" Nov 26 07:07:11 crc kubenswrapper[4492]: I1126 07:07:11.396336 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/6f99c970-42c7-46d7-bbc9-8d8426773700-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"6f99c970-42c7-46d7-bbc9-8d8426773700\") " pod="openstack/glance-default-external-api-0" Nov 26 07:07:11 crc kubenswrapper[4492]: I1126 07:07:11.405201 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6f99c970-42c7-46d7-bbc9-8d8426773700-config-data\") pod \"glance-default-external-api-0\" (UID: \"6f99c970-42c7-46d7-bbc9-8d8426773700\") " pod="openstack/glance-default-external-api-0" Nov 26 07:07:11 crc kubenswrapper[4492]: I1126 07:07:11.406076 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q8wqp\" (UniqueName: \"kubernetes.io/projected/6f99c970-42c7-46d7-bbc9-8d8426773700-kube-api-access-q8wqp\") pod \"glance-default-external-api-0\" (UID: \"6f99c970-42c7-46d7-bbc9-8d8426773700\") " pod="openstack/glance-default-external-api-0" Nov 26 07:07:11 crc kubenswrapper[4492]: I1126 07:07:11.413014 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/6f99c970-42c7-46d7-bbc9-8d8426773700-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"6f99c970-42c7-46d7-bbc9-8d8426773700\") " pod="openstack/glance-default-external-api-0" Nov 26 07:07:11 crc kubenswrapper[4492]: I1126 07:07:11.444684 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-external-api-0\" (UID: \"6f99c970-42c7-46d7-bbc9-8d8426773700\") " pod="openstack/glance-default-external-api-0" Nov 26 07:07:11 crc kubenswrapper[4492]: I1126 07:07:11.456649 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-2mcb9" Nov 26 07:07:11 crc kubenswrapper[4492]: I1126 07:07:11.525131 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-db-create-cfv7t" Nov 26 07:07:11 crc kubenswrapper[4492]: I1126 07:07:11.571236 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/66f5a9c1-c990-415e-b0e2-28d8ff866cf1-operator-scripts\") pod \"66f5a9c1-c990-415e-b0e2-28d8ff866cf1\" (UID: \"66f5a9c1-c990-415e-b0e2-28d8ff866cf1\") " Nov 26 07:07:11 crc kubenswrapper[4492]: I1126 07:07:11.571697 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/66f5a9c1-c990-415e-b0e2-28d8ff866cf1-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "66f5a9c1-c990-415e-b0e2-28d8ff866cf1" (UID: "66f5a9c1-c990-415e-b0e2-28d8ff866cf1"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:07:11 crc kubenswrapper[4492]: I1126 07:07:11.571718 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9q28f\" (UniqueName: \"kubernetes.io/projected/66f5a9c1-c990-415e-b0e2-28d8ff866cf1-kube-api-access-9q28f\") pod \"66f5a9c1-c990-415e-b0e2-28d8ff866cf1\" (UID: \"66f5a9c1-c990-415e-b0e2-28d8ff866cf1\") " Nov 26 07:07:11 crc kubenswrapper[4492]: I1126 07:07:11.573495 4492 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/66f5a9c1-c990-415e-b0e2-28d8ff866cf1-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 07:07:11 crc kubenswrapper[4492]: I1126 07:07:11.576301 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/66f5a9c1-c990-415e-b0e2-28d8ff866cf1-kube-api-access-9q28f" (OuterVolumeSpecName: "kube-api-access-9q28f") pod "66f5a9c1-c990-415e-b0e2-28d8ff866cf1" (UID: "66f5a9c1-c990-415e-b0e2-28d8ff866cf1"). InnerVolumeSpecName "kube-api-access-9q28f". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:07:11 crc kubenswrapper[4492]: I1126 07:07:11.674794 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t2dg5\" (UniqueName: \"kubernetes.io/projected/ae7dd4ac-dc69-4186-923f-40616d7fbea6-kube-api-access-t2dg5\") pod \"ae7dd4ac-dc69-4186-923f-40616d7fbea6\" (UID: \"ae7dd4ac-dc69-4186-923f-40616d7fbea6\") " Nov 26 07:07:11 crc kubenswrapper[4492]: I1126 07:07:11.675738 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ae7dd4ac-dc69-4186-923f-40616d7fbea6-operator-scripts\") pod \"ae7dd4ac-dc69-4186-923f-40616d7fbea6\" (UID: \"ae7dd4ac-dc69-4186-923f-40616d7fbea6\") " Nov 26 07:07:11 crc kubenswrapper[4492]: I1126 07:07:11.676598 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9q28f\" (UniqueName: \"kubernetes.io/projected/66f5a9c1-c990-415e-b0e2-28d8ff866cf1-kube-api-access-9q28f\") on node \"crc\" DevicePath \"\"" Nov 26 07:07:11 crc kubenswrapper[4492]: I1126 07:07:11.676927 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ae7dd4ac-dc69-4186-923f-40616d7fbea6-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "ae7dd4ac-dc69-4186-923f-40616d7fbea6" (UID: "ae7dd4ac-dc69-4186-923f-40616d7fbea6"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:07:11 crc kubenswrapper[4492]: I1126 07:07:11.680382 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ae7dd4ac-dc69-4186-923f-40616d7fbea6-kube-api-access-t2dg5" (OuterVolumeSpecName: "kube-api-access-t2dg5") pod "ae7dd4ac-dc69-4186-923f-40616d7fbea6" (UID: "ae7dd4ac-dc69-4186-923f-40616d7fbea6"). InnerVolumeSpecName "kube-api-access-t2dg5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:07:11 crc kubenswrapper[4492]: I1126 07:07:11.737445 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 26 07:07:11 crc kubenswrapper[4492]: I1126 07:07:11.778987 4492 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ae7dd4ac-dc69-4186-923f-40616d7fbea6-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 07:07:11 crc kubenswrapper[4492]: I1126 07:07:11.779087 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t2dg5\" (UniqueName: \"kubernetes.io/projected/ae7dd4ac-dc69-4186-923f-40616d7fbea6-kube-api-access-t2dg5\") on node \"crc\" DevicePath \"\"" Nov 26 07:07:11 crc kubenswrapper[4492]: I1126 07:07:11.996324 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-2mcb9" event={"ID":"66f5a9c1-c990-415e-b0e2-28d8ff866cf1","Type":"ContainerDied","Data":"830b48653b5edee2889d75c4367badf7ef459154d38a762094b84d1ae1b4b908"} Nov 26 07:07:11 crc kubenswrapper[4492]: I1126 07:07:11.996373 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-2mcb9" Nov 26 07:07:11 crc kubenswrapper[4492]: I1126 07:07:11.996380 4492 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="830b48653b5edee2889d75c4367badf7ef459154d38a762094b84d1ae1b4b908" Nov 26 07:07:11 crc kubenswrapper[4492]: I1126 07:07:11.998311 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-cfv7t" event={"ID":"ae7dd4ac-dc69-4186-923f-40616d7fbea6","Type":"ContainerDied","Data":"91dd1a72b4646e7c641ed5b1b9228a9712722d3331f04c09cad08b6a1eb133ef"} Nov 26 07:07:11 crc kubenswrapper[4492]: I1126 07:07:11.998346 4492 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="91dd1a72b4646e7c641ed5b1b9228a9712722d3331f04c09cad08b6a1eb133ef" Nov 26 07:07:11 crc kubenswrapper[4492]: I1126 07:07:11.998431 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-db-create-cfv7t" Nov 26 07:07:12 crc kubenswrapper[4492]: I1126 07:07:12.151032 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 07:07:12 crc kubenswrapper[4492]: I1126 07:07:12.151276 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="5497f148-a137-4ee1-9fed-689d83f91c2a" containerName="glance-log" containerID="cri-o://6171b837e0d3a2d5f863e3e5457e7b027b8bd9dd262b51cef4c8e74f17bf2906" gracePeriod=30 Nov 26 07:07:12 crc kubenswrapper[4492]: I1126 07:07:12.151665 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="5497f148-a137-4ee1-9fed-689d83f91c2a" containerName="glance-httpd" containerID="cri-o://41e98fc53565a78388d9cb4965d2c8545ae7c711714e6ccd8b8ff977cb1105ce" gracePeriod=30 Nov 26 07:07:12 crc kubenswrapper[4492]: I1126 07:07:12.351441 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-154f-account-create-update-n2256" Nov 26 07:07:12 crc kubenswrapper[4492]: I1126 07:07:12.363686 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 07:07:12 crc kubenswrapper[4492]: I1126 07:07:12.484289 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="37ed6b48-37b7-479f-837f-d49432778c49" path="/var/lib/kubelet/pods/37ed6b48-37b7-479f-837f-d49432778c49/volumes" Nov 26 07:07:12 crc kubenswrapper[4492]: I1126 07:07:12.491006 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-b365-account-create-update-4vpg6" Nov 26 07:07:12 crc kubenswrapper[4492]: I1126 07:07:12.504307 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zr9z8\" (UniqueName: \"kubernetes.io/projected/88818f37-64b1-4583-a66f-f6fc347fed00-kube-api-access-zr9z8\") pod \"88818f37-64b1-4583-a66f-f6fc347fed00\" (UID: \"88818f37-64b1-4583-a66f-f6fc347fed00\") " Nov 26 07:07:12 crc kubenswrapper[4492]: I1126 07:07:12.504501 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/88818f37-64b1-4583-a66f-f6fc347fed00-operator-scripts\") pod \"88818f37-64b1-4583-a66f-f6fc347fed00\" (UID: \"88818f37-64b1-4583-a66f-f6fc347fed00\") " Nov 26 07:07:12 crc kubenswrapper[4492]: I1126 07:07:12.506473 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/88818f37-64b1-4583-a66f-f6fc347fed00-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "88818f37-64b1-4583-a66f-f6fc347fed00" (UID: "88818f37-64b1-4583-a66f-f6fc347fed00"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:07:12 crc kubenswrapper[4492]: I1126 07:07:12.583483 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/88818f37-64b1-4583-a66f-f6fc347fed00-kube-api-access-zr9z8" (OuterVolumeSpecName: "kube-api-access-zr9z8") pod "88818f37-64b1-4583-a66f-f6fc347fed00" (UID: "88818f37-64b1-4583-a66f-f6fc347fed00"). InnerVolumeSpecName "kube-api-access-zr9z8". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:07:12 crc kubenswrapper[4492]: I1126 07:07:12.608129 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cb6703a6-97ba-4d95-8094-647d0baec33e-operator-scripts\") pod \"cb6703a6-97ba-4d95-8094-647d0baec33e\" (UID: \"cb6703a6-97ba-4d95-8094-647d0baec33e\") " Nov 26 07:07:12 crc kubenswrapper[4492]: I1126 07:07:12.608222 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v7xj8\" (UniqueName: \"kubernetes.io/projected/cb6703a6-97ba-4d95-8094-647d0baec33e-kube-api-access-v7xj8\") pod \"cb6703a6-97ba-4d95-8094-647d0baec33e\" (UID: \"cb6703a6-97ba-4d95-8094-647d0baec33e\") " Nov 26 07:07:12 crc kubenswrapper[4492]: I1126 07:07:12.609007 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cb6703a6-97ba-4d95-8094-647d0baec33e-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "cb6703a6-97ba-4d95-8094-647d0baec33e" (UID: "cb6703a6-97ba-4d95-8094-647d0baec33e"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:07:12 crc kubenswrapper[4492]: I1126 07:07:12.609733 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zr9z8\" (UniqueName: \"kubernetes.io/projected/88818f37-64b1-4583-a66f-f6fc347fed00-kube-api-access-zr9z8\") on node \"crc\" DevicePath \"\"" Nov 26 07:07:12 crc kubenswrapper[4492]: I1126 07:07:12.609755 4492 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/88818f37-64b1-4583-a66f-f6fc347fed00-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 07:07:12 crc kubenswrapper[4492]: I1126 07:07:12.609766 4492 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cb6703a6-97ba-4d95-8094-647d0baec33e-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 07:07:12 crc kubenswrapper[4492]: I1126 07:07:12.631253 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cb6703a6-97ba-4d95-8094-647d0baec33e-kube-api-access-v7xj8" (OuterVolumeSpecName: "kube-api-access-v7xj8") pod "cb6703a6-97ba-4d95-8094-647d0baec33e" (UID: "cb6703a6-97ba-4d95-8094-647d0baec33e"). InnerVolumeSpecName "kube-api-access-v7xj8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:07:12 crc kubenswrapper[4492]: I1126 07:07:12.714208 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v7xj8\" (UniqueName: \"kubernetes.io/projected/cb6703a6-97ba-4d95-8094-647d0baec33e-kube-api-access-v7xj8\") on node \"crc\" DevicePath \"\"" Nov 26 07:07:12 crc kubenswrapper[4492]: I1126 07:07:12.744860 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-4548-account-create-update-7rfwr" Nov 26 07:07:12 crc kubenswrapper[4492]: I1126 07:07:12.754875 4492 util.go:48] "No ready sandbox for pod can be found. 
Nov 26 07:07:12 crc kubenswrapper[4492]: I1126 07:07:12.815001 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sg6cl\" (UniqueName: \"kubernetes.io/projected/87ae35c0-5648-46cd-86df-3193cdbd748e-kube-api-access-sg6cl\") pod \"87ae35c0-5648-46cd-86df-3193cdbd748e\" (UID: \"87ae35c0-5648-46cd-86df-3193cdbd748e\") "
Nov 26 07:07:12 crc kubenswrapper[4492]: I1126 07:07:12.815555 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b6l82\" (UniqueName: \"kubernetes.io/projected/e9b50126-fe92-4e0e-a31a-e5c40823949c-kube-api-access-b6l82\") pod \"e9b50126-fe92-4e0e-a31a-e5c40823949c\" (UID: \"e9b50126-fe92-4e0e-a31a-e5c40823949c\") "
Nov 26 07:07:12 crc kubenswrapper[4492]: I1126 07:07:12.815624 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/87ae35c0-5648-46cd-86df-3193cdbd748e-operator-scripts\") pod \"87ae35c0-5648-46cd-86df-3193cdbd748e\" (UID: \"87ae35c0-5648-46cd-86df-3193cdbd748e\") "
Nov 26 07:07:12 crc kubenswrapper[4492]: I1126 07:07:12.815719 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e9b50126-fe92-4e0e-a31a-e5c40823949c-operator-scripts\") pod \"e9b50126-fe92-4e0e-a31a-e5c40823949c\" (UID: \"e9b50126-fe92-4e0e-a31a-e5c40823949c\") "
Nov 26 07:07:12 crc kubenswrapper[4492]: I1126 07:07:12.816966 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87ae35c0-5648-46cd-86df-3193cdbd748e-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "87ae35c0-5648-46cd-86df-3193cdbd748e" (UID: "87ae35c0-5648-46cd-86df-3193cdbd748e"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 07:07:12 crc kubenswrapper[4492]: I1126 07:07:12.817247 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e9b50126-fe92-4e0e-a31a-e5c40823949c-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "e9b50126-fe92-4e0e-a31a-e5c40823949c" (UID: "e9b50126-fe92-4e0e-a31a-e5c40823949c"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 07:07:12 crc kubenswrapper[4492]: I1126 07:07:12.820697 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87ae35c0-5648-46cd-86df-3193cdbd748e-kube-api-access-sg6cl" (OuterVolumeSpecName: "kube-api-access-sg6cl") pod "87ae35c0-5648-46cd-86df-3193cdbd748e" (UID: "87ae35c0-5648-46cd-86df-3193cdbd748e"). InnerVolumeSpecName "kube-api-access-sg6cl". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 07:07:12 crc kubenswrapper[4492]: I1126 07:07:12.824344 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e9b50126-fe92-4e0e-a31a-e5c40823949c-kube-api-access-b6l82" (OuterVolumeSpecName: "kube-api-access-b6l82") pod "e9b50126-fe92-4e0e-a31a-e5c40823949c" (UID: "e9b50126-fe92-4e0e-a31a-e5c40823949c"). InnerVolumeSpecName "kube-api-access-b6l82". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 07:07:12 crc kubenswrapper[4492]: I1126 07:07:12.919691 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b6l82\" (UniqueName: \"kubernetes.io/projected/e9b50126-fe92-4e0e-a31a-e5c40823949c-kube-api-access-b6l82\") on node \"crc\" DevicePath \"\""
Nov 26 07:07:12 crc kubenswrapper[4492]: I1126 07:07:12.919731 4492 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/87ae35c0-5648-46cd-86df-3193cdbd748e-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 26 07:07:12 crc kubenswrapper[4492]: I1126 07:07:12.919743 4492 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e9b50126-fe92-4e0e-a31a-e5c40823949c-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 26 07:07:12 crc kubenswrapper[4492]: I1126 07:07:12.919753 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sg6cl\" (UniqueName: \"kubernetes.io/projected/87ae35c0-5648-46cd-86df-3193cdbd748e-kube-api-access-sg6cl\") on node \"crc\" DevicePath \"\""
Nov 26 07:07:13 crc kubenswrapper[4492]: I1126 07:07:13.019803 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-6xr2x" event={"ID":"e9b50126-fe92-4e0e-a31a-e5c40823949c","Type":"ContainerDied","Data":"1ee2a8219b78ed5a68c0dc21542cbabfbe0cf84c753ccb4414722e9c5e8ac679"}
Nov 26 07:07:13 crc kubenswrapper[4492]: I1126 07:07:13.019859 4492 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1ee2a8219b78ed5a68c0dc21542cbabfbe0cf84c753ccb4414722e9c5e8ac679"
Nov 26 07:07:13 crc kubenswrapper[4492]: I1126 07:07:13.019960 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-6xr2x"
Nov 26 07:07:13 crc kubenswrapper[4492]: I1126 07:07:13.026521 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"6f99c970-42c7-46d7-bbc9-8d8426773700","Type":"ContainerStarted","Data":"48dff824474d00dc43dc5c0dc3dbfbd9031afeb5b166af8270875190c4c184ae"}
Nov 26 07:07:13 crc kubenswrapper[4492]: I1126 07:07:13.029607 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-4548-account-create-update-7rfwr"
Nov 26 07:07:13 crc kubenswrapper[4492]: I1126 07:07:13.029777 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-4548-account-create-update-7rfwr" event={"ID":"87ae35c0-5648-46cd-86df-3193cdbd748e","Type":"ContainerDied","Data":"b2a7ba70f27f3907c1dd63011430b4dd2486841f430b0351a1beecd0900efe4e"}
Nov 26 07:07:13 crc kubenswrapper[4492]: I1126 07:07:13.029841 4492 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b2a7ba70f27f3907c1dd63011430b4dd2486841f430b0351a1beecd0900efe4e"
Nov 26 07:07:13 crc kubenswrapper[4492]: I1126 07:07:13.035568 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-154f-account-create-update-n2256" event={"ID":"88818f37-64b1-4583-a66f-f6fc347fed00","Type":"ContainerDied","Data":"c3b94e6b395ae2ca0c2d562232bce5750d87013be5302a6c1f153132c0bba72c"}
Nov 26 07:07:13 crc kubenswrapper[4492]: I1126 07:07:13.035611 4492 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c3b94e6b395ae2ca0c2d562232bce5750d87013be5302a6c1f153132c0bba72c"
Nov 26 07:07:13 crc kubenswrapper[4492]: I1126 07:07:13.035644 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-154f-account-create-update-n2256"
Nov 26 07:07:13 crc kubenswrapper[4492]: I1126 07:07:13.043694 4492 generic.go:334] "Generic (PLEG): container finished" podID="5497f148-a137-4ee1-9fed-689d83f91c2a" containerID="6171b837e0d3a2d5f863e3e5457e7b027b8bd9dd262b51cef4c8e74f17bf2906" exitCode=143
Nov 26 07:07:13 crc kubenswrapper[4492]: I1126 07:07:13.043780 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"5497f148-a137-4ee1-9fed-689d83f91c2a","Type":"ContainerDied","Data":"6171b837e0d3a2d5f863e3e5457e7b027b8bd9dd262b51cef4c8e74f17bf2906"}
Nov 26 07:07:13 crc kubenswrapper[4492]: I1126 07:07:13.046847 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-b365-account-create-update-4vpg6" event={"ID":"cb6703a6-97ba-4d95-8094-647d0baec33e","Type":"ContainerDied","Data":"9ecab4ab2a484638df68bb365cdf62eae7ceab927ceb9055ac50f3e53b7ce89b"}
Nov 26 07:07:13 crc kubenswrapper[4492]: I1126 07:07:13.046875 4492 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9ecab4ab2a484638df68bb365cdf62eae7ceab927ceb9055ac50f3e53b7ce89b"
Nov 26 07:07:13 crc kubenswrapper[4492]: I1126 07:07:13.046998 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-b365-account-create-update-4vpg6"
Nov 26 07:07:14 crc kubenswrapper[4492]: I1126 07:07:14.061815 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"6f99c970-42c7-46d7-bbc9-8d8426773700","Type":"ContainerStarted","Data":"ca34c6a0920e9a589ccdfca634acfe7bad09258e7fa6ae2150015eb9d73a8157"}
Nov 26 07:07:14 crc kubenswrapper[4492]: I1126 07:07:14.062234 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"6f99c970-42c7-46d7-bbc9-8d8426773700","Type":"ContainerStarted","Data":"17217d6cfe0732a08a42484fb485789eea150c5ccd164e04ebe046abbfcf721c"}
Nov 26 07:07:14 crc kubenswrapper[4492]: I1126 07:07:14.086946 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=3.086915163 podStartE2EDuration="3.086915163s" podCreationTimestamp="2025-11-26 07:07:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:07:14.082800808 +0000 UTC m=+1129.966689105" watchObservedRunningTime="2025-11-26 07:07:14.086915163 +0000 UTC m=+1129.970803461"
Nov 26 07:07:15 crc kubenswrapper[4492]: I1126 07:07:15.836983 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Nov 26 07:07:15 crc kubenswrapper[4492]: I1126 07:07:15.904907 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5497f148-a137-4ee1-9fed-689d83f91c2a-config-data\") pod \"5497f148-a137-4ee1-9fed-689d83f91c2a\" (UID: \"5497f148-a137-4ee1-9fed-689d83f91c2a\") "
Nov 26 07:07:15 crc kubenswrapper[4492]: I1126 07:07:15.904987 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h5tct\" (UniqueName: \"kubernetes.io/projected/5497f148-a137-4ee1-9fed-689d83f91c2a-kube-api-access-h5tct\") pod \"5497f148-a137-4ee1-9fed-689d83f91c2a\" (UID: \"5497f148-a137-4ee1-9fed-689d83f91c2a\") "
Nov 26 07:07:15 crc kubenswrapper[4492]: I1126 07:07:15.905021 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5497f148-a137-4ee1-9fed-689d83f91c2a-combined-ca-bundle\") pod \"5497f148-a137-4ee1-9fed-689d83f91c2a\" (UID: \"5497f148-a137-4ee1-9fed-689d83f91c2a\") "
Nov 26 07:07:15 crc kubenswrapper[4492]: I1126 07:07:15.905159 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"5497f148-a137-4ee1-9fed-689d83f91c2a\" (UID: \"5497f148-a137-4ee1-9fed-689d83f91c2a\") "
Nov 26 07:07:15 crc kubenswrapper[4492]: I1126 07:07:15.905217 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5497f148-a137-4ee1-9fed-689d83f91c2a-internal-tls-certs\") pod \"5497f148-a137-4ee1-9fed-689d83f91c2a\" (UID: \"5497f148-a137-4ee1-9fed-689d83f91c2a\") "
Nov 26 07:07:15 crc kubenswrapper[4492]: I1126 07:07:15.905347 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5497f148-a137-4ee1-9fed-689d83f91c2a-scripts\") pod \"5497f148-a137-4ee1-9fed-689d83f91c2a\" (UID: \"5497f148-a137-4ee1-9fed-689d83f91c2a\") "
Nov 26 07:07:15 crc kubenswrapper[4492]: I1126 07:07:15.905476 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/5497f148-a137-4ee1-9fed-689d83f91c2a-httpd-run\") pod \"5497f148-a137-4ee1-9fed-689d83f91c2a\" (UID: \"5497f148-a137-4ee1-9fed-689d83f91c2a\") "
Nov 26 07:07:15 crc kubenswrapper[4492]: I1126 07:07:15.905492 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5497f148-a137-4ee1-9fed-689d83f91c2a-logs\") pod \"5497f148-a137-4ee1-9fed-689d83f91c2a\" (UID: \"5497f148-a137-4ee1-9fed-689d83f91c2a\") "
Nov 26 07:07:15 crc kubenswrapper[4492]: I1126 07:07:15.905943 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5497f148-a137-4ee1-9fed-689d83f91c2a-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "5497f148-a137-4ee1-9fed-689d83f91c2a" (UID: "5497f148-a137-4ee1-9fed-689d83f91c2a"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 07:07:15 crc kubenswrapper[4492]: I1126 07:07:15.906156 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5497f148-a137-4ee1-9fed-689d83f91c2a-logs" (OuterVolumeSpecName: "logs") pod "5497f148-a137-4ee1-9fed-689d83f91c2a" (UID: "5497f148-a137-4ee1-9fed-689d83f91c2a"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 07:07:15 crc kubenswrapper[4492]: I1126 07:07:15.914355 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5497f148-a137-4ee1-9fed-689d83f91c2a-scripts" (OuterVolumeSpecName: "scripts") pod "5497f148-a137-4ee1-9fed-689d83f91c2a" (UID: "5497f148-a137-4ee1-9fed-689d83f91c2a"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 07:07:15 crc kubenswrapper[4492]: I1126 07:07:15.916329 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage05-crc" (OuterVolumeSpecName: "glance") pod "5497f148-a137-4ee1-9fed-689d83f91c2a" (UID: "5497f148-a137-4ee1-9fed-689d83f91c2a"). InnerVolumeSpecName "local-storage05-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue ""
Nov 26 07:07:15 crc kubenswrapper[4492]: I1126 07:07:15.931546 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5497f148-a137-4ee1-9fed-689d83f91c2a-kube-api-access-h5tct" (OuterVolumeSpecName: "kube-api-access-h5tct") pod "5497f148-a137-4ee1-9fed-689d83f91c2a" (UID: "5497f148-a137-4ee1-9fed-689d83f91c2a"). InnerVolumeSpecName "kube-api-access-h5tct". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 07:07:15 crc kubenswrapper[4492]: I1126 07:07:15.959561 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5497f148-a137-4ee1-9fed-689d83f91c2a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5497f148-a137-4ee1-9fed-689d83f91c2a" (UID: "5497f148-a137-4ee1-9fed-689d83f91c2a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 07:07:15 crc kubenswrapper[4492]: I1126 07:07:15.969296 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5497f148-a137-4ee1-9fed-689d83f91c2a-config-data" (OuterVolumeSpecName: "config-data") pod "5497f148-a137-4ee1-9fed-689d83f91c2a" (UID: "5497f148-a137-4ee1-9fed-689d83f91c2a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 07:07:15 crc kubenswrapper[4492]: I1126 07:07:15.988273 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5497f148-a137-4ee1-9fed-689d83f91c2a-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "5497f148-a137-4ee1-9fed-689d83f91c2a" (UID: "5497f148-a137-4ee1-9fed-689d83f91c2a"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 07:07:16 crc kubenswrapper[4492]: I1126 07:07:16.009239 4492 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/5497f148-a137-4ee1-9fed-689d83f91c2a-httpd-run\") on node \"crc\" DevicePath \"\""
Nov 26 07:07:16 crc kubenswrapper[4492]: I1126 07:07:16.009270 4492 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5497f148-a137-4ee1-9fed-689d83f91c2a-logs\") on node \"crc\" DevicePath \"\""
Nov 26 07:07:16 crc kubenswrapper[4492]: I1126 07:07:16.009280 4492 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5497f148-a137-4ee1-9fed-689d83f91c2a-config-data\") on node \"crc\" DevicePath \"\""
Nov 26 07:07:16 crc kubenswrapper[4492]: I1126 07:07:16.009293 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h5tct\" (UniqueName: \"kubernetes.io/projected/5497f148-a137-4ee1-9fed-689d83f91c2a-kube-api-access-h5tct\") on node \"crc\" DevicePath \"\""
Nov 26 07:07:16 crc kubenswrapper[4492]: I1126 07:07:16.009306 4492 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5497f148-a137-4ee1-9fed-689d83f91c2a-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 26 07:07:16 crc kubenswrapper[4492]: I1126 07:07:16.009344 4492 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") on node \"crc\" "
Nov 26 07:07:16 crc kubenswrapper[4492]: I1126 07:07:16.009353 4492 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5497f148-a137-4ee1-9fed-689d83f91c2a-internal-tls-certs\") on node \"crc\" DevicePath \"\""
Nov 26 07:07:16 crc kubenswrapper[4492]: I1126 07:07:16.009361 4492 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5497f148-a137-4ee1-9fed-689d83f91c2a-scripts\") on node \"crc\" DevicePath \"\""
Nov 26 07:07:16 crc kubenswrapper[4492]: I1126 07:07:16.029383 4492 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage05-crc" (UniqueName: "kubernetes.io/local-volume/local-storage05-crc") on node "crc"
Nov 26 07:07:16 crc kubenswrapper[4492]: I1126 07:07:16.084911 4492 generic.go:334] "Generic (PLEG): container finished" podID="5497f148-a137-4ee1-9fed-689d83f91c2a" containerID="41e98fc53565a78388d9cb4965d2c8545ae7c711714e6ccd8b8ff977cb1105ce" exitCode=0
Nov 26 07:07:16 crc kubenswrapper[4492]: I1126 07:07:16.084979 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"5497f148-a137-4ee1-9fed-689d83f91c2a","Type":"ContainerDied","Data":"41e98fc53565a78388d9cb4965d2c8545ae7c711714e6ccd8b8ff977cb1105ce"}
I1126 07:07:16.084979 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"5497f148-a137-4ee1-9fed-689d83f91c2a","Type":"ContainerDied","Data":"41e98fc53565a78388d9cb4965d2c8545ae7c711714e6ccd8b8ff977cb1105ce"}
Nov 26 07:07:16 crc kubenswrapper[4492]: I1126 07:07:16.085018 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"5497f148-a137-4ee1-9fed-689d83f91c2a","Type":"ContainerDied","Data":"67c41ae023d0d9d699f8f9ef595a310dde7c1543485ed9241356ead78a260363"}
Nov 26 07:07:16 crc kubenswrapper[4492]: I1126 07:07:16.085039 4492 scope.go:117] "RemoveContainer" containerID="41e98fc53565a78388d9cb4965d2c8545ae7c711714e6ccd8b8ff977cb1105ce"
Nov 26 07:07:16 crc kubenswrapper[4492]: I1126 07:07:16.085202 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Nov 26 07:07:16 crc kubenswrapper[4492]: I1126 07:07:16.111642 4492 reconciler_common.go:293] "Volume detached for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") on node \"crc\" DevicePath \"\""
Nov 26 07:07:16 crc kubenswrapper[4492]: I1126 07:07:16.126219 4492 scope.go:117] "RemoveContainer" containerID="6171b837e0d3a2d5f863e3e5457e7b027b8bd9dd262b51cef4c8e74f17bf2906"
Nov 26 07:07:16 crc kubenswrapper[4492]: I1126 07:07:16.136725 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"]
Nov 26 07:07:16 crc kubenswrapper[4492]: I1126 07:07:16.148342 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"]
Nov 26 07:07:16 crc kubenswrapper[4492]: I1126 07:07:16.156306 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"]
Nov 26 07:07:16 crc kubenswrapper[4492]: I1126 07:07:16.157063 4492 scope.go:117] "RemoveContainer" containerID="41e98fc53565a78388d9cb4965d2c8545ae7c711714e6ccd8b8ff977cb1105ce"
Nov 26 07:07:16 crc kubenswrapper[4492]: E1126 07:07:16.157711 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"41e98fc53565a78388d9cb4965d2c8545ae7c711714e6ccd8b8ff977cb1105ce\": container with ID starting with 41e98fc53565a78388d9cb4965d2c8545ae7c711714e6ccd8b8ff977cb1105ce not found: ID does not exist" containerID="41e98fc53565a78388d9cb4965d2c8545ae7c711714e6ccd8b8ff977cb1105ce"
Nov 26 07:07:16 crc kubenswrapper[4492]: I1126 07:07:16.157757 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"41e98fc53565a78388d9cb4965d2c8545ae7c711714e6ccd8b8ff977cb1105ce"} err="failed to get container status \"41e98fc53565a78388d9cb4965d2c8545ae7c711714e6ccd8b8ff977cb1105ce\": rpc error: code = NotFound desc = could not find container \"41e98fc53565a78388d9cb4965d2c8545ae7c711714e6ccd8b8ff977cb1105ce\": container with ID starting with 41e98fc53565a78388d9cb4965d2c8545ae7c711714e6ccd8b8ff977cb1105ce not found: ID does not exist"
Nov 26 07:07:16 crc kubenswrapper[4492]: I1126 07:07:16.157785 4492 scope.go:117] "RemoveContainer" containerID="6171b837e0d3a2d5f863e3e5457e7b027b8bd9dd262b51cef4c8e74f17bf2906"
Nov 26 07:07:16 crc kubenswrapper[4492]: E1126 07:07:16.158097 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e9b50126-fe92-4e0e-a31a-e5c40823949c" containerName="mariadb-database-create"
Nov 26 07:07:16 crc kubenswrapper[4492]: I1126 07:07:16.158255 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="e9b50126-fe92-4e0e-a31a-e5c40823949c" containerName="mariadb-database-create"
Nov 26 07:07:16 crc kubenswrapper[4492]: E1126 07:07:16.158128 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6171b837e0d3a2d5f863e3e5457e7b027b8bd9dd262b51cef4c8e74f17bf2906\": container with ID starting with 6171b837e0d3a2d5f863e3e5457e7b027b8bd9dd262b51cef4c8e74f17bf2906 not found: ID does not exist" containerID="6171b837e0d3a2d5f863e3e5457e7b027b8bd9dd262b51cef4c8e74f17bf2906"
Nov 26 07:07:16 crc kubenswrapper[4492]: I1126 07:07:16.158413 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6171b837e0d3a2d5f863e3e5457e7b027b8bd9dd262b51cef4c8e74f17bf2906"} err="failed to get container status \"6171b837e0d3a2d5f863e3e5457e7b027b8bd9dd262b51cef4c8e74f17bf2906\": rpc error: code = NotFound desc = could not find container \"6171b837e0d3a2d5f863e3e5457e7b027b8bd9dd262b51cef4c8e74f17bf2906\": container with ID starting with 6171b837e0d3a2d5f863e3e5457e7b027b8bd9dd262b51cef4c8e74f17bf2906 not found: ID does not exist"
Nov 26 07:07:16 crc kubenswrapper[4492]: E1126 07:07:16.158398 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cb6703a6-97ba-4d95-8094-647d0baec33e" containerName="mariadb-account-create-update"
Nov 26 07:07:16 crc kubenswrapper[4492]: I1126 07:07:16.158549 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="cb6703a6-97ba-4d95-8094-647d0baec33e" containerName="mariadb-account-create-update"
Nov 26 07:07:16 crc kubenswrapper[4492]: E1126 07:07:16.158609 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="88818f37-64b1-4583-a66f-f6fc347fed00" containerName="mariadb-account-create-update"
Nov 26 07:07:16 crc kubenswrapper[4492]: I1126 07:07:16.158654 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="88818f37-64b1-4583-a66f-f6fc347fed00" containerName="mariadb-account-create-update"
Nov 26 07:07:16 crc kubenswrapper[4492]: E1126 07:07:16.158700 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5497f148-a137-4ee1-9fed-689d83f91c2a" containerName="glance-log"
Nov 26 07:07:16 crc kubenswrapper[4492]: I1126 07:07:16.158786 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="5497f148-a137-4ee1-9fed-689d83f91c2a" containerName="glance-log"
Nov 26 07:07:16 crc kubenswrapper[4492]: E1126 07:07:16.158835 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="66f5a9c1-c990-415e-b0e2-28d8ff866cf1" containerName="mariadb-database-create"
Nov 26 07:07:16 crc kubenswrapper[4492]: I1126 07:07:16.158878 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="66f5a9c1-c990-415e-b0e2-28d8ff866cf1" containerName="mariadb-database-create"
Nov 26 07:07:16 crc kubenswrapper[4492]: E1126 07:07:16.158972 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5497f148-a137-4ee1-9fed-689d83f91c2a" containerName="glance-httpd"
Nov 26 07:07:16 crc kubenswrapper[4492]: I1126 07:07:16.159024 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="5497f148-a137-4ee1-9fed-689d83f91c2a" containerName="glance-httpd"
Nov 26 07:07:16 crc kubenswrapper[4492]: E1126 07:07:16.159077 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ae7dd4ac-dc69-4186-923f-40616d7fbea6" containerName="mariadb-database-create"
Nov 26 07:07:16 crc kubenswrapper[4492]: I1126 07:07:16.159320 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="ae7dd4ac-dc69-4186-923f-40616d7fbea6" containerName="mariadb-database-create"
Nov 26 07:07:16 crc kubenswrapper[4492]: E1126 07:07:16.159808 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="87ae35c0-5648-46cd-86df-3193cdbd748e" containerName="mariadb-account-create-update"
Nov 26 07:07:16 crc kubenswrapper[4492]: I1126 07:07:16.159958 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="87ae35c0-5648-46cd-86df-3193cdbd748e" containerName="mariadb-account-create-update"
Nov 26 07:07:16 crc kubenswrapper[4492]: I1126 07:07:16.160354 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="cb6703a6-97ba-4d95-8094-647d0baec33e" containerName="mariadb-account-create-update"
Nov 26 07:07:16 crc kubenswrapper[4492]: I1126 07:07:16.160431 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="5497f148-a137-4ee1-9fed-689d83f91c2a" containerName="glance-httpd"
Nov 26 07:07:16 crc kubenswrapper[4492]: I1126 07:07:16.160489 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="e9b50126-fe92-4e0e-a31a-e5c40823949c" containerName="mariadb-database-create"
Nov 26 07:07:16 crc kubenswrapper[4492]: I1126 07:07:16.160546 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="87ae35c0-5648-46cd-86df-3193cdbd748e" containerName="mariadb-account-create-update"
Nov 26 07:07:16 crc kubenswrapper[4492]: I1126 07:07:16.160597 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="66f5a9c1-c990-415e-b0e2-28d8ff866cf1" containerName="mariadb-database-create"
Nov 26 07:07:16 crc kubenswrapper[4492]: I1126 07:07:16.160649 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="ae7dd4ac-dc69-4186-923f-40616d7fbea6" containerName="mariadb-database-create"
Nov 26 07:07:16 crc kubenswrapper[4492]: I1126 07:07:16.160695 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="5497f148-a137-4ee1-9fed-689d83f91c2a" containerName="glance-log"
Nov 26 07:07:16 crc kubenswrapper[4492]: I1126 07:07:16.160751 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="88818f37-64b1-4583-a66f-f6fc347fed00" containerName="mariadb-account-create-update"
Nov 26 07:07:16 crc kubenswrapper[4492]: I1126 07:07:16.162578 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Nov 26 07:07:16 crc kubenswrapper[4492]: I1126 07:07:16.164581 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"]
Nov 26 07:07:16 crc kubenswrapper[4492]: I1126 07:07:16.165305 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data"
Nov 26 07:07:16 crc kubenswrapper[4492]: I1126 07:07:16.165341 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc"
Nov 26 07:07:16 crc kubenswrapper[4492]: I1126 07:07:16.317819 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8e5e94c9-4c33-46c7-a990-2e0486653c4c-scripts\") pod \"glance-default-internal-api-0\" (UID: \"8e5e94c9-4c33-46c7-a990-2e0486653c4c\") " pod="openstack/glance-default-internal-api-0"
Nov 26 07:07:16 crc kubenswrapper[4492]: I1126 07:07:16.317915 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8e5e94c9-4c33-46c7-a990-2e0486653c4c-logs\") pod \"glance-default-internal-api-0\" (UID: \"8e5e94c9-4c33-46c7-a990-2e0486653c4c\") " pod="openstack/glance-default-internal-api-0"
Nov 26 07:07:16 crc kubenswrapper[4492]: I1126 07:07:16.318092 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8e5e94c9-4c33-46c7-a990-2e0486653c4c-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"8e5e94c9-4c33-46c7-a990-2e0486653c4c\") " pod="openstack/glance-default-internal-api-0"
Nov 26 07:07:16 crc kubenswrapper[4492]: I1126 07:07:16.318299 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8e5e94c9-4c33-46c7-a990-2e0486653c4c-config-data\") pod \"glance-default-internal-api-0\" (UID: \"8e5e94c9-4c33-46c7-a990-2e0486653c4c\") " pod="openstack/glance-default-internal-api-0"
Nov 26 07:07:16 crc kubenswrapper[4492]: I1126 07:07:16.318398 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8e5e94c9-4c33-46c7-a990-2e0486653c4c-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"8e5e94c9-4c33-46c7-a990-2e0486653c4c\") " pod="openstack/glance-default-internal-api-0"
Nov 26 07:07:16 crc kubenswrapper[4492]: I1126 07:07:16.318604 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-internal-api-0\" (UID: \"8e5e94c9-4c33-46c7-a990-2e0486653c4c\") " pod="openstack/glance-default-internal-api-0"
Nov 26 07:07:16 crc kubenswrapper[4492]: I1126 07:07:16.318783 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pzq9n\" (UniqueName: \"kubernetes.io/projected/8e5e94c9-4c33-46c7-a990-2e0486653c4c-kube-api-access-pzq9n\") pod \"glance-default-internal-api-0\" (UID: \"8e5e94c9-4c33-46c7-a990-2e0486653c4c\") " pod="openstack/glance-default-internal-api-0"
Nov 26 07:07:16 crc kubenswrapper[4492]: I1126 07:07:16.318891 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/8e5e94c9-4c33-46c7-a990-2e0486653c4c-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"8e5e94c9-4c33-46c7-a990-2e0486653c4c\") " pod="openstack/glance-default-internal-api-0"
Nov 26 07:07:16 crc kubenswrapper[4492]: I1126 07:07:16.421022 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-internal-api-0\" (UID: \"8e5e94c9-4c33-46c7-a990-2e0486653c4c\") " pod="openstack/glance-default-internal-api-0"
Nov 26 07:07:16 crc kubenswrapper[4492]: I1126 07:07:16.421163 4492 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-internal-api-0\" (UID: \"8e5e94c9-4c33-46c7-a990-2e0486653c4c\") device mount path \"/mnt/openstack/pv05\"" pod="openstack/glance-default-internal-api-0"
Nov 26 07:07:16 crc kubenswrapper[4492]: I1126 07:07:16.427288 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pzq9n\" (UniqueName: \"kubernetes.io/projected/8e5e94c9-4c33-46c7-a990-2e0486653c4c-kube-api-access-pzq9n\") pod \"glance-default-internal-api-0\" (UID: \"8e5e94c9-4c33-46c7-a990-2e0486653c4c\") " pod="openstack/glance-default-internal-api-0"
Nov 26 07:07:16 crc kubenswrapper[4492]: I1126 07:07:16.427472 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/8e5e94c9-4c33-46c7-a990-2e0486653c4c-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"8e5e94c9-4c33-46c7-a990-2e0486653c4c\") " pod="openstack/glance-default-internal-api-0"
Nov 26 07:07:16 crc kubenswrapper[4492]: I1126 07:07:16.427653 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8e5e94c9-4c33-46c7-a990-2e0486653c4c-scripts\") pod \"glance-default-internal-api-0\" (UID: \"8e5e94c9-4c33-46c7-a990-2e0486653c4c\") " pod="openstack/glance-default-internal-api-0"
Nov 26 07:07:16 crc kubenswrapper[4492]: I1126 07:07:16.427816 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8e5e94c9-4c33-46c7-a990-2e0486653c4c-logs\") pod \"glance-default-internal-api-0\" (UID: \"8e5e94c9-4c33-46c7-a990-2e0486653c4c\") " pod="openstack/glance-default-internal-api-0"
Nov 26 07:07:16 crc kubenswrapper[4492]: I1126 07:07:16.427859 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8e5e94c9-4c33-46c7-a990-2e0486653c4c-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"8e5e94c9-4c33-46c7-a990-2e0486653c4c\") " pod="openstack/glance-default-internal-api-0"
Nov 26 07:07:16 crc kubenswrapper[4492]: I1126 07:07:16.427965 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8e5e94c9-4c33-46c7-a990-2e0486653c4c-config-data\") pod \"glance-default-internal-api-0\" (UID: \"8e5e94c9-4c33-46c7-a990-2e0486653c4c\") " pod="openstack/glance-default-internal-api-0"
Nov 26 07:07:16 crc kubenswrapper[4492]: I1126 07:07:16.428024 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8e5e94c9-4c33-46c7-a990-2e0486653c4c-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"8e5e94c9-4c33-46c7-a990-2e0486653c4c\") " pod="openstack/glance-default-internal-api-0"
Nov 26 07:07:16 crc kubenswrapper[4492]: I1126 07:07:16.429759 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8e5e94c9-4c33-46c7-a990-2e0486653c4c-logs\") pod \"glance-default-internal-api-0\" (UID: \"8e5e94c9-4c33-46c7-a990-2e0486653c4c\") " pod="openstack/glance-default-internal-api-0"
Nov 26 07:07:16 crc kubenswrapper[4492]: I1126 07:07:16.430025 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/8e5e94c9-4c33-46c7-a990-2e0486653c4c-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"8e5e94c9-4c33-46c7-a990-2e0486653c4c\") " pod="openstack/glance-default-internal-api-0"
Nov 26 07:07:16 crc kubenswrapper[4492]: I1126 07:07:16.433656 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8e5e94c9-4c33-46c7-a990-2e0486653c4c-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"8e5e94c9-4c33-46c7-a990-2e0486653c4c\") " pod="openstack/glance-default-internal-api-0"
Nov 26 07:07:16 crc kubenswrapper[4492]: I1126 07:07:16.434821 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8e5e94c9-4c33-46c7-a990-2e0486653c4c-scripts\") pod \"glance-default-internal-api-0\" (UID: \"8e5e94c9-4c33-46c7-a990-2e0486653c4c\") " pod="openstack/glance-default-internal-api-0"
Nov 26 07:07:16 crc kubenswrapper[4492]: I1126 07:07:16.436747 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8e5e94c9-4c33-46c7-a990-2e0486653c4c-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"8e5e94c9-4c33-46c7-a990-2e0486653c4c\") " pod="openstack/glance-default-internal-api-0"
Nov 26 07:07:16 crc kubenswrapper[4492]: I1126 07:07:16.439104 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8e5e94c9-4c33-46c7-a990-2e0486653c4c-config-data\") pod \"glance-default-internal-api-0\" (UID: \"8e5e94c9-4c33-46c7-a990-2e0486653c4c\") " pod="openstack/glance-default-internal-api-0"
Nov 26 07:07:16 crc kubenswrapper[4492]: I1126 07:07:16.445859 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pzq9n\" (UniqueName: \"kubernetes.io/projected/8e5e94c9-4c33-46c7-a990-2e0486653c4c-kube-api-access-pzq9n\") pod \"glance-default-internal-api-0\" (UID: \"8e5e94c9-4c33-46c7-a990-2e0486653c4c\") " pod="openstack/glance-default-internal-api-0"
Nov 26 07:07:16 crc kubenswrapper[4492]: I1126 07:07:16.450539 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-internal-api-0\" (UID: \"8e5e94c9-4c33-46c7-a990-2e0486653c4c\") " pod="openstack/glance-default-internal-api-0"
Nov 26 07:07:16 crc kubenswrapper[4492]: I1126 07:07:16.454449 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5497f148-a137-4ee1-9fed-689d83f91c2a" path="/var/lib/kubelet/pods/5497f148-a137-4ee1-9fed-689d83f91c2a/volumes"
Nov 26 07:07:16 crc kubenswrapper[4492]: I1126 07:07:16.485258 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 26 07:07:17 crc kubenswrapper[4492]: I1126 07:07:17.108298 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 07:07:17 crc kubenswrapper[4492]: I1126 07:07:17.119904 4492 generic.go:334] "Generic (PLEG): container finished" podID="b3d44e0c-7e22-4a5d-8e2d-b946e676417d" containerID="668b2bb36cb1b17711057eca172a0ad78a9bdd91556a97cb0b80daff59f82706" exitCode=0 Nov 26 07:07:17 crc kubenswrapper[4492]: I1126 07:07:17.119961 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b3d44e0c-7e22-4a5d-8e2d-b946e676417d","Type":"ContainerDied","Data":"668b2bb36cb1b17711057eca172a0ad78a9bdd91556a97cb0b80daff59f82706"} Nov 26 07:07:17 crc kubenswrapper[4492]: I1126 07:07:17.253363 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 26 07:07:17 crc kubenswrapper[4492]: I1126 07:07:17.356533 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b3d44e0c-7e22-4a5d-8e2d-b946e676417d-run-httpd\") pod \"b3d44e0c-7e22-4a5d-8e2d-b946e676417d\" (UID: \"b3d44e0c-7e22-4a5d-8e2d-b946e676417d\") " Nov 26 07:07:17 crc kubenswrapper[4492]: I1126 07:07:17.356888 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b3d44e0c-7e22-4a5d-8e2d-b946e676417d-combined-ca-bundle\") pod \"b3d44e0c-7e22-4a5d-8e2d-b946e676417d\" (UID: \"b3d44e0c-7e22-4a5d-8e2d-b946e676417d\") " Nov 26 07:07:17 crc kubenswrapper[4492]: I1126 07:07:17.356975 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7b2k8\" (UniqueName: \"kubernetes.io/projected/b3d44e0c-7e22-4a5d-8e2d-b946e676417d-kube-api-access-7b2k8\") pod \"b3d44e0c-7e22-4a5d-8e2d-b946e676417d\" (UID: \"b3d44e0c-7e22-4a5d-8e2d-b946e676417d\") " Nov 26 07:07:17 crc kubenswrapper[4492]: I1126 07:07:17.357008 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b3d44e0c-7e22-4a5d-8e2d-b946e676417d-config-data\") pod \"b3d44e0c-7e22-4a5d-8e2d-b946e676417d\" (UID: \"b3d44e0c-7e22-4a5d-8e2d-b946e676417d\") " Nov 26 07:07:17 crc kubenswrapper[4492]: I1126 07:07:17.357051 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b3d44e0c-7e22-4a5d-8e2d-b946e676417d-sg-core-conf-yaml\") pod \"b3d44e0c-7e22-4a5d-8e2d-b946e676417d\" (UID: \"b3d44e0c-7e22-4a5d-8e2d-b946e676417d\") " Nov 26 07:07:17 crc kubenswrapper[4492]: I1126 07:07:17.357089 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b3d44e0c-7e22-4a5d-8e2d-b946e676417d-log-httpd\") pod \"b3d44e0c-7e22-4a5d-8e2d-b946e676417d\" (UID: \"b3d44e0c-7e22-4a5d-8e2d-b946e676417d\") " Nov 26 07:07:17 crc kubenswrapper[4492]: I1126 07:07:17.357120 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b3d44e0c-7e22-4a5d-8e2d-b946e676417d-scripts\") pod \"b3d44e0c-7e22-4a5d-8e2d-b946e676417d\" (UID: \"b3d44e0c-7e22-4a5d-8e2d-b946e676417d\") " Nov 26 07:07:17 crc kubenswrapper[4492]: I1126 07:07:17.357525 4492 
operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b3d44e0c-7e22-4a5d-8e2d-b946e676417d-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "b3d44e0c-7e22-4a5d-8e2d-b946e676417d" (UID: "b3d44e0c-7e22-4a5d-8e2d-b946e676417d"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:07:17 crc kubenswrapper[4492]: I1126 07:07:17.360698 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b3d44e0c-7e22-4a5d-8e2d-b946e676417d-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "b3d44e0c-7e22-4a5d-8e2d-b946e676417d" (UID: "b3d44e0c-7e22-4a5d-8e2d-b946e676417d"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:07:17 crc kubenswrapper[4492]: I1126 07:07:17.361789 4492 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b3d44e0c-7e22-4a5d-8e2d-b946e676417d-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 26 07:07:17 crc kubenswrapper[4492]: I1126 07:07:17.361817 4492 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b3d44e0c-7e22-4a5d-8e2d-b946e676417d-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 26 07:07:17 crc kubenswrapper[4492]: I1126 07:07:17.374060 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b3d44e0c-7e22-4a5d-8e2d-b946e676417d-kube-api-access-7b2k8" (OuterVolumeSpecName: "kube-api-access-7b2k8") pod "b3d44e0c-7e22-4a5d-8e2d-b946e676417d" (UID: "b3d44e0c-7e22-4a5d-8e2d-b946e676417d"). InnerVolumeSpecName "kube-api-access-7b2k8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:07:17 crc kubenswrapper[4492]: I1126 07:07:17.382650 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b3d44e0c-7e22-4a5d-8e2d-b946e676417d-scripts" (OuterVolumeSpecName: "scripts") pod "b3d44e0c-7e22-4a5d-8e2d-b946e676417d" (UID: "b3d44e0c-7e22-4a5d-8e2d-b946e676417d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:07:17 crc kubenswrapper[4492]: I1126 07:07:17.395776 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b3d44e0c-7e22-4a5d-8e2d-b946e676417d-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "b3d44e0c-7e22-4a5d-8e2d-b946e676417d" (UID: "b3d44e0c-7e22-4a5d-8e2d-b946e676417d"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:07:17 crc kubenswrapper[4492]: I1126 07:07:17.464007 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7b2k8\" (UniqueName: \"kubernetes.io/projected/b3d44e0c-7e22-4a5d-8e2d-b946e676417d-kube-api-access-7b2k8\") on node \"crc\" DevicePath \"\"" Nov 26 07:07:17 crc kubenswrapper[4492]: I1126 07:07:17.464040 4492 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b3d44e0c-7e22-4a5d-8e2d-b946e676417d-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 26 07:07:17 crc kubenswrapper[4492]: I1126 07:07:17.464050 4492 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b3d44e0c-7e22-4a5d-8e2d-b946e676417d-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 07:07:17 crc kubenswrapper[4492]: I1126 07:07:17.469259 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b3d44e0c-7e22-4a5d-8e2d-b946e676417d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b3d44e0c-7e22-4a5d-8e2d-b946e676417d" (UID: "b3d44e0c-7e22-4a5d-8e2d-b946e676417d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:07:17 crc kubenswrapper[4492]: I1126 07:07:17.470855 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b3d44e0c-7e22-4a5d-8e2d-b946e676417d-config-data" (OuterVolumeSpecName: "config-data") pod "b3d44e0c-7e22-4a5d-8e2d-b946e676417d" (UID: "b3d44e0c-7e22-4a5d-8e2d-b946e676417d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:07:17 crc kubenswrapper[4492]: I1126 07:07:17.479122 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-engine-85bd8ddcc5-m7gs8" Nov 26 07:07:17 crc kubenswrapper[4492]: I1126 07:07:17.566529 4492 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b3d44e0c-7e22-4a5d-8e2d-b946e676417d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:07:17 crc kubenswrapper[4492]: I1126 07:07:17.566562 4492 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b3d44e0c-7e22-4a5d-8e2d-b946e676417d-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 07:07:17 crc kubenswrapper[4492]: I1126 07:07:17.885499 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-db-sync-wnw6v"] Nov 26 07:07:17 crc kubenswrapper[4492]: E1126 07:07:17.886260 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b3d44e0c-7e22-4a5d-8e2d-b946e676417d" containerName="ceilometer-notification-agent" Nov 26 07:07:17 crc kubenswrapper[4492]: I1126 07:07:17.886279 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="b3d44e0c-7e22-4a5d-8e2d-b946e676417d" containerName="ceilometer-notification-agent" Nov 26 07:07:17 crc kubenswrapper[4492]: E1126 07:07:17.886313 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b3d44e0c-7e22-4a5d-8e2d-b946e676417d" containerName="proxy-httpd" Nov 26 07:07:17 crc kubenswrapper[4492]: I1126 07:07:17.886321 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="b3d44e0c-7e22-4a5d-8e2d-b946e676417d" containerName="proxy-httpd" Nov 26 07:07:17 crc kubenswrapper[4492]: E1126 07:07:17.886331 4492 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="b3d44e0c-7e22-4a5d-8e2d-b946e676417d" containerName="sg-core" Nov 26 07:07:17 crc kubenswrapper[4492]: I1126 07:07:17.886337 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="b3d44e0c-7e22-4a5d-8e2d-b946e676417d" containerName="sg-core" Nov 26 07:07:17 crc kubenswrapper[4492]: E1126 07:07:17.886358 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b3d44e0c-7e22-4a5d-8e2d-b946e676417d" containerName="ceilometer-central-agent" Nov 26 07:07:17 crc kubenswrapper[4492]: I1126 07:07:17.886364 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="b3d44e0c-7e22-4a5d-8e2d-b946e676417d" containerName="ceilometer-central-agent" Nov 26 07:07:17 crc kubenswrapper[4492]: I1126 07:07:17.886580 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="b3d44e0c-7e22-4a5d-8e2d-b946e676417d" containerName="ceilometer-central-agent" Nov 26 07:07:17 crc kubenswrapper[4492]: I1126 07:07:17.886599 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="b3d44e0c-7e22-4a5d-8e2d-b946e676417d" containerName="sg-core" Nov 26 07:07:17 crc kubenswrapper[4492]: I1126 07:07:17.886606 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="b3d44e0c-7e22-4a5d-8e2d-b946e676417d" containerName="proxy-httpd" Nov 26 07:07:17 crc kubenswrapper[4492]: I1126 07:07:17.886616 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="b3d44e0c-7e22-4a5d-8e2d-b946e676417d" containerName="ceilometer-notification-agent" Nov 26 07:07:17 crc kubenswrapper[4492]: I1126 07:07:17.887373 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-wnw6v" Nov 26 07:07:17 crc kubenswrapper[4492]: I1126 07:07:17.891706 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-cx9jk" Nov 26 07:07:17 crc kubenswrapper[4492]: I1126 07:07:17.891886 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Nov 26 07:07:17 crc kubenswrapper[4492]: I1126 07:07:17.892039 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-scripts" Nov 26 07:07:17 crc kubenswrapper[4492]: I1126 07:07:17.925537 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-wnw6v"] Nov 26 07:07:17 crc kubenswrapper[4492]: I1126 07:07:17.979375 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/eb4502e1-bb87-4f1f-a4df-f6ffdfc8c1fa-config-data\") pod \"nova-cell0-conductor-db-sync-wnw6v\" (UID: \"eb4502e1-bb87-4f1f-a4df-f6ffdfc8c1fa\") " pod="openstack/nova-cell0-conductor-db-sync-wnw6v" Nov 26 07:07:17 crc kubenswrapper[4492]: I1126 07:07:17.979430 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/eb4502e1-bb87-4f1f-a4df-f6ffdfc8c1fa-scripts\") pod \"nova-cell0-conductor-db-sync-wnw6v\" (UID: \"eb4502e1-bb87-4f1f-a4df-f6ffdfc8c1fa\") " pod="openstack/nova-cell0-conductor-db-sync-wnw6v" Nov 26 07:07:17 crc kubenswrapper[4492]: I1126 07:07:17.979473 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jt9k2\" (UniqueName: \"kubernetes.io/projected/eb4502e1-bb87-4f1f-a4df-f6ffdfc8c1fa-kube-api-access-jt9k2\") pod \"nova-cell0-conductor-db-sync-wnw6v\" (UID: 
\"eb4502e1-bb87-4f1f-a4df-f6ffdfc8c1fa\") " pod="openstack/nova-cell0-conductor-db-sync-wnw6v" Nov 26 07:07:17 crc kubenswrapper[4492]: I1126 07:07:17.979503 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eb4502e1-bb87-4f1f-a4df-f6ffdfc8c1fa-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-wnw6v\" (UID: \"eb4502e1-bb87-4f1f-a4df-f6ffdfc8c1fa\") " pod="openstack/nova-cell0-conductor-db-sync-wnw6v" Nov 26 07:07:18 crc kubenswrapper[4492]: I1126 07:07:18.086808 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/eb4502e1-bb87-4f1f-a4df-f6ffdfc8c1fa-config-data\") pod \"nova-cell0-conductor-db-sync-wnw6v\" (UID: \"eb4502e1-bb87-4f1f-a4df-f6ffdfc8c1fa\") " pod="openstack/nova-cell0-conductor-db-sync-wnw6v" Nov 26 07:07:18 crc kubenswrapper[4492]: I1126 07:07:18.086867 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/eb4502e1-bb87-4f1f-a4df-f6ffdfc8c1fa-scripts\") pod \"nova-cell0-conductor-db-sync-wnw6v\" (UID: \"eb4502e1-bb87-4f1f-a4df-f6ffdfc8c1fa\") " pod="openstack/nova-cell0-conductor-db-sync-wnw6v" Nov 26 07:07:18 crc kubenswrapper[4492]: I1126 07:07:18.086929 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jt9k2\" (UniqueName: \"kubernetes.io/projected/eb4502e1-bb87-4f1f-a4df-f6ffdfc8c1fa-kube-api-access-jt9k2\") pod \"nova-cell0-conductor-db-sync-wnw6v\" (UID: \"eb4502e1-bb87-4f1f-a4df-f6ffdfc8c1fa\") " pod="openstack/nova-cell0-conductor-db-sync-wnw6v" Nov 26 07:07:18 crc kubenswrapper[4492]: I1126 07:07:18.086965 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eb4502e1-bb87-4f1f-a4df-f6ffdfc8c1fa-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-wnw6v\" (UID: \"eb4502e1-bb87-4f1f-a4df-f6ffdfc8c1fa\") " pod="openstack/nova-cell0-conductor-db-sync-wnw6v" Nov 26 07:07:18 crc kubenswrapper[4492]: I1126 07:07:18.093725 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eb4502e1-bb87-4f1f-a4df-f6ffdfc8c1fa-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-wnw6v\" (UID: \"eb4502e1-bb87-4f1f-a4df-f6ffdfc8c1fa\") " pod="openstack/nova-cell0-conductor-db-sync-wnw6v" Nov 26 07:07:18 crc kubenswrapper[4492]: I1126 07:07:18.094140 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/eb4502e1-bb87-4f1f-a4df-f6ffdfc8c1fa-config-data\") pod \"nova-cell0-conductor-db-sync-wnw6v\" (UID: \"eb4502e1-bb87-4f1f-a4df-f6ffdfc8c1fa\") " pod="openstack/nova-cell0-conductor-db-sync-wnw6v" Nov 26 07:07:18 crc kubenswrapper[4492]: I1126 07:07:18.096691 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/eb4502e1-bb87-4f1f-a4df-f6ffdfc8c1fa-scripts\") pod \"nova-cell0-conductor-db-sync-wnw6v\" (UID: \"eb4502e1-bb87-4f1f-a4df-f6ffdfc8c1fa\") " pod="openstack/nova-cell0-conductor-db-sync-wnw6v" Nov 26 07:07:18 crc kubenswrapper[4492]: I1126 07:07:18.110558 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jt9k2\" (UniqueName: \"kubernetes.io/projected/eb4502e1-bb87-4f1f-a4df-f6ffdfc8c1fa-kube-api-access-jt9k2\") pod 
\"nova-cell0-conductor-db-sync-wnw6v\" (UID: \"eb4502e1-bb87-4f1f-a4df-f6ffdfc8c1fa\") " pod="openstack/nova-cell0-conductor-db-sync-wnw6v" Nov 26 07:07:18 crc kubenswrapper[4492]: I1126 07:07:18.198462 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b3d44e0c-7e22-4a5d-8e2d-b946e676417d","Type":"ContainerDied","Data":"dc7d3e0298bc07798ea319e3ec79686e6aa63f3cd49cf535e64bb1a446dcf5e3"} Nov 26 07:07:18 crc kubenswrapper[4492]: I1126 07:07:18.198523 4492 scope.go:117] "RemoveContainer" containerID="8ecf28d6b52e685c2180672cf54db50c38cfa7a629469233361c43d6f8be3b7a" Nov 26 07:07:18 crc kubenswrapper[4492]: I1126 07:07:18.198671 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 26 07:07:18 crc kubenswrapper[4492]: I1126 07:07:18.209202 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-wnw6v" Nov 26 07:07:18 crc kubenswrapper[4492]: I1126 07:07:18.235096 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"8e5e94c9-4c33-46c7-a990-2e0486653c4c","Type":"ContainerStarted","Data":"80badb5c7eb32aea6e16fa26031f058de5b76272b299c11904a8852b00733020"} Nov 26 07:07:18 crc kubenswrapper[4492]: I1126 07:07:18.235138 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"8e5e94c9-4c33-46c7-a990-2e0486653c4c","Type":"ContainerStarted","Data":"f7dd0fbd920a34bb67d4b06f991ab612480a942f294574f7f1c190837f4c6ddc"} Nov 26 07:07:18 crc kubenswrapper[4492]: I1126 07:07:18.271211 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 26 07:07:18 crc kubenswrapper[4492]: I1126 07:07:18.281363 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 26 07:07:18 crc kubenswrapper[4492]: I1126 07:07:18.330149 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 26 07:07:18 crc kubenswrapper[4492]: I1126 07:07:18.331918 4492 scope.go:117] "RemoveContainer" containerID="bdec316493e10dfa4453c2100c9906ac89eab1d8422ceaec9088e4c01199ac09" Nov 26 07:07:18 crc kubenswrapper[4492]: I1126 07:07:18.335430 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 26 07:07:18 crc kubenswrapper[4492]: I1126 07:07:18.340564 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 26 07:07:18 crc kubenswrapper[4492]: I1126 07:07:18.340811 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 26 07:07:18 crc kubenswrapper[4492]: I1126 07:07:18.371419 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 26 07:07:18 crc kubenswrapper[4492]: I1126 07:07:18.388311 4492 scope.go:117] "RemoveContainer" containerID="d67947dcdc5f696f6f41a22fb29282515796fecda0296cb403bc65ebbf15cf00" Nov 26 07:07:18 crc kubenswrapper[4492]: I1126 07:07:18.398338 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a06c80cd-520a-4f2d-883a-d901e843633c-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"a06c80cd-520a-4f2d-883a-d901e843633c\") " pod="openstack/ceilometer-0" Nov 26 07:07:18 crc kubenswrapper[4492]: I1126 07:07:18.398389 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s8v7t\" (UniqueName: \"kubernetes.io/projected/a06c80cd-520a-4f2d-883a-d901e843633c-kube-api-access-s8v7t\") pod \"ceilometer-0\" (UID: \"a06c80cd-520a-4f2d-883a-d901e843633c\") " pod="openstack/ceilometer-0" Nov 26 07:07:18 crc kubenswrapper[4492]: I1126 07:07:18.398565 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a06c80cd-520a-4f2d-883a-d901e843633c-run-httpd\") pod \"ceilometer-0\" (UID: \"a06c80cd-520a-4f2d-883a-d901e843633c\") " pod="openstack/ceilometer-0" Nov 26 07:07:18 crc kubenswrapper[4492]: I1126 07:07:18.398637 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a06c80cd-520a-4f2d-883a-d901e843633c-scripts\") pod \"ceilometer-0\" (UID: \"a06c80cd-520a-4f2d-883a-d901e843633c\") " pod="openstack/ceilometer-0" Nov 26 07:07:18 crc kubenswrapper[4492]: I1126 07:07:18.398675 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a06c80cd-520a-4f2d-883a-d901e843633c-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"a06c80cd-520a-4f2d-883a-d901e843633c\") " pod="openstack/ceilometer-0" Nov 26 07:07:18 crc kubenswrapper[4492]: I1126 07:07:18.398716 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a06c80cd-520a-4f2d-883a-d901e843633c-config-data\") pod \"ceilometer-0\" (UID: \"a06c80cd-520a-4f2d-883a-d901e843633c\") " pod="openstack/ceilometer-0" Nov 26 07:07:18 crc kubenswrapper[4492]: I1126 07:07:18.398760 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a06c80cd-520a-4f2d-883a-d901e843633c-log-httpd\") pod \"ceilometer-0\" (UID: \"a06c80cd-520a-4f2d-883a-d901e843633c\") " pod="openstack/ceilometer-0" Nov 26 07:07:18 crc kubenswrapper[4492]: I1126 07:07:18.474798 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b3d44e0c-7e22-4a5d-8e2d-b946e676417d" 
path="/var/lib/kubelet/pods/b3d44e0c-7e22-4a5d-8e2d-b946e676417d/volumes" Nov 26 07:07:18 crc kubenswrapper[4492]: I1126 07:07:18.479331 4492 scope.go:117] "RemoveContainer" containerID="668b2bb36cb1b17711057eca172a0ad78a9bdd91556a97cb0b80daff59f82706" Nov 26 07:07:18 crc kubenswrapper[4492]: I1126 07:07:18.500590 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a06c80cd-520a-4f2d-883a-d901e843633c-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"a06c80cd-520a-4f2d-883a-d901e843633c\") " pod="openstack/ceilometer-0" Nov 26 07:07:18 crc kubenswrapper[4492]: I1126 07:07:18.500641 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a06c80cd-520a-4f2d-883a-d901e843633c-config-data\") pod \"ceilometer-0\" (UID: \"a06c80cd-520a-4f2d-883a-d901e843633c\") " pod="openstack/ceilometer-0" Nov 26 07:07:18 crc kubenswrapper[4492]: I1126 07:07:18.500671 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a06c80cd-520a-4f2d-883a-d901e843633c-log-httpd\") pod \"ceilometer-0\" (UID: \"a06c80cd-520a-4f2d-883a-d901e843633c\") " pod="openstack/ceilometer-0" Nov 26 07:07:18 crc kubenswrapper[4492]: I1126 07:07:18.500727 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a06c80cd-520a-4f2d-883a-d901e843633c-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"a06c80cd-520a-4f2d-883a-d901e843633c\") " pod="openstack/ceilometer-0" Nov 26 07:07:18 crc kubenswrapper[4492]: I1126 07:07:18.500760 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s8v7t\" (UniqueName: \"kubernetes.io/projected/a06c80cd-520a-4f2d-883a-d901e843633c-kube-api-access-s8v7t\") pod \"ceilometer-0\" (UID: \"a06c80cd-520a-4f2d-883a-d901e843633c\") " pod="openstack/ceilometer-0" Nov 26 07:07:18 crc kubenswrapper[4492]: I1126 07:07:18.500829 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a06c80cd-520a-4f2d-883a-d901e843633c-run-httpd\") pod \"ceilometer-0\" (UID: \"a06c80cd-520a-4f2d-883a-d901e843633c\") " pod="openstack/ceilometer-0" Nov 26 07:07:18 crc kubenswrapper[4492]: I1126 07:07:18.500863 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a06c80cd-520a-4f2d-883a-d901e843633c-scripts\") pod \"ceilometer-0\" (UID: \"a06c80cd-520a-4f2d-883a-d901e843633c\") " pod="openstack/ceilometer-0" Nov 26 07:07:18 crc kubenswrapper[4492]: I1126 07:07:18.502681 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a06c80cd-520a-4f2d-883a-d901e843633c-log-httpd\") pod \"ceilometer-0\" (UID: \"a06c80cd-520a-4f2d-883a-d901e843633c\") " pod="openstack/ceilometer-0" Nov 26 07:07:18 crc kubenswrapper[4492]: I1126 07:07:18.502949 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a06c80cd-520a-4f2d-883a-d901e843633c-run-httpd\") pod \"ceilometer-0\" (UID: \"a06c80cd-520a-4f2d-883a-d901e843633c\") " pod="openstack/ceilometer-0" Nov 26 07:07:18 crc kubenswrapper[4492]: I1126 07:07:18.507895 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" 
(UniqueName: \"kubernetes.io/secret/a06c80cd-520a-4f2d-883a-d901e843633c-scripts\") pod \"ceilometer-0\" (UID: \"a06c80cd-520a-4f2d-883a-d901e843633c\") " pod="openstack/ceilometer-0" Nov 26 07:07:18 crc kubenswrapper[4492]: I1126 07:07:18.509438 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a06c80cd-520a-4f2d-883a-d901e843633c-config-data\") pod \"ceilometer-0\" (UID: \"a06c80cd-520a-4f2d-883a-d901e843633c\") " pod="openstack/ceilometer-0" Nov 26 07:07:18 crc kubenswrapper[4492]: I1126 07:07:18.513676 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a06c80cd-520a-4f2d-883a-d901e843633c-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"a06c80cd-520a-4f2d-883a-d901e843633c\") " pod="openstack/ceilometer-0" Nov 26 07:07:18 crc kubenswrapper[4492]: I1126 07:07:18.518641 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a06c80cd-520a-4f2d-883a-d901e843633c-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"a06c80cd-520a-4f2d-883a-d901e843633c\") " pod="openstack/ceilometer-0" Nov 26 07:07:18 crc kubenswrapper[4492]: I1126 07:07:18.531065 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s8v7t\" (UniqueName: \"kubernetes.io/projected/a06c80cd-520a-4f2d-883a-d901e843633c-kube-api-access-s8v7t\") pod \"ceilometer-0\" (UID: \"a06c80cd-520a-4f2d-883a-d901e843633c\") " pod="openstack/ceilometer-0" Nov 26 07:07:18 crc kubenswrapper[4492]: I1126 07:07:18.659777 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 26 07:07:18 crc kubenswrapper[4492]: I1126 07:07:18.897953 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-wnw6v"] Nov 26 07:07:19 crc kubenswrapper[4492]: I1126 07:07:19.042117 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-api-564669c98c-fvvts" Nov 26 07:07:19 crc kubenswrapper[4492]: I1126 07:07:19.107010 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-api-75f4bc59f-8qztx"] Nov 26 07:07:19 crc kubenswrapper[4492]: I1126 07:07:19.199511 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-cfnapi-cc797855b-f4zsl" Nov 26 07:07:19 crc kubenswrapper[4492]: I1126 07:07:19.237321 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 26 07:07:19 crc kubenswrapper[4492]: I1126 07:07:19.301836 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"8e5e94c9-4c33-46c7-a990-2e0486653c4c","Type":"ContainerStarted","Data":"a09f6577c9eab79e0a17648886734baba497ee14abd76bda57bbf0e923e4684d"} Nov 26 07:07:19 crc kubenswrapper[4492]: I1126 07:07:19.310262 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-cfnapi-65c87b98b5-7kjlx"] Nov 26 07:07:19 crc kubenswrapper[4492]: I1126 07:07:19.320450 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-wnw6v" event={"ID":"eb4502e1-bb87-4f1f-a4df-f6ffdfc8c1fa","Type":"ContainerStarted","Data":"ffe0c578b301d7c935504cc3bd70eeb82426bad001400dd8692551c5df83169b"} Nov 26 07:07:19 crc kubenswrapper[4492]: I1126 07:07:19.332654 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack/glance-default-internal-api-0" podStartSLOduration=3.3326359070000002 podStartE2EDuration="3.332635907s" podCreationTimestamp="2025-11-26 07:07:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:07:19.329500551 +0000 UTC m=+1135.213388840" watchObservedRunningTime="2025-11-26 07:07:19.332635907 +0000 UTC m=+1135.216524205" Nov 26 07:07:19 crc kubenswrapper[4492]: I1126 07:07:19.441013 4492 patch_prober.go:28] interesting pod/machine-config-daemon-6blv7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 07:07:19 crc kubenswrapper[4492]: I1126 07:07:19.441064 4492 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 07:07:19 crc kubenswrapper[4492]: I1126 07:07:19.441098 4492 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" Nov 26 07:07:19 crc kubenswrapper[4492]: I1126 07:07:19.441433 4492 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"087b801b537b43d8dae36da1027953befbb4ce83f773382d5e7a1b8510080157"} pod="openshift-machine-config-operator/machine-config-daemon-6blv7" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 07:07:19 crc kubenswrapper[4492]: I1126 07:07:19.441471 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" containerID="cri-o://087b801b537b43d8dae36da1027953befbb4ce83f773382d5e7a1b8510080157" gracePeriod=600 Nov 26 07:07:19 crc kubenswrapper[4492]: I1126 07:07:19.601002 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-75f4bc59f-8qztx" Nov 26 07:07:19 crc kubenswrapper[4492]: I1126 07:07:19.733490 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-cfnapi-65c87b98b5-7kjlx" Nov 26 07:07:19 crc kubenswrapper[4492]: I1126 07:07:19.739928 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3e950c7f-2241-4475-a891-d97102b54b9b-config-data\") pod \"3e950c7f-2241-4475-a891-d97102b54b9b\" (UID: \"3e950c7f-2241-4475-a891-d97102b54b9b\") " Nov 26 07:07:19 crc kubenswrapper[4492]: I1126 07:07:19.740205 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-98srx\" (UniqueName: \"kubernetes.io/projected/3e950c7f-2241-4475-a891-d97102b54b9b-kube-api-access-98srx\") pod \"3e950c7f-2241-4475-a891-d97102b54b9b\" (UID: \"3e950c7f-2241-4475-a891-d97102b54b9b\") " Nov 26 07:07:19 crc kubenswrapper[4492]: I1126 07:07:19.740230 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3e950c7f-2241-4475-a891-d97102b54b9b-combined-ca-bundle\") pod \"3e950c7f-2241-4475-a891-d97102b54b9b\" (UID: \"3e950c7f-2241-4475-a891-d97102b54b9b\") " Nov 26 07:07:19 crc kubenswrapper[4492]: I1126 07:07:19.740276 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3e950c7f-2241-4475-a891-d97102b54b9b-config-data-custom\") pod \"3e950c7f-2241-4475-a891-d97102b54b9b\" (UID: \"3e950c7f-2241-4475-a891-d97102b54b9b\") " Nov 26 07:07:19 crc kubenswrapper[4492]: I1126 07:07:19.748061 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3e950c7f-2241-4475-a891-d97102b54b9b-kube-api-access-98srx" (OuterVolumeSpecName: "kube-api-access-98srx") pod "3e950c7f-2241-4475-a891-d97102b54b9b" (UID: "3e950c7f-2241-4475-a891-d97102b54b9b"). InnerVolumeSpecName "kube-api-access-98srx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:07:19 crc kubenswrapper[4492]: I1126 07:07:19.752980 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3e950c7f-2241-4475-a891-d97102b54b9b-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "3e950c7f-2241-4475-a891-d97102b54b9b" (UID: "3e950c7f-2241-4475-a891-d97102b54b9b"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:07:19 crc kubenswrapper[4492]: I1126 07:07:19.787386 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3e950c7f-2241-4475-a891-d97102b54b9b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3e950c7f-2241-4475-a891-d97102b54b9b" (UID: "3e950c7f-2241-4475-a891-d97102b54b9b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:07:19 crc kubenswrapper[4492]: I1126 07:07:19.816628 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3e950c7f-2241-4475-a891-d97102b54b9b-config-data" (OuterVolumeSpecName: "config-data") pod "3e950c7f-2241-4475-a891-d97102b54b9b" (UID: "3e950c7f-2241-4475-a891-d97102b54b9b"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:07:19 crc kubenswrapper[4492]: I1126 07:07:19.843646 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c7817ef7-6407-49f2-8f5b-3357945f0ec0-config-data-custom\") pod \"c7817ef7-6407-49f2-8f5b-3357945f0ec0\" (UID: \"c7817ef7-6407-49f2-8f5b-3357945f0ec0\") " Nov 26 07:07:19 crc kubenswrapper[4492]: I1126 07:07:19.843828 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c7817ef7-6407-49f2-8f5b-3357945f0ec0-config-data\") pod \"c7817ef7-6407-49f2-8f5b-3357945f0ec0\" (UID: \"c7817ef7-6407-49f2-8f5b-3357945f0ec0\") " Nov 26 07:07:19 crc kubenswrapper[4492]: I1126 07:07:19.844128 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hzp98\" (UniqueName: \"kubernetes.io/projected/c7817ef7-6407-49f2-8f5b-3357945f0ec0-kube-api-access-hzp98\") pod \"c7817ef7-6407-49f2-8f5b-3357945f0ec0\" (UID: \"c7817ef7-6407-49f2-8f5b-3357945f0ec0\") " Nov 26 07:07:19 crc kubenswrapper[4492]: I1126 07:07:19.844266 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c7817ef7-6407-49f2-8f5b-3357945f0ec0-combined-ca-bundle\") pod \"c7817ef7-6407-49f2-8f5b-3357945f0ec0\" (UID: \"c7817ef7-6407-49f2-8f5b-3357945f0ec0\") " Nov 26 07:07:19 crc kubenswrapper[4492]: I1126 07:07:19.849439 4492 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3e950c7f-2241-4475-a891-d97102b54b9b-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 07:07:19 crc kubenswrapper[4492]: I1126 07:07:19.849467 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-98srx\" (UniqueName: \"kubernetes.io/projected/3e950c7f-2241-4475-a891-d97102b54b9b-kube-api-access-98srx\") on node \"crc\" DevicePath \"\"" Nov 26 07:07:19 crc kubenswrapper[4492]: I1126 07:07:19.849486 4492 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3e950c7f-2241-4475-a891-d97102b54b9b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:07:19 crc kubenswrapper[4492]: I1126 07:07:19.849499 4492 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3e950c7f-2241-4475-a891-d97102b54b9b-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 26 07:07:19 crc kubenswrapper[4492]: I1126 07:07:19.856272 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c7817ef7-6407-49f2-8f5b-3357945f0ec0-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "c7817ef7-6407-49f2-8f5b-3357945f0ec0" (UID: "c7817ef7-6407-49f2-8f5b-3357945f0ec0"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:07:19 crc kubenswrapper[4492]: I1126 07:07:19.858513 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c7817ef7-6407-49f2-8f5b-3357945f0ec0-kube-api-access-hzp98" (OuterVolumeSpecName: "kube-api-access-hzp98") pod "c7817ef7-6407-49f2-8f5b-3357945f0ec0" (UID: "c7817ef7-6407-49f2-8f5b-3357945f0ec0"). InnerVolumeSpecName "kube-api-access-hzp98". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:07:19 crc kubenswrapper[4492]: I1126 07:07:19.878413 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c7817ef7-6407-49f2-8f5b-3357945f0ec0-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c7817ef7-6407-49f2-8f5b-3357945f0ec0" (UID: "c7817ef7-6407-49f2-8f5b-3357945f0ec0"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:07:19 crc kubenswrapper[4492]: I1126 07:07:19.894357 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c7817ef7-6407-49f2-8f5b-3357945f0ec0-config-data" (OuterVolumeSpecName: "config-data") pod "c7817ef7-6407-49f2-8f5b-3357945f0ec0" (UID: "c7817ef7-6407-49f2-8f5b-3357945f0ec0"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:07:19 crc kubenswrapper[4492]: I1126 07:07:19.951815 4492 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c7817ef7-6407-49f2-8f5b-3357945f0ec0-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 26 07:07:19 crc kubenswrapper[4492]: I1126 07:07:19.951850 4492 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c7817ef7-6407-49f2-8f5b-3357945f0ec0-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 07:07:19 crc kubenswrapper[4492]: I1126 07:07:19.951863 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hzp98\" (UniqueName: \"kubernetes.io/projected/c7817ef7-6407-49f2-8f5b-3357945f0ec0-kube-api-access-hzp98\") on node \"crc\" DevicePath \"\"" Nov 26 07:07:19 crc kubenswrapper[4492]: I1126 07:07:19.951877 4492 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c7817ef7-6407-49f2-8f5b-3357945f0ec0-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:07:20 crc kubenswrapper[4492]: I1126 07:07:20.347426 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a06c80cd-520a-4f2d-883a-d901e843633c","Type":"ContainerStarted","Data":"1cbc9001acd903a36603c5b59d269cf862dfb33fc5fee575dbb53a1567bdc8d0"} Nov 26 07:07:20 crc kubenswrapper[4492]: I1126 07:07:20.347589 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a06c80cd-520a-4f2d-883a-d901e843633c","Type":"ContainerStarted","Data":"d1597052aa0ae832fbfdc8a90d96d9f052ee6a2b2aa2036bde8e0e27277e13e7"} Nov 26 07:07:20 crc kubenswrapper[4492]: I1126 07:07:20.357852 4492 generic.go:334] "Generic (PLEG): container finished" podID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerID="087b801b537b43d8dae36da1027953befbb4ce83f773382d5e7a1b8510080157" exitCode=0 Nov 26 07:07:20 crc kubenswrapper[4492]: I1126 07:07:20.357911 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" event={"ID":"04bf18ad-d2a1-4b30-a3fa-2b6247363c82","Type":"ContainerDied","Data":"087b801b537b43d8dae36da1027953befbb4ce83f773382d5e7a1b8510080157"} Nov 26 07:07:20 crc kubenswrapper[4492]: I1126 07:07:20.357949 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" event={"ID":"04bf18ad-d2a1-4b30-a3fa-2b6247363c82","Type":"ContainerStarted","Data":"0dcfa8699dfdd37bac8bee745692f7cd2b7b8f4c90664301cb53b268008d376c"} Nov 26 
07:07:20 crc kubenswrapper[4492]: I1126 07:07:20.357972 4492 scope.go:117] "RemoveContainer" containerID="a50e8acb1a9896b6c0b164453458208e77d6a13aa21290189661d9ca53c2668b" Nov 26 07:07:20 crc kubenswrapper[4492]: I1126 07:07:20.365013 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-75f4bc59f-8qztx" Nov 26 07:07:20 crc kubenswrapper[4492]: I1126 07:07:20.365130 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-75f4bc59f-8qztx" event={"ID":"3e950c7f-2241-4475-a891-d97102b54b9b","Type":"ContainerDied","Data":"e07f88b8a42caa8558b73b18d59d7a961e27d39b2719d3aca4a9bfa88972abdc"} Nov 26 07:07:20 crc kubenswrapper[4492]: I1126 07:07:20.384309 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-65c87b98b5-7kjlx" Nov 26 07:07:20 crc kubenswrapper[4492]: I1126 07:07:20.385384 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-65c87b98b5-7kjlx" event={"ID":"c7817ef7-6407-49f2-8f5b-3357945f0ec0","Type":"ContainerDied","Data":"dfe55b887b861af9b7f4abe0123b6da00e6621af7fa9aa657a41cbf3198b57b0"} Nov 26 07:07:20 crc kubenswrapper[4492]: I1126 07:07:20.478121 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-api-75f4bc59f-8qztx"] Nov 26 07:07:20 crc kubenswrapper[4492]: I1126 07:07:20.478156 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-api-75f4bc59f-8qztx"] Nov 26 07:07:20 crc kubenswrapper[4492]: I1126 07:07:20.497213 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-cfnapi-65c87b98b5-7kjlx"] Nov 26 07:07:20 crc kubenswrapper[4492]: I1126 07:07:20.499543 4492 scope.go:117] "RemoveContainer" containerID="aef0af39dc68fdb9a04bc4ec82267b35281b86211b9b0adf8c987803d81f21f4" Nov 26 07:07:20 crc kubenswrapper[4492]: I1126 07:07:20.509794 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-cfnapi-65c87b98b5-7kjlx"] Nov 26 07:07:20 crc kubenswrapper[4492]: I1126 07:07:20.543123 4492 scope.go:117] "RemoveContainer" containerID="ad49995e2aecaba86e1a66b69a5c31ce8b84c3fcda1fb95b90b721c027caf77a" Nov 26 07:07:21 crc kubenswrapper[4492]: I1126 07:07:21.419542 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a06c80cd-520a-4f2d-883a-d901e843633c","Type":"ContainerStarted","Data":"7d0a32afcee64c8d9a3daddb305f1e07aef2571b6cee8f9d6f934ce8323bdd50"} Nov 26 07:07:21 crc kubenswrapper[4492]: I1126 07:07:21.738561 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 26 07:07:21 crc kubenswrapper[4492]: I1126 07:07:21.739039 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 26 07:07:21 crc kubenswrapper[4492]: I1126 07:07:21.788467 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 26 07:07:21 crc kubenswrapper[4492]: I1126 07:07:21.829097 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 26 07:07:22 crc kubenswrapper[4492]: I1126 07:07:22.477956 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3e950c7f-2241-4475-a891-d97102b54b9b" path="/var/lib/kubelet/pods/3e950c7f-2241-4475-a891-d97102b54b9b/volumes" Nov 26 07:07:22 crc kubenswrapper[4492]: I1126 07:07:22.479036 4492 kubelet_volumes.go:163] 
"Cleaned up orphaned pod volumes dir" podUID="c7817ef7-6407-49f2-8f5b-3357945f0ec0" path="/var/lib/kubelet/pods/c7817ef7-6407-49f2-8f5b-3357945f0ec0/volumes" Nov 26 07:07:22 crc kubenswrapper[4492]: I1126 07:07:22.507385 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a06c80cd-520a-4f2d-883a-d901e843633c","Type":"ContainerStarted","Data":"272fc3681cf626470a9bf23a14bc84e89a93f11537e07916a95c5bd3b9efe027"} Nov 26 07:07:22 crc kubenswrapper[4492]: I1126 07:07:22.508265 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 26 07:07:22 crc kubenswrapper[4492]: I1126 07:07:22.508292 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 26 07:07:23 crc kubenswrapper[4492]: I1126 07:07:23.520398 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a06c80cd-520a-4f2d-883a-d901e843633c","Type":"ContainerStarted","Data":"13c3904599882233699afa6fe76d5912c7a8a90ea2540c9e4184677414dcf437"} Nov 26 07:07:23 crc kubenswrapper[4492]: I1126 07:07:23.520644 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 26 07:07:24 crc kubenswrapper[4492]: I1126 07:07:24.486134 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.707251549 podStartE2EDuration="6.486108126s" podCreationTimestamp="2025-11-26 07:07:18 +0000 UTC" firstStartedPulling="2025-11-26 07:07:19.276091539 +0000 UTC m=+1135.159979837" lastFinishedPulling="2025-11-26 07:07:23.054948116 +0000 UTC m=+1138.938836414" observedRunningTime="2025-11-26 07:07:23.540793073 +0000 UTC m=+1139.424681370" watchObservedRunningTime="2025-11-26 07:07:24.486108126 +0000 UTC m=+1140.369996425" Nov 26 07:07:24 crc kubenswrapper[4492]: I1126 07:07:24.683756 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-engine-c57f564f4-75zkc" Nov 26 07:07:24 crc kubenswrapper[4492]: I1126 07:07:24.759798 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-engine-85bd8ddcc5-m7gs8"] Nov 26 07:07:24 crc kubenswrapper[4492]: I1126 07:07:24.760594 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/heat-engine-85bd8ddcc5-m7gs8" podUID="27be14bd-76c4-41ab-bef2-0b1bcc13e0df" containerName="heat-engine" containerID="cri-o://040c72d181fef2f419d296b4226ad9596759b979def4f787fe5e843dcafb65e6" gracePeriod=60 Nov 26 07:07:25 crc kubenswrapper[4492]: I1126 07:07:25.500381 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 26 07:07:25 crc kubenswrapper[4492]: I1126 07:07:25.500615 4492 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 26 07:07:25 crc kubenswrapper[4492]: I1126 07:07:25.527677 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 26 07:07:26 crc kubenswrapper[4492]: I1126 07:07:26.485910 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 26 07:07:26 crc kubenswrapper[4492]: I1126 07:07:26.486325 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 26 07:07:26 crc kubenswrapper[4492]: I1126 07:07:26.525864 4492 kubelet.go:2542] 
"SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 26 07:07:26 crc kubenswrapper[4492]: I1126 07:07:26.559739 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 26 07:07:26 crc kubenswrapper[4492]: I1126 07:07:26.575106 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 26 07:07:26 crc kubenswrapper[4492]: I1126 07:07:26.575283 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 26 07:07:27 crc kubenswrapper[4492]: E1126 07:07:27.382211 4492 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="040c72d181fef2f419d296b4226ad9596759b979def4f787fe5e843dcafb65e6" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Nov 26 07:07:27 crc kubenswrapper[4492]: E1126 07:07:27.385780 4492 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="040c72d181fef2f419d296b4226ad9596759b979def4f787fe5e843dcafb65e6" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Nov 26 07:07:27 crc kubenswrapper[4492]: E1126 07:07:27.405301 4492 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="040c72d181fef2f419d296b4226ad9596759b979def4f787fe5e843dcafb65e6" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Nov 26 07:07:27 crc kubenswrapper[4492]: E1126 07:07:27.405379 4492 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/heat-engine-85bd8ddcc5-m7gs8" podUID="27be14bd-76c4-41ab-bef2-0b1bcc13e0df" containerName="heat-engine" Nov 26 07:07:30 crc kubenswrapper[4492]: I1126 07:07:30.238504 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 26 07:07:30 crc kubenswrapper[4492]: I1126 07:07:30.241167 4492 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 26 07:07:30 crc kubenswrapper[4492]: I1126 07:07:30.624017 4492 generic.go:334] "Generic (PLEG): container finished" podID="27be14bd-76c4-41ab-bef2-0b1bcc13e0df" containerID="040c72d181fef2f419d296b4226ad9596759b979def4f787fe5e843dcafb65e6" exitCode=0 Nov 26 07:07:30 crc kubenswrapper[4492]: I1126 07:07:30.624090 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-85bd8ddcc5-m7gs8" event={"ID":"27be14bd-76c4-41ab-bef2-0b1bcc13e0df","Type":"ContainerDied","Data":"040c72d181fef2f419d296b4226ad9596759b979def4f787fe5e843dcafb65e6"} Nov 26 07:07:30 crc kubenswrapper[4492]: I1126 07:07:30.660506 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 26 07:07:33 crc kubenswrapper[4492]: I1126 07:07:33.963485 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-engine-85bd8ddcc5-m7gs8" Nov 26 07:07:34 crc kubenswrapper[4492]: I1126 07:07:34.050518 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-smnzp\" (UniqueName: \"kubernetes.io/projected/27be14bd-76c4-41ab-bef2-0b1bcc13e0df-kube-api-access-smnzp\") pod \"27be14bd-76c4-41ab-bef2-0b1bcc13e0df\" (UID: \"27be14bd-76c4-41ab-bef2-0b1bcc13e0df\") " Nov 26 07:07:34 crc kubenswrapper[4492]: I1126 07:07:34.050797 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/27be14bd-76c4-41ab-bef2-0b1bcc13e0df-combined-ca-bundle\") pod \"27be14bd-76c4-41ab-bef2-0b1bcc13e0df\" (UID: \"27be14bd-76c4-41ab-bef2-0b1bcc13e0df\") " Nov 26 07:07:34 crc kubenswrapper[4492]: I1126 07:07:34.050844 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/27be14bd-76c4-41ab-bef2-0b1bcc13e0df-config-data\") pod \"27be14bd-76c4-41ab-bef2-0b1bcc13e0df\" (UID: \"27be14bd-76c4-41ab-bef2-0b1bcc13e0df\") " Nov 26 07:07:34 crc kubenswrapper[4492]: I1126 07:07:34.050883 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/27be14bd-76c4-41ab-bef2-0b1bcc13e0df-config-data-custom\") pod \"27be14bd-76c4-41ab-bef2-0b1bcc13e0df\" (UID: \"27be14bd-76c4-41ab-bef2-0b1bcc13e0df\") " Nov 26 07:07:34 crc kubenswrapper[4492]: I1126 07:07:34.057411 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/27be14bd-76c4-41ab-bef2-0b1bcc13e0df-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "27be14bd-76c4-41ab-bef2-0b1bcc13e0df" (UID: "27be14bd-76c4-41ab-bef2-0b1bcc13e0df"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:07:34 crc kubenswrapper[4492]: I1126 07:07:34.059203 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/27be14bd-76c4-41ab-bef2-0b1bcc13e0df-kube-api-access-smnzp" (OuterVolumeSpecName: "kube-api-access-smnzp") pod "27be14bd-76c4-41ab-bef2-0b1bcc13e0df" (UID: "27be14bd-76c4-41ab-bef2-0b1bcc13e0df"). InnerVolumeSpecName "kube-api-access-smnzp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:07:34 crc kubenswrapper[4492]: I1126 07:07:34.088951 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/27be14bd-76c4-41ab-bef2-0b1bcc13e0df-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "27be14bd-76c4-41ab-bef2-0b1bcc13e0df" (UID: "27be14bd-76c4-41ab-bef2-0b1bcc13e0df"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:07:34 crc kubenswrapper[4492]: I1126 07:07:34.114980 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/27be14bd-76c4-41ab-bef2-0b1bcc13e0df-config-data" (OuterVolumeSpecName: "config-data") pod "27be14bd-76c4-41ab-bef2-0b1bcc13e0df" (UID: "27be14bd-76c4-41ab-bef2-0b1bcc13e0df"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:07:34 crc kubenswrapper[4492]: I1126 07:07:34.152898 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-smnzp\" (UniqueName: \"kubernetes.io/projected/27be14bd-76c4-41ab-bef2-0b1bcc13e0df-kube-api-access-smnzp\") on node \"crc\" DevicePath \"\"" Nov 26 07:07:34 crc kubenswrapper[4492]: I1126 07:07:34.152926 4492 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/27be14bd-76c4-41ab-bef2-0b1bcc13e0df-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:07:34 crc kubenswrapper[4492]: I1126 07:07:34.152946 4492 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/27be14bd-76c4-41ab-bef2-0b1bcc13e0df-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 07:07:34 crc kubenswrapper[4492]: I1126 07:07:34.152957 4492 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/27be14bd-76c4-41ab-bef2-0b1bcc13e0df-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 26 07:07:34 crc kubenswrapper[4492]: I1126 07:07:34.710431 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-wnw6v" event={"ID":"eb4502e1-bb87-4f1f-a4df-f6ffdfc8c1fa","Type":"ContainerStarted","Data":"6980074a256088ca155f9a2337d5f00ab5b25870c482a343f59fe2b9d33e6976"} Nov 26 07:07:34 crc kubenswrapper[4492]: I1126 07:07:34.715157 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-85bd8ddcc5-m7gs8" event={"ID":"27be14bd-76c4-41ab-bef2-0b1bcc13e0df","Type":"ContainerDied","Data":"f0fd6a44937d8fd3b20a558da35f27d8a52d11fe50191816135fc613bf955950"} Nov 26 07:07:34 crc kubenswrapper[4492]: I1126 07:07:34.715241 4492 scope.go:117] "RemoveContainer" containerID="040c72d181fef2f419d296b4226ad9596759b979def4f787fe5e843dcafb65e6" Nov 26 07:07:34 crc kubenswrapper[4492]: I1126 07:07:34.715367 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-engine-85bd8ddcc5-m7gs8" Nov 26 07:07:34 crc kubenswrapper[4492]: I1126 07:07:34.735483 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-db-sync-wnw6v" podStartSLOduration=2.8254680800000003 podStartE2EDuration="17.735463745s" podCreationTimestamp="2025-11-26 07:07:17 +0000 UTC" firstStartedPulling="2025-11-26 07:07:18.933265661 +0000 UTC m=+1134.817153959" lastFinishedPulling="2025-11-26 07:07:33.843261326 +0000 UTC m=+1149.727149624" observedRunningTime="2025-11-26 07:07:34.729237489 +0000 UTC m=+1150.613125787" watchObservedRunningTime="2025-11-26 07:07:34.735463745 +0000 UTC m=+1150.619352042" Nov 26 07:07:34 crc kubenswrapper[4492]: I1126 07:07:34.751185 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-engine-85bd8ddcc5-m7gs8"] Nov 26 07:07:34 crc kubenswrapper[4492]: I1126 07:07:34.755626 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-engine-85bd8ddcc5-m7gs8"] Nov 26 07:07:36 crc kubenswrapper[4492]: I1126 07:07:36.449822 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="27be14bd-76c4-41ab-bef2-0b1bcc13e0df" path="/var/lib/kubelet/pods/27be14bd-76c4-41ab-bef2-0b1bcc13e0df/volumes" Nov 26 07:07:36 crc kubenswrapper[4492]: I1126 07:07:36.529355 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 26 07:07:36 crc kubenswrapper[4492]: I1126 07:07:36.529647 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="a06c80cd-520a-4f2d-883a-d901e843633c" containerName="ceilometer-central-agent" containerID="cri-o://1cbc9001acd903a36603c5b59d269cf862dfb33fc5fee575dbb53a1567bdc8d0" gracePeriod=30 Nov 26 07:07:36 crc kubenswrapper[4492]: I1126 07:07:36.532239 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="a06c80cd-520a-4f2d-883a-d901e843633c" containerName="sg-core" containerID="cri-o://272fc3681cf626470a9bf23a14bc84e89a93f11537e07916a95c5bd3b9efe027" gracePeriod=30 Nov 26 07:07:36 crc kubenswrapper[4492]: I1126 07:07:36.532294 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="a06c80cd-520a-4f2d-883a-d901e843633c" containerName="ceilometer-notification-agent" containerID="cri-o://7d0a32afcee64c8d9a3daddb305f1e07aef2571b6cee8f9d6f934ce8323bdd50" gracePeriod=30 Nov 26 07:07:36 crc kubenswrapper[4492]: I1126 07:07:36.532427 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="a06c80cd-520a-4f2d-883a-d901e843633c" containerName="proxy-httpd" containerID="cri-o://13c3904599882233699afa6fe76d5912c7a8a90ea2540c9e4184677414dcf437" gracePeriod=30 Nov 26 07:07:36 crc kubenswrapper[4492]: I1126 07:07:36.545921 4492 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="a06c80cd-520a-4f2d-883a-d901e843633c" containerName="proxy-httpd" probeResult="failure" output="Get \"http://10.217.0.196:3000/\": EOF" Nov 26 07:07:36 crc kubenswrapper[4492]: I1126 07:07:36.739072 4492 generic.go:334] "Generic (PLEG): container finished" podID="a06c80cd-520a-4f2d-883a-d901e843633c" containerID="272fc3681cf626470a9bf23a14bc84e89a93f11537e07916a95c5bd3b9efe027" exitCode=2 Nov 26 07:07:36 crc kubenswrapper[4492]: I1126 07:07:36.739460 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"a06c80cd-520a-4f2d-883a-d901e843633c","Type":"ContainerDied","Data":"272fc3681cf626470a9bf23a14bc84e89a93f11537e07916a95c5bd3b9efe027"} Nov 26 07:07:37 crc kubenswrapper[4492]: I1126 07:07:37.521859 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 26 07:07:37 crc kubenswrapper[4492]: I1126 07:07:37.628905 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s8v7t\" (UniqueName: \"kubernetes.io/projected/a06c80cd-520a-4f2d-883a-d901e843633c-kube-api-access-s8v7t\") pod \"a06c80cd-520a-4f2d-883a-d901e843633c\" (UID: \"a06c80cd-520a-4f2d-883a-d901e843633c\") " Nov 26 07:07:37 crc kubenswrapper[4492]: I1126 07:07:37.629031 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a06c80cd-520a-4f2d-883a-d901e843633c-scripts\") pod \"a06c80cd-520a-4f2d-883a-d901e843633c\" (UID: \"a06c80cd-520a-4f2d-883a-d901e843633c\") " Nov 26 07:07:37 crc kubenswrapper[4492]: I1126 07:07:37.629073 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a06c80cd-520a-4f2d-883a-d901e843633c-log-httpd\") pod \"a06c80cd-520a-4f2d-883a-d901e843633c\" (UID: \"a06c80cd-520a-4f2d-883a-d901e843633c\") " Nov 26 07:07:37 crc kubenswrapper[4492]: I1126 07:07:37.629102 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a06c80cd-520a-4f2d-883a-d901e843633c-run-httpd\") pod \"a06c80cd-520a-4f2d-883a-d901e843633c\" (UID: \"a06c80cd-520a-4f2d-883a-d901e843633c\") " Nov 26 07:07:37 crc kubenswrapper[4492]: I1126 07:07:37.629167 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a06c80cd-520a-4f2d-883a-d901e843633c-combined-ca-bundle\") pod \"a06c80cd-520a-4f2d-883a-d901e843633c\" (UID: \"a06c80cd-520a-4f2d-883a-d901e843633c\") " Nov 26 07:07:37 crc kubenswrapper[4492]: I1126 07:07:37.629224 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a06c80cd-520a-4f2d-883a-d901e843633c-sg-core-conf-yaml\") pod \"a06c80cd-520a-4f2d-883a-d901e843633c\" (UID: \"a06c80cd-520a-4f2d-883a-d901e843633c\") " Nov 26 07:07:37 crc kubenswrapper[4492]: I1126 07:07:37.629289 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a06c80cd-520a-4f2d-883a-d901e843633c-config-data\") pod \"a06c80cd-520a-4f2d-883a-d901e843633c\" (UID: \"a06c80cd-520a-4f2d-883a-d901e843633c\") " Nov 26 07:07:37 crc kubenswrapper[4492]: I1126 07:07:37.631003 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a06c80cd-520a-4f2d-883a-d901e843633c-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "a06c80cd-520a-4f2d-883a-d901e843633c" (UID: "a06c80cd-520a-4f2d-883a-d901e843633c"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:07:37 crc kubenswrapper[4492]: I1126 07:07:37.631248 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a06c80cd-520a-4f2d-883a-d901e843633c-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "a06c80cd-520a-4f2d-883a-d901e843633c" (UID: "a06c80cd-520a-4f2d-883a-d901e843633c"). 
InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:07:37 crc kubenswrapper[4492]: I1126 07:07:37.636764 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a06c80cd-520a-4f2d-883a-d901e843633c-scripts" (OuterVolumeSpecName: "scripts") pod "a06c80cd-520a-4f2d-883a-d901e843633c" (UID: "a06c80cd-520a-4f2d-883a-d901e843633c"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:07:37 crc kubenswrapper[4492]: I1126 07:07:37.640085 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a06c80cd-520a-4f2d-883a-d901e843633c-kube-api-access-s8v7t" (OuterVolumeSpecName: "kube-api-access-s8v7t") pod "a06c80cd-520a-4f2d-883a-d901e843633c" (UID: "a06c80cd-520a-4f2d-883a-d901e843633c"). InnerVolumeSpecName "kube-api-access-s8v7t". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:07:37 crc kubenswrapper[4492]: I1126 07:07:37.661665 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a06c80cd-520a-4f2d-883a-d901e843633c-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "a06c80cd-520a-4f2d-883a-d901e843633c" (UID: "a06c80cd-520a-4f2d-883a-d901e843633c"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:07:37 crc kubenswrapper[4492]: I1126 07:07:37.694233 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a06c80cd-520a-4f2d-883a-d901e843633c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a06c80cd-520a-4f2d-883a-d901e843633c" (UID: "a06c80cd-520a-4f2d-883a-d901e843633c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:07:37 crc kubenswrapper[4492]: I1126 07:07:37.720225 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a06c80cd-520a-4f2d-883a-d901e843633c-config-data" (OuterVolumeSpecName: "config-data") pod "a06c80cd-520a-4f2d-883a-d901e843633c" (UID: "a06c80cd-520a-4f2d-883a-d901e843633c"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:07:37 crc kubenswrapper[4492]: I1126 07:07:37.732241 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s8v7t\" (UniqueName: \"kubernetes.io/projected/a06c80cd-520a-4f2d-883a-d901e843633c-kube-api-access-s8v7t\") on node \"crc\" DevicePath \"\"" Nov 26 07:07:37 crc kubenswrapper[4492]: I1126 07:07:37.732272 4492 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a06c80cd-520a-4f2d-883a-d901e843633c-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 07:07:37 crc kubenswrapper[4492]: I1126 07:07:37.732285 4492 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a06c80cd-520a-4f2d-883a-d901e843633c-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 26 07:07:37 crc kubenswrapper[4492]: I1126 07:07:37.732298 4492 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a06c80cd-520a-4f2d-883a-d901e843633c-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 26 07:07:37 crc kubenswrapper[4492]: I1126 07:07:37.732308 4492 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a06c80cd-520a-4f2d-883a-d901e843633c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:07:37 crc kubenswrapper[4492]: I1126 07:07:37.732318 4492 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a06c80cd-520a-4f2d-883a-d901e843633c-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 26 07:07:37 crc kubenswrapper[4492]: I1126 07:07:37.732328 4492 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a06c80cd-520a-4f2d-883a-d901e843633c-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 07:07:37 crc kubenswrapper[4492]: I1126 07:07:37.754338 4492 generic.go:334] "Generic (PLEG): container finished" podID="a06c80cd-520a-4f2d-883a-d901e843633c" containerID="13c3904599882233699afa6fe76d5912c7a8a90ea2540c9e4184677414dcf437" exitCode=0 Nov 26 07:07:37 crc kubenswrapper[4492]: I1126 07:07:37.754443 4492 generic.go:334] "Generic (PLEG): container finished" podID="a06c80cd-520a-4f2d-883a-d901e843633c" containerID="7d0a32afcee64c8d9a3daddb305f1e07aef2571b6cee8f9d6f934ce8323bdd50" exitCode=0 Nov 26 07:07:37 crc kubenswrapper[4492]: I1126 07:07:37.754455 4492 generic.go:334] "Generic (PLEG): container finished" podID="a06c80cd-520a-4f2d-883a-d901e843633c" containerID="1cbc9001acd903a36603c5b59d269cf862dfb33fc5fee575dbb53a1567bdc8d0" exitCode=0 Nov 26 07:07:37 crc kubenswrapper[4492]: I1126 07:07:37.754499 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a06c80cd-520a-4f2d-883a-d901e843633c","Type":"ContainerDied","Data":"13c3904599882233699afa6fe76d5912c7a8a90ea2540c9e4184677414dcf437"} Nov 26 07:07:37 crc kubenswrapper[4492]: I1126 07:07:37.754511 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 26 07:07:37 crc kubenswrapper[4492]: I1126 07:07:37.754541 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a06c80cd-520a-4f2d-883a-d901e843633c","Type":"ContainerDied","Data":"7d0a32afcee64c8d9a3daddb305f1e07aef2571b6cee8f9d6f934ce8323bdd50"} Nov 26 07:07:37 crc kubenswrapper[4492]: I1126 07:07:37.754558 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a06c80cd-520a-4f2d-883a-d901e843633c","Type":"ContainerDied","Data":"1cbc9001acd903a36603c5b59d269cf862dfb33fc5fee575dbb53a1567bdc8d0"} Nov 26 07:07:37 crc kubenswrapper[4492]: I1126 07:07:37.754571 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a06c80cd-520a-4f2d-883a-d901e843633c","Type":"ContainerDied","Data":"d1597052aa0ae832fbfdc8a90d96d9f052ee6a2b2aa2036bde8e0e27277e13e7"} Nov 26 07:07:37 crc kubenswrapper[4492]: I1126 07:07:37.754594 4492 scope.go:117] "RemoveContainer" containerID="13c3904599882233699afa6fe76d5912c7a8a90ea2540c9e4184677414dcf437" Nov 26 07:07:37 crc kubenswrapper[4492]: I1126 07:07:37.779069 4492 scope.go:117] "RemoveContainer" containerID="272fc3681cf626470a9bf23a14bc84e89a93f11537e07916a95c5bd3b9efe027" Nov 26 07:07:37 crc kubenswrapper[4492]: I1126 07:07:37.799133 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 26 07:07:37 crc kubenswrapper[4492]: I1126 07:07:37.805623 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 26 07:07:37 crc kubenswrapper[4492]: I1126 07:07:37.819478 4492 scope.go:117] "RemoveContainer" containerID="7d0a32afcee64c8d9a3daddb305f1e07aef2571b6cee8f9d6f934ce8323bdd50" Nov 26 07:07:37 crc kubenswrapper[4492]: I1126 07:07:37.820492 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 26 07:07:37 crc kubenswrapper[4492]: E1126 07:07:37.822034 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c7817ef7-6407-49f2-8f5b-3357945f0ec0" containerName="heat-cfnapi" Nov 26 07:07:37 crc kubenswrapper[4492]: I1126 07:07:37.822085 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="c7817ef7-6407-49f2-8f5b-3357945f0ec0" containerName="heat-cfnapi" Nov 26 07:07:37 crc kubenswrapper[4492]: E1126 07:07:37.822110 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="27be14bd-76c4-41ab-bef2-0b1bcc13e0df" containerName="heat-engine" Nov 26 07:07:37 crc kubenswrapper[4492]: I1126 07:07:37.822146 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="27be14bd-76c4-41ab-bef2-0b1bcc13e0df" containerName="heat-engine" Nov 26 07:07:37 crc kubenswrapper[4492]: E1126 07:07:37.822156 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c7817ef7-6407-49f2-8f5b-3357945f0ec0" containerName="heat-cfnapi" Nov 26 07:07:37 crc kubenswrapper[4492]: I1126 07:07:37.822163 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="c7817ef7-6407-49f2-8f5b-3357945f0ec0" containerName="heat-cfnapi" Nov 26 07:07:37 crc kubenswrapper[4492]: E1126 07:07:37.822203 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a06c80cd-520a-4f2d-883a-d901e843633c" containerName="sg-core" Nov 26 07:07:37 crc kubenswrapper[4492]: I1126 07:07:37.822212 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="a06c80cd-520a-4f2d-883a-d901e843633c" containerName="sg-core" Nov 26 07:07:37 crc kubenswrapper[4492]: E1126 07:07:37.822236 4492 
cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3e950c7f-2241-4475-a891-d97102b54b9b" containerName="heat-api" Nov 26 07:07:37 crc kubenswrapper[4492]: I1126 07:07:37.822243 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="3e950c7f-2241-4475-a891-d97102b54b9b" containerName="heat-api" Nov 26 07:07:37 crc kubenswrapper[4492]: E1126 07:07:37.822295 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a06c80cd-520a-4f2d-883a-d901e843633c" containerName="ceilometer-central-agent" Nov 26 07:07:37 crc kubenswrapper[4492]: I1126 07:07:37.822302 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="a06c80cd-520a-4f2d-883a-d901e843633c" containerName="ceilometer-central-agent" Nov 26 07:07:37 crc kubenswrapper[4492]: E1126 07:07:37.822317 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a06c80cd-520a-4f2d-883a-d901e843633c" containerName="ceilometer-notification-agent" Nov 26 07:07:37 crc kubenswrapper[4492]: I1126 07:07:37.822325 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="a06c80cd-520a-4f2d-883a-d901e843633c" containerName="ceilometer-notification-agent" Nov 26 07:07:37 crc kubenswrapper[4492]: E1126 07:07:37.822334 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3e950c7f-2241-4475-a891-d97102b54b9b" containerName="heat-api" Nov 26 07:07:37 crc kubenswrapper[4492]: I1126 07:07:37.822339 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="3e950c7f-2241-4475-a891-d97102b54b9b" containerName="heat-api" Nov 26 07:07:37 crc kubenswrapper[4492]: E1126 07:07:37.822380 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a06c80cd-520a-4f2d-883a-d901e843633c" containerName="proxy-httpd" Nov 26 07:07:37 crc kubenswrapper[4492]: I1126 07:07:37.822388 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="a06c80cd-520a-4f2d-883a-d901e843633c" containerName="proxy-httpd" Nov 26 07:07:37 crc kubenswrapper[4492]: I1126 07:07:37.822872 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="c7817ef7-6407-49f2-8f5b-3357945f0ec0" containerName="heat-cfnapi" Nov 26 07:07:37 crc kubenswrapper[4492]: I1126 07:07:37.822898 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="a06c80cd-520a-4f2d-883a-d901e843633c" containerName="proxy-httpd" Nov 26 07:07:37 crc kubenswrapper[4492]: I1126 07:07:37.822981 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="a06c80cd-520a-4f2d-883a-d901e843633c" containerName="ceilometer-central-agent" Nov 26 07:07:37 crc kubenswrapper[4492]: I1126 07:07:37.823023 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="3e950c7f-2241-4475-a891-d97102b54b9b" containerName="heat-api" Nov 26 07:07:37 crc kubenswrapper[4492]: I1126 07:07:37.823033 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="a06c80cd-520a-4f2d-883a-d901e843633c" containerName="sg-core" Nov 26 07:07:37 crc kubenswrapper[4492]: I1126 07:07:37.823051 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="27be14bd-76c4-41ab-bef2-0b1bcc13e0df" containerName="heat-engine" Nov 26 07:07:37 crc kubenswrapper[4492]: I1126 07:07:37.823064 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="a06c80cd-520a-4f2d-883a-d901e843633c" containerName="ceilometer-notification-agent" Nov 26 07:07:37 crc kubenswrapper[4492]: I1126 07:07:37.823116 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="3e950c7f-2241-4475-a891-d97102b54b9b" containerName="heat-api" Nov 26 
07:07:37 crc kubenswrapper[4492]: I1126 07:07:37.828240 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="c7817ef7-6407-49f2-8f5b-3357945f0ec0" containerName="heat-cfnapi" Nov 26 07:07:37 crc kubenswrapper[4492]: I1126 07:07:37.834480 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 26 07:07:37 crc kubenswrapper[4492]: I1126 07:07:37.834633 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 26 07:07:37 crc kubenswrapper[4492]: I1126 07:07:37.852447 4492 scope.go:117] "RemoveContainer" containerID="1cbc9001acd903a36603c5b59d269cf862dfb33fc5fee575dbb53a1567bdc8d0" Nov 26 07:07:37 crc kubenswrapper[4492]: I1126 07:07:37.854825 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 26 07:07:37 crc kubenswrapper[4492]: I1126 07:07:37.858388 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 26 07:07:37 crc kubenswrapper[4492]: I1126 07:07:37.887852 4492 scope.go:117] "RemoveContainer" containerID="13c3904599882233699afa6fe76d5912c7a8a90ea2540c9e4184677414dcf437" Nov 26 07:07:37 crc kubenswrapper[4492]: E1126 07:07:37.888431 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"13c3904599882233699afa6fe76d5912c7a8a90ea2540c9e4184677414dcf437\": container with ID starting with 13c3904599882233699afa6fe76d5912c7a8a90ea2540c9e4184677414dcf437 not found: ID does not exist" containerID="13c3904599882233699afa6fe76d5912c7a8a90ea2540c9e4184677414dcf437" Nov 26 07:07:37 crc kubenswrapper[4492]: I1126 07:07:37.888478 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"13c3904599882233699afa6fe76d5912c7a8a90ea2540c9e4184677414dcf437"} err="failed to get container status \"13c3904599882233699afa6fe76d5912c7a8a90ea2540c9e4184677414dcf437\": rpc error: code = NotFound desc = could not find container \"13c3904599882233699afa6fe76d5912c7a8a90ea2540c9e4184677414dcf437\": container with ID starting with 13c3904599882233699afa6fe76d5912c7a8a90ea2540c9e4184677414dcf437 not found: ID does not exist" Nov 26 07:07:37 crc kubenswrapper[4492]: I1126 07:07:37.888512 4492 scope.go:117] "RemoveContainer" containerID="272fc3681cf626470a9bf23a14bc84e89a93f11537e07916a95c5bd3b9efe027" Nov 26 07:07:37 crc kubenswrapper[4492]: E1126 07:07:37.888961 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"272fc3681cf626470a9bf23a14bc84e89a93f11537e07916a95c5bd3b9efe027\": container with ID starting with 272fc3681cf626470a9bf23a14bc84e89a93f11537e07916a95c5bd3b9efe027 not found: ID does not exist" containerID="272fc3681cf626470a9bf23a14bc84e89a93f11537e07916a95c5bd3b9efe027" Nov 26 07:07:37 crc kubenswrapper[4492]: I1126 07:07:37.889055 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"272fc3681cf626470a9bf23a14bc84e89a93f11537e07916a95c5bd3b9efe027"} err="failed to get container status \"272fc3681cf626470a9bf23a14bc84e89a93f11537e07916a95c5bd3b9efe027\": rpc error: code = NotFound desc = could not find container \"272fc3681cf626470a9bf23a14bc84e89a93f11537e07916a95c5bd3b9efe027\": container with ID starting with 272fc3681cf626470a9bf23a14bc84e89a93f11537e07916a95c5bd3b9efe027 not found: ID does not exist" Nov 26 07:07:37 crc kubenswrapper[4492]: I1126 07:07:37.889134 
4492 scope.go:117] "RemoveContainer" containerID="7d0a32afcee64c8d9a3daddb305f1e07aef2571b6cee8f9d6f934ce8323bdd50" Nov 26 07:07:37 crc kubenswrapper[4492]: E1126 07:07:37.889860 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7d0a32afcee64c8d9a3daddb305f1e07aef2571b6cee8f9d6f934ce8323bdd50\": container with ID starting with 7d0a32afcee64c8d9a3daddb305f1e07aef2571b6cee8f9d6f934ce8323bdd50 not found: ID does not exist" containerID="7d0a32afcee64c8d9a3daddb305f1e07aef2571b6cee8f9d6f934ce8323bdd50" Nov 26 07:07:37 crc kubenswrapper[4492]: I1126 07:07:37.889892 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7d0a32afcee64c8d9a3daddb305f1e07aef2571b6cee8f9d6f934ce8323bdd50"} err="failed to get container status \"7d0a32afcee64c8d9a3daddb305f1e07aef2571b6cee8f9d6f934ce8323bdd50\": rpc error: code = NotFound desc = could not find container \"7d0a32afcee64c8d9a3daddb305f1e07aef2571b6cee8f9d6f934ce8323bdd50\": container with ID starting with 7d0a32afcee64c8d9a3daddb305f1e07aef2571b6cee8f9d6f934ce8323bdd50 not found: ID does not exist" Nov 26 07:07:37 crc kubenswrapper[4492]: I1126 07:07:37.889910 4492 scope.go:117] "RemoveContainer" containerID="1cbc9001acd903a36603c5b59d269cf862dfb33fc5fee575dbb53a1567bdc8d0" Nov 26 07:07:37 crc kubenswrapper[4492]: E1126 07:07:37.890693 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1cbc9001acd903a36603c5b59d269cf862dfb33fc5fee575dbb53a1567bdc8d0\": container with ID starting with 1cbc9001acd903a36603c5b59d269cf862dfb33fc5fee575dbb53a1567bdc8d0 not found: ID does not exist" containerID="1cbc9001acd903a36603c5b59d269cf862dfb33fc5fee575dbb53a1567bdc8d0" Nov 26 07:07:37 crc kubenswrapper[4492]: I1126 07:07:37.890774 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1cbc9001acd903a36603c5b59d269cf862dfb33fc5fee575dbb53a1567bdc8d0"} err="failed to get container status \"1cbc9001acd903a36603c5b59d269cf862dfb33fc5fee575dbb53a1567bdc8d0\": rpc error: code = NotFound desc = could not find container \"1cbc9001acd903a36603c5b59d269cf862dfb33fc5fee575dbb53a1567bdc8d0\": container with ID starting with 1cbc9001acd903a36603c5b59d269cf862dfb33fc5fee575dbb53a1567bdc8d0 not found: ID does not exist" Nov 26 07:07:37 crc kubenswrapper[4492]: I1126 07:07:37.890863 4492 scope.go:117] "RemoveContainer" containerID="13c3904599882233699afa6fe76d5912c7a8a90ea2540c9e4184677414dcf437" Nov 26 07:07:37 crc kubenswrapper[4492]: I1126 07:07:37.891201 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"13c3904599882233699afa6fe76d5912c7a8a90ea2540c9e4184677414dcf437"} err="failed to get container status \"13c3904599882233699afa6fe76d5912c7a8a90ea2540c9e4184677414dcf437\": rpc error: code = NotFound desc = could not find container \"13c3904599882233699afa6fe76d5912c7a8a90ea2540c9e4184677414dcf437\": container with ID starting with 13c3904599882233699afa6fe76d5912c7a8a90ea2540c9e4184677414dcf437 not found: ID does not exist" Nov 26 07:07:37 crc kubenswrapper[4492]: I1126 07:07:37.891225 4492 scope.go:117] "RemoveContainer" containerID="272fc3681cf626470a9bf23a14bc84e89a93f11537e07916a95c5bd3b9efe027" Nov 26 07:07:37 crc kubenswrapper[4492]: I1126 07:07:37.891735 4492 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"272fc3681cf626470a9bf23a14bc84e89a93f11537e07916a95c5bd3b9efe027"} err="failed to get container status \"272fc3681cf626470a9bf23a14bc84e89a93f11537e07916a95c5bd3b9efe027\": rpc error: code = NotFound desc = could not find container \"272fc3681cf626470a9bf23a14bc84e89a93f11537e07916a95c5bd3b9efe027\": container with ID starting with 272fc3681cf626470a9bf23a14bc84e89a93f11537e07916a95c5bd3b9efe027 not found: ID does not exist" Nov 26 07:07:37 crc kubenswrapper[4492]: I1126 07:07:37.891782 4492 scope.go:117] "RemoveContainer" containerID="7d0a32afcee64c8d9a3daddb305f1e07aef2571b6cee8f9d6f934ce8323bdd50" Nov 26 07:07:37 crc kubenswrapper[4492]: I1126 07:07:37.892110 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7d0a32afcee64c8d9a3daddb305f1e07aef2571b6cee8f9d6f934ce8323bdd50"} err="failed to get container status \"7d0a32afcee64c8d9a3daddb305f1e07aef2571b6cee8f9d6f934ce8323bdd50\": rpc error: code = NotFound desc = could not find container \"7d0a32afcee64c8d9a3daddb305f1e07aef2571b6cee8f9d6f934ce8323bdd50\": container with ID starting with 7d0a32afcee64c8d9a3daddb305f1e07aef2571b6cee8f9d6f934ce8323bdd50 not found: ID does not exist" Nov 26 07:07:37 crc kubenswrapper[4492]: I1126 07:07:37.892134 4492 scope.go:117] "RemoveContainer" containerID="1cbc9001acd903a36603c5b59d269cf862dfb33fc5fee575dbb53a1567bdc8d0" Nov 26 07:07:37 crc kubenswrapper[4492]: I1126 07:07:37.892517 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1cbc9001acd903a36603c5b59d269cf862dfb33fc5fee575dbb53a1567bdc8d0"} err="failed to get container status \"1cbc9001acd903a36603c5b59d269cf862dfb33fc5fee575dbb53a1567bdc8d0\": rpc error: code = NotFound desc = could not find container \"1cbc9001acd903a36603c5b59d269cf862dfb33fc5fee575dbb53a1567bdc8d0\": container with ID starting with 1cbc9001acd903a36603c5b59d269cf862dfb33fc5fee575dbb53a1567bdc8d0 not found: ID does not exist" Nov 26 07:07:37 crc kubenswrapper[4492]: I1126 07:07:37.892613 4492 scope.go:117] "RemoveContainer" containerID="13c3904599882233699afa6fe76d5912c7a8a90ea2540c9e4184677414dcf437" Nov 26 07:07:37 crc kubenswrapper[4492]: I1126 07:07:37.893053 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"13c3904599882233699afa6fe76d5912c7a8a90ea2540c9e4184677414dcf437"} err="failed to get container status \"13c3904599882233699afa6fe76d5912c7a8a90ea2540c9e4184677414dcf437\": rpc error: code = NotFound desc = could not find container \"13c3904599882233699afa6fe76d5912c7a8a90ea2540c9e4184677414dcf437\": container with ID starting with 13c3904599882233699afa6fe76d5912c7a8a90ea2540c9e4184677414dcf437 not found: ID does not exist" Nov 26 07:07:37 crc kubenswrapper[4492]: I1126 07:07:37.893081 4492 scope.go:117] "RemoveContainer" containerID="272fc3681cf626470a9bf23a14bc84e89a93f11537e07916a95c5bd3b9efe027" Nov 26 07:07:37 crc kubenswrapper[4492]: I1126 07:07:37.893427 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"272fc3681cf626470a9bf23a14bc84e89a93f11537e07916a95c5bd3b9efe027"} err="failed to get container status \"272fc3681cf626470a9bf23a14bc84e89a93f11537e07916a95c5bd3b9efe027\": rpc error: code = NotFound desc = could not find container \"272fc3681cf626470a9bf23a14bc84e89a93f11537e07916a95c5bd3b9efe027\": container with ID starting with 272fc3681cf626470a9bf23a14bc84e89a93f11537e07916a95c5bd3b9efe027 not found: ID does not exist" Nov 
26 07:07:37 crc kubenswrapper[4492]: I1126 07:07:37.893449 4492 scope.go:117] "RemoveContainer" containerID="7d0a32afcee64c8d9a3daddb305f1e07aef2571b6cee8f9d6f934ce8323bdd50" Nov 26 07:07:37 crc kubenswrapper[4492]: I1126 07:07:37.893754 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7d0a32afcee64c8d9a3daddb305f1e07aef2571b6cee8f9d6f934ce8323bdd50"} err="failed to get container status \"7d0a32afcee64c8d9a3daddb305f1e07aef2571b6cee8f9d6f934ce8323bdd50\": rpc error: code = NotFound desc = could not find container \"7d0a32afcee64c8d9a3daddb305f1e07aef2571b6cee8f9d6f934ce8323bdd50\": container with ID starting with 7d0a32afcee64c8d9a3daddb305f1e07aef2571b6cee8f9d6f934ce8323bdd50 not found: ID does not exist" Nov 26 07:07:37 crc kubenswrapper[4492]: I1126 07:07:37.893844 4492 scope.go:117] "RemoveContainer" containerID="1cbc9001acd903a36603c5b59d269cf862dfb33fc5fee575dbb53a1567bdc8d0" Nov 26 07:07:37 crc kubenswrapper[4492]: I1126 07:07:37.894148 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1cbc9001acd903a36603c5b59d269cf862dfb33fc5fee575dbb53a1567bdc8d0"} err="failed to get container status \"1cbc9001acd903a36603c5b59d269cf862dfb33fc5fee575dbb53a1567bdc8d0\": rpc error: code = NotFound desc = could not find container \"1cbc9001acd903a36603c5b59d269cf862dfb33fc5fee575dbb53a1567bdc8d0\": container with ID starting with 1cbc9001acd903a36603c5b59d269cf862dfb33fc5fee575dbb53a1567bdc8d0 not found: ID does not exist" Nov 26 07:07:37 crc kubenswrapper[4492]: I1126 07:07:37.948304 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d0f1313d-04c9-4d97-9339-31b1549776cb-log-httpd\") pod \"ceilometer-0\" (UID: \"d0f1313d-04c9-4d97-9339-31b1549776cb\") " pod="openstack/ceilometer-0" Nov 26 07:07:37 crc kubenswrapper[4492]: I1126 07:07:37.948394 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d0f1313d-04c9-4d97-9339-31b1549776cb-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"d0f1313d-04c9-4d97-9339-31b1549776cb\") " pod="openstack/ceilometer-0" Nov 26 07:07:37 crc kubenswrapper[4492]: I1126 07:07:37.948415 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d0f1313d-04c9-4d97-9339-31b1549776cb-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"d0f1313d-04c9-4d97-9339-31b1549776cb\") " pod="openstack/ceilometer-0" Nov 26 07:07:37 crc kubenswrapper[4492]: I1126 07:07:37.948439 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gzbwx\" (UniqueName: \"kubernetes.io/projected/d0f1313d-04c9-4d97-9339-31b1549776cb-kube-api-access-gzbwx\") pod \"ceilometer-0\" (UID: \"d0f1313d-04c9-4d97-9339-31b1549776cb\") " pod="openstack/ceilometer-0" Nov 26 07:07:37 crc kubenswrapper[4492]: I1126 07:07:37.948517 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d0f1313d-04c9-4d97-9339-31b1549776cb-config-data\") pod \"ceilometer-0\" (UID: \"d0f1313d-04c9-4d97-9339-31b1549776cb\") " pod="openstack/ceilometer-0" Nov 26 07:07:37 crc kubenswrapper[4492]: I1126 07:07:37.948539 4492 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d0f1313d-04c9-4d97-9339-31b1549776cb-scripts\") pod \"ceilometer-0\" (UID: \"d0f1313d-04c9-4d97-9339-31b1549776cb\") " pod="openstack/ceilometer-0" Nov 26 07:07:37 crc kubenswrapper[4492]: I1126 07:07:37.948566 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d0f1313d-04c9-4d97-9339-31b1549776cb-run-httpd\") pod \"ceilometer-0\" (UID: \"d0f1313d-04c9-4d97-9339-31b1549776cb\") " pod="openstack/ceilometer-0" Nov 26 07:07:38 crc kubenswrapper[4492]: I1126 07:07:38.053188 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d0f1313d-04c9-4d97-9339-31b1549776cb-config-data\") pod \"ceilometer-0\" (UID: \"d0f1313d-04c9-4d97-9339-31b1549776cb\") " pod="openstack/ceilometer-0" Nov 26 07:07:38 crc kubenswrapper[4492]: I1126 07:07:38.054196 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d0f1313d-04c9-4d97-9339-31b1549776cb-scripts\") pod \"ceilometer-0\" (UID: \"d0f1313d-04c9-4d97-9339-31b1549776cb\") " pod="openstack/ceilometer-0" Nov 26 07:07:38 crc kubenswrapper[4492]: I1126 07:07:38.054429 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d0f1313d-04c9-4d97-9339-31b1549776cb-run-httpd\") pod \"ceilometer-0\" (UID: \"d0f1313d-04c9-4d97-9339-31b1549776cb\") " pod="openstack/ceilometer-0" Nov 26 07:07:38 crc kubenswrapper[4492]: I1126 07:07:38.054577 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d0f1313d-04c9-4d97-9339-31b1549776cb-log-httpd\") pod \"ceilometer-0\" (UID: \"d0f1313d-04c9-4d97-9339-31b1549776cb\") " pod="openstack/ceilometer-0" Nov 26 07:07:38 crc kubenswrapper[4492]: I1126 07:07:38.054807 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d0f1313d-04c9-4d97-9339-31b1549776cb-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"d0f1313d-04c9-4d97-9339-31b1549776cb\") " pod="openstack/ceilometer-0" Nov 26 07:07:38 crc kubenswrapper[4492]: I1126 07:07:38.054922 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d0f1313d-04c9-4d97-9339-31b1549776cb-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"d0f1313d-04c9-4d97-9339-31b1549776cb\") " pod="openstack/ceilometer-0" Nov 26 07:07:38 crc kubenswrapper[4492]: I1126 07:07:38.055060 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gzbwx\" (UniqueName: \"kubernetes.io/projected/d0f1313d-04c9-4d97-9339-31b1549776cb-kube-api-access-gzbwx\") pod \"ceilometer-0\" (UID: \"d0f1313d-04c9-4d97-9339-31b1549776cb\") " pod="openstack/ceilometer-0" Nov 26 07:07:38 crc kubenswrapper[4492]: I1126 07:07:38.055765 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d0f1313d-04c9-4d97-9339-31b1549776cb-run-httpd\") pod \"ceilometer-0\" (UID: \"d0f1313d-04c9-4d97-9339-31b1549776cb\") " pod="openstack/ceilometer-0" Nov 26 07:07:38 crc kubenswrapper[4492]: I1126 07:07:38.055786 4492 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d0f1313d-04c9-4d97-9339-31b1549776cb-log-httpd\") pod \"ceilometer-0\" (UID: \"d0f1313d-04c9-4d97-9339-31b1549776cb\") " pod="openstack/ceilometer-0" Nov 26 07:07:38 crc kubenswrapper[4492]: I1126 07:07:38.058756 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d0f1313d-04c9-4d97-9339-31b1549776cb-scripts\") pod \"ceilometer-0\" (UID: \"d0f1313d-04c9-4d97-9339-31b1549776cb\") " pod="openstack/ceilometer-0" Nov 26 07:07:38 crc kubenswrapper[4492]: I1126 07:07:38.058831 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d0f1313d-04c9-4d97-9339-31b1549776cb-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"d0f1313d-04c9-4d97-9339-31b1549776cb\") " pod="openstack/ceilometer-0" Nov 26 07:07:38 crc kubenswrapper[4492]: I1126 07:07:38.059803 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d0f1313d-04c9-4d97-9339-31b1549776cb-config-data\") pod \"ceilometer-0\" (UID: \"d0f1313d-04c9-4d97-9339-31b1549776cb\") " pod="openstack/ceilometer-0" Nov 26 07:07:38 crc kubenswrapper[4492]: I1126 07:07:38.068498 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d0f1313d-04c9-4d97-9339-31b1549776cb-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"d0f1313d-04c9-4d97-9339-31b1549776cb\") " pod="openstack/ceilometer-0" Nov 26 07:07:38 crc kubenswrapper[4492]: I1126 07:07:38.078330 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gzbwx\" (UniqueName: \"kubernetes.io/projected/d0f1313d-04c9-4d97-9339-31b1549776cb-kube-api-access-gzbwx\") pod \"ceilometer-0\" (UID: \"d0f1313d-04c9-4d97-9339-31b1549776cb\") " pod="openstack/ceilometer-0" Nov 26 07:07:38 crc kubenswrapper[4492]: I1126 07:07:38.165600 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 26 07:07:38 crc kubenswrapper[4492]: I1126 07:07:38.459826 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a06c80cd-520a-4f2d-883a-d901e843633c" path="/var/lib/kubelet/pods/a06c80cd-520a-4f2d-883a-d901e843633c/volumes" Nov 26 07:07:38 crc kubenswrapper[4492]: I1126 07:07:38.563686 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 26 07:07:38 crc kubenswrapper[4492]: I1126 07:07:38.763663 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d0f1313d-04c9-4d97-9339-31b1549776cb","Type":"ContainerStarted","Data":"7d3d12b42c9d6e7e633addbf16fdc773be564dcb249daff6ed7310cd3cdb3684"} Nov 26 07:07:39 crc kubenswrapper[4492]: I1126 07:07:39.778539 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d0f1313d-04c9-4d97-9339-31b1549776cb","Type":"ContainerStarted","Data":"d4404f5d438d16fc5d55fa0947c3d7d9d0d0ee9ad9f890b4cdf0f5f501eb0666"} Nov 26 07:07:40 crc kubenswrapper[4492]: I1126 07:07:40.819775 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d0f1313d-04c9-4d97-9339-31b1549776cb","Type":"ContainerStarted","Data":"cfeb6c9b597aaca3475ac5c07af3cfafdb048f3a78dd849e2c753d2b414e0e6f"} Nov 26 07:07:41 crc kubenswrapper[4492]: I1126 07:07:41.833211 4492 generic.go:334] "Generic (PLEG): container finished" podID="eb4502e1-bb87-4f1f-a4df-f6ffdfc8c1fa" containerID="6980074a256088ca155f9a2337d5f00ab5b25870c482a343f59fe2b9d33e6976" exitCode=0 Nov 26 07:07:41 crc kubenswrapper[4492]: I1126 07:07:41.833763 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-wnw6v" event={"ID":"eb4502e1-bb87-4f1f-a4df-f6ffdfc8c1fa","Type":"ContainerDied","Data":"6980074a256088ca155f9a2337d5f00ab5b25870c482a343f59fe2b9d33e6976"} Nov 26 07:07:41 crc kubenswrapper[4492]: I1126 07:07:41.838336 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d0f1313d-04c9-4d97-9339-31b1549776cb","Type":"ContainerStarted","Data":"3a3f66491451c28b610d2970e1622e25f797dea48665fc4d4e8b4783cb269bd6"} Nov 26 07:07:42 crc kubenswrapper[4492]: I1126 07:07:42.860211 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d0f1313d-04c9-4d97-9339-31b1549776cb","Type":"ContainerStarted","Data":"00bb1a9e59966f1c51718cbdb732daa85702692c3e58b5b1814a351405943e77"} Nov 26 07:07:43 crc kubenswrapper[4492]: I1126 07:07:43.200459 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-wnw6v" Nov 26 07:07:43 crc kubenswrapper[4492]: I1126 07:07:43.227002 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.182284886 podStartE2EDuration="6.2269794s" podCreationTimestamp="2025-11-26 07:07:37 +0000 UTC" firstStartedPulling="2025-11-26 07:07:38.581277247 +0000 UTC m=+1154.465165544" lastFinishedPulling="2025-11-26 07:07:42.62597176 +0000 UTC m=+1158.509860058" observedRunningTime="2025-11-26 07:07:42.89058342 +0000 UTC m=+1158.774471718" watchObservedRunningTime="2025-11-26 07:07:43.2269794 +0000 UTC m=+1159.110867697" Nov 26 07:07:43 crc kubenswrapper[4492]: I1126 07:07:43.265769 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eb4502e1-bb87-4f1f-a4df-f6ffdfc8c1fa-combined-ca-bundle\") pod \"eb4502e1-bb87-4f1f-a4df-f6ffdfc8c1fa\" (UID: \"eb4502e1-bb87-4f1f-a4df-f6ffdfc8c1fa\") " Nov 26 07:07:43 crc kubenswrapper[4492]: I1126 07:07:43.266100 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jt9k2\" (UniqueName: \"kubernetes.io/projected/eb4502e1-bb87-4f1f-a4df-f6ffdfc8c1fa-kube-api-access-jt9k2\") pod \"eb4502e1-bb87-4f1f-a4df-f6ffdfc8c1fa\" (UID: \"eb4502e1-bb87-4f1f-a4df-f6ffdfc8c1fa\") " Nov 26 07:07:43 crc kubenswrapper[4492]: I1126 07:07:43.266225 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/eb4502e1-bb87-4f1f-a4df-f6ffdfc8c1fa-config-data\") pod \"eb4502e1-bb87-4f1f-a4df-f6ffdfc8c1fa\" (UID: \"eb4502e1-bb87-4f1f-a4df-f6ffdfc8c1fa\") " Nov 26 07:07:43 crc kubenswrapper[4492]: I1126 07:07:43.266289 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/eb4502e1-bb87-4f1f-a4df-f6ffdfc8c1fa-scripts\") pod \"eb4502e1-bb87-4f1f-a4df-f6ffdfc8c1fa\" (UID: \"eb4502e1-bb87-4f1f-a4df-f6ffdfc8c1fa\") " Nov 26 07:07:43 crc kubenswrapper[4492]: E1126 07:07:43.288235 4492 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/eb4502e1-bb87-4f1f-a4df-f6ffdfc8c1fa-combined-ca-bundle podName:eb4502e1-bb87-4f1f-a4df-f6ffdfc8c1fa nodeName:}" failed. No retries permitted until 2025-11-26 07:07:43.788165043 +0000 UTC m=+1159.672053342 (durationBeforeRetry 500ms). Error: error cleaning subPath mounts for volume "combined-ca-bundle" (UniqueName: "kubernetes.io/secret/eb4502e1-bb87-4f1f-a4df-f6ffdfc8c1fa-combined-ca-bundle") pod "eb4502e1-bb87-4f1f-a4df-f6ffdfc8c1fa" (UID: "eb4502e1-bb87-4f1f-a4df-f6ffdfc8c1fa") : error deleting /var/lib/kubelet/pods/eb4502e1-bb87-4f1f-a4df-f6ffdfc8c1fa/volume-subpaths: remove /var/lib/kubelet/pods/eb4502e1-bb87-4f1f-a4df-f6ffdfc8c1fa/volume-subpaths: no such file or directory Nov 26 07:07:43 crc kubenswrapper[4492]: I1126 07:07:43.288475 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/eb4502e1-bb87-4f1f-a4df-f6ffdfc8c1fa-kube-api-access-jt9k2" (OuterVolumeSpecName: "kube-api-access-jt9k2") pod "eb4502e1-bb87-4f1f-a4df-f6ffdfc8c1fa" (UID: "eb4502e1-bb87-4f1f-a4df-f6ffdfc8c1fa"). InnerVolumeSpecName "kube-api-access-jt9k2". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:07:43 crc kubenswrapper[4492]: I1126 07:07:43.294283 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/eb4502e1-bb87-4f1f-a4df-f6ffdfc8c1fa-config-data" (OuterVolumeSpecName: "config-data") pod "eb4502e1-bb87-4f1f-a4df-f6ffdfc8c1fa" (UID: "eb4502e1-bb87-4f1f-a4df-f6ffdfc8c1fa"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:07:43 crc kubenswrapper[4492]: I1126 07:07:43.294329 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/eb4502e1-bb87-4f1f-a4df-f6ffdfc8c1fa-scripts" (OuterVolumeSpecName: "scripts") pod "eb4502e1-bb87-4f1f-a4df-f6ffdfc8c1fa" (UID: "eb4502e1-bb87-4f1f-a4df-f6ffdfc8c1fa"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:07:43 crc kubenswrapper[4492]: I1126 07:07:43.368389 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jt9k2\" (UniqueName: \"kubernetes.io/projected/eb4502e1-bb87-4f1f-a4df-f6ffdfc8c1fa-kube-api-access-jt9k2\") on node \"crc\" DevicePath \"\"" Nov 26 07:07:43 crc kubenswrapper[4492]: I1126 07:07:43.368417 4492 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/eb4502e1-bb87-4f1f-a4df-f6ffdfc8c1fa-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 07:07:43 crc kubenswrapper[4492]: I1126 07:07:43.368429 4492 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/eb4502e1-bb87-4f1f-a4df-f6ffdfc8c1fa-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 07:07:43 crc kubenswrapper[4492]: I1126 07:07:43.871874 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-wnw6v" event={"ID":"eb4502e1-bb87-4f1f-a4df-f6ffdfc8c1fa","Type":"ContainerDied","Data":"ffe0c578b301d7c935504cc3bd70eeb82426bad001400dd8692551c5df83169b"} Nov 26 07:07:43 crc kubenswrapper[4492]: I1126 07:07:43.871943 4492 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ffe0c578b301d7c935504cc3bd70eeb82426bad001400dd8692551c5df83169b" Nov 26 07:07:43 crc kubenswrapper[4492]: I1126 07:07:43.871908 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-wnw6v" Nov 26 07:07:43 crc kubenswrapper[4492]: I1126 07:07:43.872054 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 26 07:07:43 crc kubenswrapper[4492]: I1126 07:07:43.878000 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eb4502e1-bb87-4f1f-a4df-f6ffdfc8c1fa-combined-ca-bundle\") pod \"eb4502e1-bb87-4f1f-a4df-f6ffdfc8c1fa\" (UID: \"eb4502e1-bb87-4f1f-a4df-f6ffdfc8c1fa\") " Nov 26 07:07:43 crc kubenswrapper[4492]: I1126 07:07:43.892463 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/eb4502e1-bb87-4f1f-a4df-f6ffdfc8c1fa-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "eb4502e1-bb87-4f1f-a4df-f6ffdfc8c1fa" (UID: "eb4502e1-bb87-4f1f-a4df-f6ffdfc8c1fa"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:07:43 crc kubenswrapper[4492]: I1126 07:07:43.963933 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 26 07:07:43 crc kubenswrapper[4492]: E1126 07:07:43.964297 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eb4502e1-bb87-4f1f-a4df-f6ffdfc8c1fa" containerName="nova-cell0-conductor-db-sync" Nov 26 07:07:43 crc kubenswrapper[4492]: I1126 07:07:43.964314 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="eb4502e1-bb87-4f1f-a4df-f6ffdfc8c1fa" containerName="nova-cell0-conductor-db-sync" Nov 26 07:07:43 crc kubenswrapper[4492]: I1126 07:07:43.964479 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="eb4502e1-bb87-4f1f-a4df-f6ffdfc8c1fa" containerName="nova-cell0-conductor-db-sync" Nov 26 07:07:43 crc kubenswrapper[4492]: I1126 07:07:43.964987 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 26 07:07:43 crc kubenswrapper[4492]: I1126 07:07:43.994484 4492 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eb4502e1-bb87-4f1f-a4df-f6ffdfc8c1fa-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:07:44 crc kubenswrapper[4492]: I1126 07:07:44.014274 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 26 07:07:44 crc kubenswrapper[4492]: I1126 07:07:44.096441 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/008914c9-e527-46e5-9551-23b0efb08208-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"008914c9-e527-46e5-9551-23b0efb08208\") " pod="openstack/nova-cell0-conductor-0" Nov 26 07:07:44 crc kubenswrapper[4492]: I1126 07:07:44.096525 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/008914c9-e527-46e5-9551-23b0efb08208-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"008914c9-e527-46e5-9551-23b0efb08208\") " pod="openstack/nova-cell0-conductor-0" Nov 26 07:07:44 crc kubenswrapper[4492]: I1126 07:07:44.096586 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wdlf4\" (UniqueName: \"kubernetes.io/projected/008914c9-e527-46e5-9551-23b0efb08208-kube-api-access-wdlf4\") pod \"nova-cell0-conductor-0\" (UID: \"008914c9-e527-46e5-9551-23b0efb08208\") " pod="openstack/nova-cell0-conductor-0" Nov 26 07:07:44 crc kubenswrapper[4492]: I1126 07:07:44.198398 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/008914c9-e527-46e5-9551-23b0efb08208-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"008914c9-e527-46e5-9551-23b0efb08208\") " pod="openstack/nova-cell0-conductor-0" Nov 26 07:07:44 crc kubenswrapper[4492]: I1126 07:07:44.198499 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wdlf4\" (UniqueName: \"kubernetes.io/projected/008914c9-e527-46e5-9551-23b0efb08208-kube-api-access-wdlf4\") pod \"nova-cell0-conductor-0\" (UID: \"008914c9-e527-46e5-9551-23b0efb08208\") " pod="openstack/nova-cell0-conductor-0" Nov 26 07:07:44 crc kubenswrapper[4492]: I1126 07:07:44.198603 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/008914c9-e527-46e5-9551-23b0efb08208-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"008914c9-e527-46e5-9551-23b0efb08208\") " pod="openstack/nova-cell0-conductor-0" Nov 26 07:07:44 crc kubenswrapper[4492]: I1126 07:07:44.202414 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/008914c9-e527-46e5-9551-23b0efb08208-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"008914c9-e527-46e5-9551-23b0efb08208\") " pod="openstack/nova-cell0-conductor-0" Nov 26 07:07:44 crc kubenswrapper[4492]: I1126 07:07:44.207997 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/008914c9-e527-46e5-9551-23b0efb08208-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"008914c9-e527-46e5-9551-23b0efb08208\") " pod="openstack/nova-cell0-conductor-0" Nov 26 07:07:44 crc kubenswrapper[4492]: I1126 07:07:44.216986 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wdlf4\" (UniqueName: \"kubernetes.io/projected/008914c9-e527-46e5-9551-23b0efb08208-kube-api-access-wdlf4\") pod \"nova-cell0-conductor-0\" (UID: \"008914c9-e527-46e5-9551-23b0efb08208\") " pod="openstack/nova-cell0-conductor-0" Nov 26 07:07:44 crc kubenswrapper[4492]: I1126 07:07:44.278871 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 26 07:07:44 crc kubenswrapper[4492]: I1126 07:07:44.752482 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 26 07:07:44 crc kubenswrapper[4492]: I1126 07:07:44.882825 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"008914c9-e527-46e5-9551-23b0efb08208","Type":"ContainerStarted","Data":"ecfeac808ad002667f20fbc7a875e24c82b08885cea7903174843748aa02095e"} Nov 26 07:07:44 crc kubenswrapper[4492]: I1126 07:07:44.963760 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 26 07:07:45 crc kubenswrapper[4492]: I1126 07:07:45.899870 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="d0f1313d-04c9-4d97-9339-31b1549776cb" containerName="ceilometer-central-agent" containerID="cri-o://d4404f5d438d16fc5d55fa0947c3d7d9d0d0ee9ad9f890b4cdf0f5f501eb0666" gracePeriod=30 Nov 26 07:07:45 crc kubenswrapper[4492]: I1126 07:07:45.900654 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"008914c9-e527-46e5-9551-23b0efb08208","Type":"ContainerStarted","Data":"0b6d41eeeb868e520fea9a49ef006d772de5b250a9462f76f50c69bd7e3596ac"} Nov 26 07:07:45 crc kubenswrapper[4492]: I1126 07:07:45.901353 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell0-conductor-0" Nov 26 07:07:45 crc kubenswrapper[4492]: I1126 07:07:45.901413 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="d0f1313d-04c9-4d97-9339-31b1549776cb" containerName="proxy-httpd" containerID="cri-o://00bb1a9e59966f1c51718cbdb732daa85702692c3e58b5b1814a351405943e77" gracePeriod=30 Nov 26 07:07:45 crc kubenswrapper[4492]: I1126 07:07:45.901480 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="d0f1313d-04c9-4d97-9339-31b1549776cb" 
containerName="sg-core" containerID="cri-o://3a3f66491451c28b610d2970e1622e25f797dea48665fc4d4e8b4783cb269bd6" gracePeriod=30 Nov 26 07:07:45 crc kubenswrapper[4492]: I1126 07:07:45.901528 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="d0f1313d-04c9-4d97-9339-31b1549776cb" containerName="ceilometer-notification-agent" containerID="cri-o://cfeb6c9b597aaca3475ac5c07af3cfafdb048f3a78dd849e2c753d2b414e0e6f" gracePeriod=30 Nov 26 07:07:45 crc kubenswrapper[4492]: I1126 07:07:45.924444 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=2.924411588 podStartE2EDuration="2.924411588s" podCreationTimestamp="2025-11-26 07:07:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:07:45.923068011 +0000 UTC m=+1161.806956309" watchObservedRunningTime="2025-11-26 07:07:45.924411588 +0000 UTC m=+1161.808299886" Nov 26 07:07:46 crc kubenswrapper[4492]: I1126 07:07:46.576656 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 26 07:07:46 crc kubenswrapper[4492]: I1126 07:07:46.650904 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d0f1313d-04c9-4d97-9339-31b1549776cb-combined-ca-bundle\") pod \"d0f1313d-04c9-4d97-9339-31b1549776cb\" (UID: \"d0f1313d-04c9-4d97-9339-31b1549776cb\") " Nov 26 07:07:46 crc kubenswrapper[4492]: I1126 07:07:46.651146 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d0f1313d-04c9-4d97-9339-31b1549776cb-run-httpd\") pod \"d0f1313d-04c9-4d97-9339-31b1549776cb\" (UID: \"d0f1313d-04c9-4d97-9339-31b1549776cb\") " Nov 26 07:07:46 crc kubenswrapper[4492]: I1126 07:07:46.651184 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d0f1313d-04c9-4d97-9339-31b1549776cb-scripts\") pod \"d0f1313d-04c9-4d97-9339-31b1549776cb\" (UID: \"d0f1313d-04c9-4d97-9339-31b1549776cb\") " Nov 26 07:07:46 crc kubenswrapper[4492]: I1126 07:07:46.651242 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d0f1313d-04c9-4d97-9339-31b1549776cb-log-httpd\") pod \"d0f1313d-04c9-4d97-9339-31b1549776cb\" (UID: \"d0f1313d-04c9-4d97-9339-31b1549776cb\") " Nov 26 07:07:46 crc kubenswrapper[4492]: I1126 07:07:46.651308 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gzbwx\" (UniqueName: \"kubernetes.io/projected/d0f1313d-04c9-4d97-9339-31b1549776cb-kube-api-access-gzbwx\") pod \"d0f1313d-04c9-4d97-9339-31b1549776cb\" (UID: \"d0f1313d-04c9-4d97-9339-31b1549776cb\") " Nov 26 07:07:46 crc kubenswrapper[4492]: I1126 07:07:46.651418 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d0f1313d-04c9-4d97-9339-31b1549776cb-sg-core-conf-yaml\") pod \"d0f1313d-04c9-4d97-9339-31b1549776cb\" (UID: \"d0f1313d-04c9-4d97-9339-31b1549776cb\") " Nov 26 07:07:46 crc kubenswrapper[4492]: I1126 07:07:46.651463 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/d0f1313d-04c9-4d97-9339-31b1549776cb-config-data\") pod \"d0f1313d-04c9-4d97-9339-31b1549776cb\" (UID: \"d0f1313d-04c9-4d97-9339-31b1549776cb\") " Nov 26 07:07:46 crc kubenswrapper[4492]: I1126 07:07:46.652281 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d0f1313d-04c9-4d97-9339-31b1549776cb-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "d0f1313d-04c9-4d97-9339-31b1549776cb" (UID: "d0f1313d-04c9-4d97-9339-31b1549776cb"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:07:46 crc kubenswrapper[4492]: I1126 07:07:46.652468 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d0f1313d-04c9-4d97-9339-31b1549776cb-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "d0f1313d-04c9-4d97-9339-31b1549776cb" (UID: "d0f1313d-04c9-4d97-9339-31b1549776cb"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:07:46 crc kubenswrapper[4492]: I1126 07:07:46.657333 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d0f1313d-04c9-4d97-9339-31b1549776cb-kube-api-access-gzbwx" (OuterVolumeSpecName: "kube-api-access-gzbwx") pod "d0f1313d-04c9-4d97-9339-31b1549776cb" (UID: "d0f1313d-04c9-4d97-9339-31b1549776cb"). InnerVolumeSpecName "kube-api-access-gzbwx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:07:46 crc kubenswrapper[4492]: I1126 07:07:46.657686 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d0f1313d-04c9-4d97-9339-31b1549776cb-scripts" (OuterVolumeSpecName: "scripts") pod "d0f1313d-04c9-4d97-9339-31b1549776cb" (UID: "d0f1313d-04c9-4d97-9339-31b1549776cb"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:07:46 crc kubenswrapper[4492]: I1126 07:07:46.676312 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d0f1313d-04c9-4d97-9339-31b1549776cb-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "d0f1313d-04c9-4d97-9339-31b1549776cb" (UID: "d0f1313d-04c9-4d97-9339-31b1549776cb"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:07:46 crc kubenswrapper[4492]: I1126 07:07:46.708991 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d0f1313d-04c9-4d97-9339-31b1549776cb-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d0f1313d-04c9-4d97-9339-31b1549776cb" (UID: "d0f1313d-04c9-4d97-9339-31b1549776cb"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:07:46 crc kubenswrapper[4492]: I1126 07:07:46.724469 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d0f1313d-04c9-4d97-9339-31b1549776cb-config-data" (OuterVolumeSpecName: "config-data") pod "d0f1313d-04c9-4d97-9339-31b1549776cb" (UID: "d0f1313d-04c9-4d97-9339-31b1549776cb"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:07:46 crc kubenswrapper[4492]: I1126 07:07:46.753565 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gzbwx\" (UniqueName: \"kubernetes.io/projected/d0f1313d-04c9-4d97-9339-31b1549776cb-kube-api-access-gzbwx\") on node \"crc\" DevicePath \"\"" Nov 26 07:07:46 crc kubenswrapper[4492]: I1126 07:07:46.753724 4492 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d0f1313d-04c9-4d97-9339-31b1549776cb-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 26 07:07:46 crc kubenswrapper[4492]: I1126 07:07:46.753789 4492 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d0f1313d-04c9-4d97-9339-31b1549776cb-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 07:07:46 crc kubenswrapper[4492]: I1126 07:07:46.753843 4492 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d0f1313d-04c9-4d97-9339-31b1549776cb-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:07:46 crc kubenswrapper[4492]: I1126 07:07:46.753892 4492 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d0f1313d-04c9-4d97-9339-31b1549776cb-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 26 07:07:46 crc kubenswrapper[4492]: I1126 07:07:46.753985 4492 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d0f1313d-04c9-4d97-9339-31b1549776cb-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 07:07:46 crc kubenswrapper[4492]: I1126 07:07:46.754043 4492 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d0f1313d-04c9-4d97-9339-31b1549776cb-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 26 07:07:46 crc kubenswrapper[4492]: I1126 07:07:46.909964 4492 generic.go:334] "Generic (PLEG): container finished" podID="d0f1313d-04c9-4d97-9339-31b1549776cb" containerID="00bb1a9e59966f1c51718cbdb732daa85702692c3e58b5b1814a351405943e77" exitCode=0 Nov 26 07:07:46 crc kubenswrapper[4492]: I1126 07:07:46.909994 4492 generic.go:334] "Generic (PLEG): container finished" podID="d0f1313d-04c9-4d97-9339-31b1549776cb" containerID="3a3f66491451c28b610d2970e1622e25f797dea48665fc4d4e8b4783cb269bd6" exitCode=2 Nov 26 07:07:46 crc kubenswrapper[4492]: I1126 07:07:46.910002 4492 generic.go:334] "Generic (PLEG): container finished" podID="d0f1313d-04c9-4d97-9339-31b1549776cb" containerID="cfeb6c9b597aaca3475ac5c07af3cfafdb048f3a78dd849e2c753d2b414e0e6f" exitCode=0 Nov 26 07:07:46 crc kubenswrapper[4492]: I1126 07:07:46.910009 4492 generic.go:334] "Generic (PLEG): container finished" podID="d0f1313d-04c9-4d97-9339-31b1549776cb" containerID="d4404f5d438d16fc5d55fa0947c3d7d9d0d0ee9ad9f890b4cdf0f5f501eb0666" exitCode=0 Nov 26 07:07:46 crc kubenswrapper[4492]: I1126 07:07:46.910046 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 26 07:07:46 crc kubenswrapper[4492]: I1126 07:07:46.910089 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d0f1313d-04c9-4d97-9339-31b1549776cb","Type":"ContainerDied","Data":"00bb1a9e59966f1c51718cbdb732daa85702692c3e58b5b1814a351405943e77"} Nov 26 07:07:46 crc kubenswrapper[4492]: I1126 07:07:46.910119 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d0f1313d-04c9-4d97-9339-31b1549776cb","Type":"ContainerDied","Data":"3a3f66491451c28b610d2970e1622e25f797dea48665fc4d4e8b4783cb269bd6"} Nov 26 07:07:46 crc kubenswrapper[4492]: I1126 07:07:46.910129 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d0f1313d-04c9-4d97-9339-31b1549776cb","Type":"ContainerDied","Data":"cfeb6c9b597aaca3475ac5c07af3cfafdb048f3a78dd849e2c753d2b414e0e6f"} Nov 26 07:07:46 crc kubenswrapper[4492]: I1126 07:07:46.910137 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d0f1313d-04c9-4d97-9339-31b1549776cb","Type":"ContainerDied","Data":"d4404f5d438d16fc5d55fa0947c3d7d9d0d0ee9ad9f890b4cdf0f5f501eb0666"} Nov 26 07:07:46 crc kubenswrapper[4492]: I1126 07:07:46.910146 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d0f1313d-04c9-4d97-9339-31b1549776cb","Type":"ContainerDied","Data":"7d3d12b42c9d6e7e633addbf16fdc773be564dcb249daff6ed7310cd3cdb3684"} Nov 26 07:07:46 crc kubenswrapper[4492]: I1126 07:07:46.910161 4492 scope.go:117] "RemoveContainer" containerID="00bb1a9e59966f1c51718cbdb732daa85702692c3e58b5b1814a351405943e77" Nov 26 07:07:46 crc kubenswrapper[4492]: I1126 07:07:46.930848 4492 scope.go:117] "RemoveContainer" containerID="3a3f66491451c28b610d2970e1622e25f797dea48665fc4d4e8b4783cb269bd6" Nov 26 07:07:46 crc kubenswrapper[4492]: I1126 07:07:46.950214 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 26 07:07:46 crc kubenswrapper[4492]: I1126 07:07:46.971534 4492 scope.go:117] "RemoveContainer" containerID="cfeb6c9b597aaca3475ac5c07af3cfafdb048f3a78dd849e2c753d2b414e0e6f" Nov 26 07:07:46 crc kubenswrapper[4492]: I1126 07:07:46.996238 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 26 07:07:47 crc kubenswrapper[4492]: I1126 07:07:47.014844 4492 scope.go:117] "RemoveContainer" containerID="d4404f5d438d16fc5d55fa0947c3d7d9d0d0ee9ad9f890b4cdf0f5f501eb0666" Nov 26 07:07:47 crc kubenswrapper[4492]: I1126 07:07:47.023605 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 26 07:07:47 crc kubenswrapper[4492]: E1126 07:07:47.025909 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d0f1313d-04c9-4d97-9339-31b1549776cb" containerName="ceilometer-notification-agent" Nov 26 07:07:47 crc kubenswrapper[4492]: I1126 07:07:47.025949 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="d0f1313d-04c9-4d97-9339-31b1549776cb" containerName="ceilometer-notification-agent" Nov 26 07:07:47 crc kubenswrapper[4492]: E1126 07:07:47.025974 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d0f1313d-04c9-4d97-9339-31b1549776cb" containerName="ceilometer-central-agent" Nov 26 07:07:47 crc kubenswrapper[4492]: I1126 07:07:47.025982 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="d0f1313d-04c9-4d97-9339-31b1549776cb" containerName="ceilometer-central-agent" Nov 26 07:07:47 crc 
kubenswrapper[4492]: E1126 07:07:47.026002 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d0f1313d-04c9-4d97-9339-31b1549776cb" containerName="sg-core" Nov 26 07:07:47 crc kubenswrapper[4492]: I1126 07:07:47.026009 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="d0f1313d-04c9-4d97-9339-31b1549776cb" containerName="sg-core" Nov 26 07:07:47 crc kubenswrapper[4492]: E1126 07:07:47.026046 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d0f1313d-04c9-4d97-9339-31b1549776cb" containerName="proxy-httpd" Nov 26 07:07:47 crc kubenswrapper[4492]: I1126 07:07:47.026053 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="d0f1313d-04c9-4d97-9339-31b1549776cb" containerName="proxy-httpd" Nov 26 07:07:47 crc kubenswrapper[4492]: I1126 07:07:47.026739 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="d0f1313d-04c9-4d97-9339-31b1549776cb" containerName="ceilometer-central-agent" Nov 26 07:07:47 crc kubenswrapper[4492]: I1126 07:07:47.026774 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="d0f1313d-04c9-4d97-9339-31b1549776cb" containerName="sg-core" Nov 26 07:07:47 crc kubenswrapper[4492]: I1126 07:07:47.026785 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="d0f1313d-04c9-4d97-9339-31b1549776cb" containerName="ceilometer-notification-agent" Nov 26 07:07:47 crc kubenswrapper[4492]: I1126 07:07:47.035261 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="d0f1313d-04c9-4d97-9339-31b1549776cb" containerName="proxy-httpd" Nov 26 07:07:47 crc kubenswrapper[4492]: I1126 07:07:47.049312 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 26 07:07:47 crc kubenswrapper[4492]: I1126 07:07:47.049499 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 26 07:07:47 crc kubenswrapper[4492]: I1126 07:07:47.052261 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 26 07:07:47 crc kubenswrapper[4492]: I1126 07:07:47.052885 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 26 07:07:47 crc kubenswrapper[4492]: I1126 07:07:47.101892 4492 scope.go:117] "RemoveContainer" containerID="00bb1a9e59966f1c51718cbdb732daa85702692c3e58b5b1814a351405943e77" Nov 26 07:07:47 crc kubenswrapper[4492]: E1126 07:07:47.102439 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"00bb1a9e59966f1c51718cbdb732daa85702692c3e58b5b1814a351405943e77\": container with ID starting with 00bb1a9e59966f1c51718cbdb732daa85702692c3e58b5b1814a351405943e77 not found: ID does not exist" containerID="00bb1a9e59966f1c51718cbdb732daa85702692c3e58b5b1814a351405943e77" Nov 26 07:07:47 crc kubenswrapper[4492]: I1126 07:07:47.102477 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"00bb1a9e59966f1c51718cbdb732daa85702692c3e58b5b1814a351405943e77"} err="failed to get container status \"00bb1a9e59966f1c51718cbdb732daa85702692c3e58b5b1814a351405943e77\": rpc error: code = NotFound desc = could not find container \"00bb1a9e59966f1c51718cbdb732daa85702692c3e58b5b1814a351405943e77\": container with ID starting with 00bb1a9e59966f1c51718cbdb732daa85702692c3e58b5b1814a351405943e77 not found: ID does not exist" Nov 26 07:07:47 crc kubenswrapper[4492]: I1126 07:07:47.102509 4492 scope.go:117] "RemoveContainer" containerID="3a3f66491451c28b610d2970e1622e25f797dea48665fc4d4e8b4783cb269bd6" Nov 26 07:07:47 crc kubenswrapper[4492]: E1126 07:07:47.102981 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3a3f66491451c28b610d2970e1622e25f797dea48665fc4d4e8b4783cb269bd6\": container with ID starting with 3a3f66491451c28b610d2970e1622e25f797dea48665fc4d4e8b4783cb269bd6 not found: ID does not exist" containerID="3a3f66491451c28b610d2970e1622e25f797dea48665fc4d4e8b4783cb269bd6" Nov 26 07:07:47 crc kubenswrapper[4492]: I1126 07:07:47.103072 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3a3f66491451c28b610d2970e1622e25f797dea48665fc4d4e8b4783cb269bd6"} err="failed to get container status \"3a3f66491451c28b610d2970e1622e25f797dea48665fc4d4e8b4783cb269bd6\": rpc error: code = NotFound desc = could not find container \"3a3f66491451c28b610d2970e1622e25f797dea48665fc4d4e8b4783cb269bd6\": container with ID starting with 3a3f66491451c28b610d2970e1622e25f797dea48665fc4d4e8b4783cb269bd6 not found: ID does not exist" Nov 26 07:07:47 crc kubenswrapper[4492]: I1126 07:07:47.103090 4492 scope.go:117] "RemoveContainer" containerID="cfeb6c9b597aaca3475ac5c07af3cfafdb048f3a78dd849e2c753d2b414e0e6f" Nov 26 07:07:47 crc kubenswrapper[4492]: E1126 07:07:47.103697 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cfeb6c9b597aaca3475ac5c07af3cfafdb048f3a78dd849e2c753d2b414e0e6f\": container with ID starting with cfeb6c9b597aaca3475ac5c07af3cfafdb048f3a78dd849e2c753d2b414e0e6f not found: ID does not exist" containerID="cfeb6c9b597aaca3475ac5c07af3cfafdb048f3a78dd849e2c753d2b414e0e6f" Nov 26 07:07:47 crc kubenswrapper[4492]: I1126 
Nov 26 07:07:47 crc kubenswrapper[4492]: I1126 07:07:47.103767 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cfeb6c9b597aaca3475ac5c07af3cfafdb048f3a78dd849e2c753d2b414e0e6f"} err="failed to get container status \"cfeb6c9b597aaca3475ac5c07af3cfafdb048f3a78dd849e2c753d2b414e0e6f\": rpc error: code = NotFound desc = could not find container \"cfeb6c9b597aaca3475ac5c07af3cfafdb048f3a78dd849e2c753d2b414e0e6f\": container with ID starting with cfeb6c9b597aaca3475ac5c07af3cfafdb048f3a78dd849e2c753d2b414e0e6f not found: ID does not exist"
Nov 26 07:07:47 crc kubenswrapper[4492]: I1126 07:07:47.103805 4492 scope.go:117] "RemoveContainer" containerID="d4404f5d438d16fc5d55fa0947c3d7d9d0d0ee9ad9f890b4cdf0f5f501eb0666"
Nov 26 07:07:47 crc kubenswrapper[4492]: E1126 07:07:47.104417 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d4404f5d438d16fc5d55fa0947c3d7d9d0d0ee9ad9f890b4cdf0f5f501eb0666\": container with ID starting with d4404f5d438d16fc5d55fa0947c3d7d9d0d0ee9ad9f890b4cdf0f5f501eb0666 not found: ID does not exist" containerID="d4404f5d438d16fc5d55fa0947c3d7d9d0d0ee9ad9f890b4cdf0f5f501eb0666"
Nov 26 07:07:47 crc kubenswrapper[4492]: I1126 07:07:47.104469 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d4404f5d438d16fc5d55fa0947c3d7d9d0d0ee9ad9f890b4cdf0f5f501eb0666"} err="failed to get container status \"d4404f5d438d16fc5d55fa0947c3d7d9d0d0ee9ad9f890b4cdf0f5f501eb0666\": rpc error: code = NotFound desc = could not find container \"d4404f5d438d16fc5d55fa0947c3d7d9d0d0ee9ad9f890b4cdf0f5f501eb0666\": container with ID starting with d4404f5d438d16fc5d55fa0947c3d7d9d0d0ee9ad9f890b4cdf0f5f501eb0666 not found: ID does not exist"
Nov 26 07:07:47 crc kubenswrapper[4492]: I1126 07:07:47.104511 4492 scope.go:117] "RemoveContainer" containerID="00bb1a9e59966f1c51718cbdb732daa85702692c3e58b5b1814a351405943e77"
Nov 26 07:07:47 crc kubenswrapper[4492]: I1126 07:07:47.104865 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"00bb1a9e59966f1c51718cbdb732daa85702692c3e58b5b1814a351405943e77"} err="failed to get container status \"00bb1a9e59966f1c51718cbdb732daa85702692c3e58b5b1814a351405943e77\": rpc error: code = NotFound desc = could not find container \"00bb1a9e59966f1c51718cbdb732daa85702692c3e58b5b1814a351405943e77\": container with ID starting with 00bb1a9e59966f1c51718cbdb732daa85702692c3e58b5b1814a351405943e77 not found: ID does not exist"
Nov 26 07:07:47 crc kubenswrapper[4492]: I1126 07:07:47.104893 4492 scope.go:117] "RemoveContainer" containerID="3a3f66491451c28b610d2970e1622e25f797dea48665fc4d4e8b4783cb269bd6"
Nov 26 07:07:47 crc kubenswrapper[4492]: I1126 07:07:47.105154 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3a3f66491451c28b610d2970e1622e25f797dea48665fc4d4e8b4783cb269bd6"} err="failed to get container status \"3a3f66491451c28b610d2970e1622e25f797dea48665fc4d4e8b4783cb269bd6\": rpc error: code = NotFound desc = could not find container \"3a3f66491451c28b610d2970e1622e25f797dea48665fc4d4e8b4783cb269bd6\": container with ID starting with 3a3f66491451c28b610d2970e1622e25f797dea48665fc4d4e8b4783cb269bd6 not found: ID does not exist"
Nov 26 07:07:47 crc kubenswrapper[4492]: I1126 07:07:47.105229 4492 scope.go:117] "RemoveContainer" containerID="cfeb6c9b597aaca3475ac5c07af3cfafdb048f3a78dd849e2c753d2b414e0e6f"
Nov 26 07:07:47 crc kubenswrapper[4492]: I1126 07:07:47.105535 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cfeb6c9b597aaca3475ac5c07af3cfafdb048f3a78dd849e2c753d2b414e0e6f"} err="failed to get container status \"cfeb6c9b597aaca3475ac5c07af3cfafdb048f3a78dd849e2c753d2b414e0e6f\": rpc error: code = NotFound desc = could not find container \"cfeb6c9b597aaca3475ac5c07af3cfafdb048f3a78dd849e2c753d2b414e0e6f\": container with ID starting with cfeb6c9b597aaca3475ac5c07af3cfafdb048f3a78dd849e2c753d2b414e0e6f not found: ID does not exist"
Nov 26 07:07:47 crc kubenswrapper[4492]: I1126 07:07:47.105564 4492 scope.go:117] "RemoveContainer" containerID="d4404f5d438d16fc5d55fa0947c3d7d9d0d0ee9ad9f890b4cdf0f5f501eb0666"
Nov 26 07:07:47 crc kubenswrapper[4492]: I1126 07:07:47.105900 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d4404f5d438d16fc5d55fa0947c3d7d9d0d0ee9ad9f890b4cdf0f5f501eb0666"} err="failed to get container status \"d4404f5d438d16fc5d55fa0947c3d7d9d0d0ee9ad9f890b4cdf0f5f501eb0666\": rpc error: code = NotFound desc = could not find container \"d4404f5d438d16fc5d55fa0947c3d7d9d0d0ee9ad9f890b4cdf0f5f501eb0666\": container with ID starting with d4404f5d438d16fc5d55fa0947c3d7d9d0d0ee9ad9f890b4cdf0f5f501eb0666 not found: ID does not exist"
Nov 26 07:07:47 crc kubenswrapper[4492]: I1126 07:07:47.105922 4492 scope.go:117] "RemoveContainer" containerID="00bb1a9e59966f1c51718cbdb732daa85702692c3e58b5b1814a351405943e77"
Nov 26 07:07:47 crc kubenswrapper[4492]: I1126 07:07:47.106193 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"00bb1a9e59966f1c51718cbdb732daa85702692c3e58b5b1814a351405943e77"} err="failed to get container status \"00bb1a9e59966f1c51718cbdb732daa85702692c3e58b5b1814a351405943e77\": rpc error: code = NotFound desc = could not find container \"00bb1a9e59966f1c51718cbdb732daa85702692c3e58b5b1814a351405943e77\": container with ID starting with 00bb1a9e59966f1c51718cbdb732daa85702692c3e58b5b1814a351405943e77 not found: ID does not exist"
Nov 26 07:07:47 crc kubenswrapper[4492]: I1126 07:07:47.106218 4492 scope.go:117] "RemoveContainer" containerID="3a3f66491451c28b610d2970e1622e25f797dea48665fc4d4e8b4783cb269bd6"
Nov 26 07:07:47 crc kubenswrapper[4492]: I1126 07:07:47.106549 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3a3f66491451c28b610d2970e1622e25f797dea48665fc4d4e8b4783cb269bd6"} err="failed to get container status \"3a3f66491451c28b610d2970e1622e25f797dea48665fc4d4e8b4783cb269bd6\": rpc error: code = NotFound desc = could not find container \"3a3f66491451c28b610d2970e1622e25f797dea48665fc4d4e8b4783cb269bd6\": container with ID starting with 3a3f66491451c28b610d2970e1622e25f797dea48665fc4d4e8b4783cb269bd6 not found: ID does not exist"
Nov 26 07:07:47 crc kubenswrapper[4492]: I1126 07:07:47.106570 4492 scope.go:117] "RemoveContainer" containerID="cfeb6c9b597aaca3475ac5c07af3cfafdb048f3a78dd849e2c753d2b414e0e6f"
Nov 26 07:07:47 crc kubenswrapper[4492]: I1126 07:07:47.106885 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cfeb6c9b597aaca3475ac5c07af3cfafdb048f3a78dd849e2c753d2b414e0e6f"} err="failed to get container status \"cfeb6c9b597aaca3475ac5c07af3cfafdb048f3a78dd849e2c753d2b414e0e6f\": rpc error: code = NotFound desc = could not find container \"cfeb6c9b597aaca3475ac5c07af3cfafdb048f3a78dd849e2c753d2b414e0e6f\": container with ID starting with cfeb6c9b597aaca3475ac5c07af3cfafdb048f3a78dd849e2c753d2b414e0e6f not found: ID does not exist"
Nov 26 07:07:47 crc kubenswrapper[4492]: I1126 07:07:47.106969 4492 scope.go:117] "RemoveContainer" containerID="d4404f5d438d16fc5d55fa0947c3d7d9d0d0ee9ad9f890b4cdf0f5f501eb0666"
Nov 26 07:07:47 crc kubenswrapper[4492]: I1126 07:07:47.107238 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d4404f5d438d16fc5d55fa0947c3d7d9d0d0ee9ad9f890b4cdf0f5f501eb0666"} err="failed to get container status \"d4404f5d438d16fc5d55fa0947c3d7d9d0d0ee9ad9f890b4cdf0f5f501eb0666\": rpc error: code = NotFound desc = could not find container \"d4404f5d438d16fc5d55fa0947c3d7d9d0d0ee9ad9f890b4cdf0f5f501eb0666\": container with ID starting with d4404f5d438d16fc5d55fa0947c3d7d9d0d0ee9ad9f890b4cdf0f5f501eb0666 not found: ID does not exist"
Nov 26 07:07:47 crc kubenswrapper[4492]: I1126 07:07:47.107263 4492 scope.go:117] "RemoveContainer" containerID="00bb1a9e59966f1c51718cbdb732daa85702692c3e58b5b1814a351405943e77"
Nov 26 07:07:47 crc kubenswrapper[4492]: I1126 07:07:47.107688 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"00bb1a9e59966f1c51718cbdb732daa85702692c3e58b5b1814a351405943e77"} err="failed to get container status \"00bb1a9e59966f1c51718cbdb732daa85702692c3e58b5b1814a351405943e77\": rpc error: code = NotFound desc = could not find container \"00bb1a9e59966f1c51718cbdb732daa85702692c3e58b5b1814a351405943e77\": container with ID starting with 00bb1a9e59966f1c51718cbdb732daa85702692c3e58b5b1814a351405943e77 not found: ID does not exist"
Nov 26 07:07:47 crc kubenswrapper[4492]: I1126 07:07:47.107709 4492 scope.go:117] "RemoveContainer" containerID="3a3f66491451c28b610d2970e1622e25f797dea48665fc4d4e8b4783cb269bd6"
Nov 26 07:07:47 crc kubenswrapper[4492]: I1126 07:07:47.108001 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3a3f66491451c28b610d2970e1622e25f797dea48665fc4d4e8b4783cb269bd6"} err="failed to get container status \"3a3f66491451c28b610d2970e1622e25f797dea48665fc4d4e8b4783cb269bd6\": rpc error: code = NotFound desc = could not find container \"3a3f66491451c28b610d2970e1622e25f797dea48665fc4d4e8b4783cb269bd6\": container with ID starting with 3a3f66491451c28b610d2970e1622e25f797dea48665fc4d4e8b4783cb269bd6 not found: ID does not exist"
Nov 26 07:07:47 crc kubenswrapper[4492]: I1126 07:07:47.108024 4492 scope.go:117] "RemoveContainer" containerID="cfeb6c9b597aaca3475ac5c07af3cfafdb048f3a78dd849e2c753d2b414e0e6f"
Nov 26 07:07:47 crc kubenswrapper[4492]: I1126 07:07:47.108284 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cfeb6c9b597aaca3475ac5c07af3cfafdb048f3a78dd849e2c753d2b414e0e6f"} err="failed to get container status \"cfeb6c9b597aaca3475ac5c07af3cfafdb048f3a78dd849e2c753d2b414e0e6f\": rpc error: code = NotFound desc = could not find container \"cfeb6c9b597aaca3475ac5c07af3cfafdb048f3a78dd849e2c753d2b414e0e6f\": container with ID starting with cfeb6c9b597aaca3475ac5c07af3cfafdb048f3a78dd849e2c753d2b414e0e6f not found: ID does not exist"
Nov 26 07:07:47 crc kubenswrapper[4492]: I1126 07:07:47.108305 4492 scope.go:117] "RemoveContainer" containerID="d4404f5d438d16fc5d55fa0947c3d7d9d0d0ee9ad9f890b4cdf0f5f501eb0666"
Nov 26 07:07:47 crc kubenswrapper[4492]: I1126 07:07:47.108498 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d4404f5d438d16fc5d55fa0947c3d7d9d0d0ee9ad9f890b4cdf0f5f501eb0666"} err="failed to get container status \"d4404f5d438d16fc5d55fa0947c3d7d9d0d0ee9ad9f890b4cdf0f5f501eb0666\": rpc error: code = NotFound desc = could not find container \"d4404f5d438d16fc5d55fa0947c3d7d9d0d0ee9ad9f890b4cdf0f5f501eb0666\": container with ID starting with d4404f5d438d16fc5d55fa0947c3d7d9d0d0ee9ad9f890b4cdf0f5f501eb0666 not found: ID does not exist"
Nov 26 07:07:47 crc kubenswrapper[4492]: I1126 07:07:47.168926 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/35c37b78-2ced-4321-a6bf-8eabc82419df-config-data\") pod \"ceilometer-0\" (UID: \"35c37b78-2ced-4321-a6bf-8eabc82419df\") " pod="openstack/ceilometer-0"
Nov 26 07:07:47 crc kubenswrapper[4492]: I1126 07:07:47.168981 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/35c37b78-2ced-4321-a6bf-8eabc82419df-run-httpd\") pod \"ceilometer-0\" (UID: \"35c37b78-2ced-4321-a6bf-8eabc82419df\") " pod="openstack/ceilometer-0"
Nov 26 07:07:47 crc kubenswrapper[4492]: I1126 07:07:47.169011 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/35c37b78-2ced-4321-a6bf-8eabc82419df-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"35c37b78-2ced-4321-a6bf-8eabc82419df\") " pod="openstack/ceilometer-0"
Nov 26 07:07:47 crc kubenswrapper[4492]: I1126 07:07:47.169042 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/35c37b78-2ced-4321-a6bf-8eabc82419df-log-httpd\") pod \"ceilometer-0\" (UID: \"35c37b78-2ced-4321-a6bf-8eabc82419df\") " pod="openstack/ceilometer-0"
Nov 26 07:07:47 crc kubenswrapper[4492]: I1126 07:07:47.169073 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/35c37b78-2ced-4321-a6bf-8eabc82419df-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"35c37b78-2ced-4321-a6bf-8eabc82419df\") " pod="openstack/ceilometer-0"
Nov 26 07:07:47 crc kubenswrapper[4492]: I1126 07:07:47.169137 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/35c37b78-2ced-4321-a6bf-8eabc82419df-scripts\") pod \"ceilometer-0\" (UID: \"35c37b78-2ced-4321-a6bf-8eabc82419df\") " pod="openstack/ceilometer-0"
Nov 26 07:07:47 crc kubenswrapper[4492]: I1126 07:07:47.169166 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-srx2j\" (UniqueName: \"kubernetes.io/projected/35c37b78-2ced-4321-a6bf-8eabc82419df-kube-api-access-srx2j\") pod \"ceilometer-0\" (UID: \"35c37b78-2ced-4321-a6bf-8eabc82419df\") " pod="openstack/ceilometer-0"
Nov 26 07:07:47 crc kubenswrapper[4492]: I1126 07:07:47.270262 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/35c37b78-2ced-4321-a6bf-8eabc82419df-config-data\") pod \"ceilometer-0\" (UID: \"35c37b78-2ced-4321-a6bf-8eabc82419df\") " pod="openstack/ceilometer-0"
Nov 26 07:07:47 crc kubenswrapper[4492]: I1126 07:07:47.270322 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/35c37b78-2ced-4321-a6bf-8eabc82419df-run-httpd\") pod \"ceilometer-0\" (UID: \"35c37b78-2ced-4321-a6bf-8eabc82419df\") " pod="openstack/ceilometer-0"
Nov 26 07:07:47 crc kubenswrapper[4492]: I1126 07:07:47.270352 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/35c37b78-2ced-4321-a6bf-8eabc82419df-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"35c37b78-2ced-4321-a6bf-8eabc82419df\") " pod="openstack/ceilometer-0"
Nov 26 07:07:47 crc kubenswrapper[4492]: I1126 07:07:47.270384 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/35c37b78-2ced-4321-a6bf-8eabc82419df-log-httpd\") pod \"ceilometer-0\" (UID: \"35c37b78-2ced-4321-a6bf-8eabc82419df\") " pod="openstack/ceilometer-0"
Nov 26 07:07:47 crc kubenswrapper[4492]: I1126 07:07:47.270428 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/35c37b78-2ced-4321-a6bf-8eabc82419df-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"35c37b78-2ced-4321-a6bf-8eabc82419df\") " pod="openstack/ceilometer-0"
Nov 26 07:07:47 crc kubenswrapper[4492]: I1126 07:07:47.270502 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/35c37b78-2ced-4321-a6bf-8eabc82419df-scripts\") pod \"ceilometer-0\" (UID: \"35c37b78-2ced-4321-a6bf-8eabc82419df\") " pod="openstack/ceilometer-0"
Nov 26 07:07:47 crc kubenswrapper[4492]: I1126 07:07:47.270538 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-srx2j\" (UniqueName: \"kubernetes.io/projected/35c37b78-2ced-4321-a6bf-8eabc82419df-kube-api-access-srx2j\") pod \"ceilometer-0\" (UID: \"35c37b78-2ced-4321-a6bf-8eabc82419df\") " pod="openstack/ceilometer-0"
Nov 26 07:07:47 crc kubenswrapper[4492]: I1126 07:07:47.271624 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/35c37b78-2ced-4321-a6bf-8eabc82419df-log-httpd\") pod \"ceilometer-0\" (UID: \"35c37b78-2ced-4321-a6bf-8eabc82419df\") " pod="openstack/ceilometer-0"
Nov 26 07:07:47 crc kubenswrapper[4492]: I1126 07:07:47.272328 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/35c37b78-2ced-4321-a6bf-8eabc82419df-run-httpd\") pod \"ceilometer-0\" (UID: \"35c37b78-2ced-4321-a6bf-8eabc82419df\") " pod="openstack/ceilometer-0"
Nov 26 07:07:47 crc kubenswrapper[4492]: I1126 07:07:47.276709 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/35c37b78-2ced-4321-a6bf-8eabc82419df-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"35c37b78-2ced-4321-a6bf-8eabc82419df\") " pod="openstack/ceilometer-0"
Nov 26 07:07:47 crc kubenswrapper[4492]: I1126 07:07:47.277295 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/35c37b78-2ced-4321-a6bf-8eabc82419df-scripts\") pod \"ceilometer-0\" (UID: \"35c37b78-2ced-4321-a6bf-8eabc82419df\") " pod="openstack/ceilometer-0"
Nov 26 07:07:47 crc kubenswrapper[4492]: I1126 07:07:47.282503 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/35c37b78-2ced-4321-a6bf-8eabc82419df-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"35c37b78-2ced-4321-a6bf-8eabc82419df\") " pod="openstack/ceilometer-0"
Nov 26 07:07:47 crc kubenswrapper[4492]: I1126 07:07:47.291948 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/35c37b78-2ced-4321-a6bf-8eabc82419df-config-data\") pod \"ceilometer-0\" (UID: \"35c37b78-2ced-4321-a6bf-8eabc82419df\") " pod="openstack/ceilometer-0"
Nov 26 07:07:47 crc kubenswrapper[4492]: I1126 07:07:47.303507 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-srx2j\" (UniqueName: \"kubernetes.io/projected/35c37b78-2ced-4321-a6bf-8eabc82419df-kube-api-access-srx2j\") pod \"ceilometer-0\" (UID: \"35c37b78-2ced-4321-a6bf-8eabc82419df\") " pod="openstack/ceilometer-0"
Nov 26 07:07:47 crc kubenswrapper[4492]: I1126 07:07:47.406009 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 26 07:07:47 crc kubenswrapper[4492]: I1126 07:07:47.831036 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Nov 26 07:07:47 crc kubenswrapper[4492]: W1126 07:07:47.831307 4492 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod35c37b78_2ced_4321_a6bf_8eabc82419df.slice/crio-f33e411c3fa8d57575946470bf7bf6f62ad25dc3d7085e325a7c9b23eaac277b WatchSource:0}: Error finding container f33e411c3fa8d57575946470bf7bf6f62ad25dc3d7085e325a7c9b23eaac277b: Status 404 returned error can't find the container with id f33e411c3fa8d57575946470bf7bf6f62ad25dc3d7085e325a7c9b23eaac277b
Nov 26 07:07:47 crc kubenswrapper[4492]: I1126 07:07:47.918513 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"35c37b78-2ced-4321-a6bf-8eabc82419df","Type":"ContainerStarted","Data":"f33e411c3fa8d57575946470bf7bf6f62ad25dc3d7085e325a7c9b23eaac277b"}
Nov 26 07:07:48 crc kubenswrapper[4492]: I1126 07:07:48.450350 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d0f1313d-04c9-4d97-9339-31b1549776cb" path="/var/lib/kubelet/pods/d0f1313d-04c9-4d97-9339-31b1549776cb/volumes"
Nov 26 07:07:48 crc kubenswrapper[4492]: I1126 07:07:48.930157 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"35c37b78-2ced-4321-a6bf-8eabc82419df","Type":"ContainerStarted","Data":"4b1e6d21df7c142f00e46814f5efcf0417852d9643f40d338ae4f1861da90647"}
Nov 26 07:07:49 crc kubenswrapper[4492]: I1126 07:07:49.315944 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell0-conductor-0"
Nov 26 07:07:49 crc kubenswrapper[4492]: I1126 07:07:49.775917 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-cell-mapping-9ftsf"]
Nov 26 07:07:49 crc kubenswrapper[4492]: I1126 07:07:49.778215 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-9ftsf"
Nov 26 07:07:49 crc kubenswrapper[4492]: I1126 07:07:49.782468 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-config-data"
Nov 26 07:07:49 crc kubenswrapper[4492]: I1126 07:07:49.786210 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-scripts"
Nov 26 07:07:49 crc kubenswrapper[4492]: I1126 07:07:49.794774 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-9ftsf"]
Nov 26 07:07:49 crc kubenswrapper[4492]: I1126 07:07:49.921086 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xtkzp\" (UniqueName: \"kubernetes.io/projected/6ca524b1-6cc4-4333-a4fe-724793248c10-kube-api-access-xtkzp\") pod \"nova-cell0-cell-mapping-9ftsf\" (UID: \"6ca524b1-6cc4-4333-a4fe-724793248c10\") " pod="openstack/nova-cell0-cell-mapping-9ftsf"
Nov 26 07:07:49 crc kubenswrapper[4492]: I1126 07:07:49.921828 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6ca524b1-6cc4-4333-a4fe-724793248c10-scripts\") pod \"nova-cell0-cell-mapping-9ftsf\" (UID: \"6ca524b1-6cc4-4333-a4fe-724793248c10\") " pod="openstack/nova-cell0-cell-mapping-9ftsf"
Nov 26 07:07:49 crc kubenswrapper[4492]: I1126 07:07:49.921967 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6ca524b1-6cc4-4333-a4fe-724793248c10-config-data\") pod \"nova-cell0-cell-mapping-9ftsf\" (UID: \"6ca524b1-6cc4-4333-a4fe-724793248c10\") " pod="openstack/nova-cell0-cell-mapping-9ftsf"
Nov 26 07:07:49 crc kubenswrapper[4492]: I1126 07:07:49.922093 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6ca524b1-6cc4-4333-a4fe-724793248c10-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-9ftsf\" (UID: \"6ca524b1-6cc4-4333-a4fe-724793248c10\") " pod="openstack/nova-cell0-cell-mapping-9ftsf"
Nov 26 07:07:49 crc kubenswrapper[4492]: I1126 07:07:49.940234 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"35c37b78-2ced-4321-a6bf-8eabc82419df","Type":"ContainerStarted","Data":"c58b01de2f49ba772ad3b14425f8759863c0e5d4f8c61961592cbd89d97b5153"}
Nov 26 07:07:49 crc kubenswrapper[4492]: I1126 07:07:49.961970 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Nov 26 07:07:49 crc kubenswrapper[4492]: I1126 07:07:49.966299 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0"
Nov 26 07:07:49 crc kubenswrapper[4492]: I1126 07:07:49.969115 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data"
Nov 26 07:07:50 crc kubenswrapper[4492]: I1126 07:07:50.011735 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Nov 26 07:07:50 crc kubenswrapper[4492]: I1126 07:07:50.027492 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6ca524b1-6cc4-4333-a4fe-724793248c10-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-9ftsf\" (UID: \"6ca524b1-6cc4-4333-a4fe-724793248c10\") " pod="openstack/nova-cell0-cell-mapping-9ftsf"
Nov 26 07:07:50 crc kubenswrapper[4492]: I1126 07:07:50.027807 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xtkzp\" (UniqueName: \"kubernetes.io/projected/6ca524b1-6cc4-4333-a4fe-724793248c10-kube-api-access-xtkzp\") pod \"nova-cell0-cell-mapping-9ftsf\" (UID: \"6ca524b1-6cc4-4333-a4fe-724793248c10\") " pod="openstack/nova-cell0-cell-mapping-9ftsf"
Nov 26 07:07:50 crc kubenswrapper[4492]: I1126 07:07:50.027893 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6ca524b1-6cc4-4333-a4fe-724793248c10-scripts\") pod \"nova-cell0-cell-mapping-9ftsf\" (UID: \"6ca524b1-6cc4-4333-a4fe-724793248c10\") " pod="openstack/nova-cell0-cell-mapping-9ftsf"
Nov 26 07:07:50 crc kubenswrapper[4492]: I1126 07:07:50.027997 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6ca524b1-6cc4-4333-a4fe-724793248c10-config-data\") pod \"nova-cell0-cell-mapping-9ftsf\" (UID: \"6ca524b1-6cc4-4333-a4fe-724793248c10\") " pod="openstack/nova-cell0-cell-mapping-9ftsf"
Nov 26 07:07:50 crc kubenswrapper[4492]: I1126 07:07:50.034180 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"]
Nov 26 07:07:50 crc kubenswrapper[4492]: I1126 07:07:50.035806 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Nov 26 07:07:50 crc kubenswrapper[4492]: I1126 07:07:50.038473 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data"
Nov 26 07:07:50 crc kubenswrapper[4492]: I1126 07:07:50.047685 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6ca524b1-6cc4-4333-a4fe-724793248c10-scripts\") pod \"nova-cell0-cell-mapping-9ftsf\" (UID: \"6ca524b1-6cc4-4333-a4fe-724793248c10\") " pod="openstack/nova-cell0-cell-mapping-9ftsf"
Nov 26 07:07:50 crc kubenswrapper[4492]: I1126 07:07:50.051077 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6ca524b1-6cc4-4333-a4fe-724793248c10-config-data\") pod \"nova-cell0-cell-mapping-9ftsf\" (UID: \"6ca524b1-6cc4-4333-a4fe-724793248c10\") " pod="openstack/nova-cell0-cell-mapping-9ftsf"
Nov 26 07:07:50 crc kubenswrapper[4492]: I1126 07:07:50.057373 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xtkzp\" (UniqueName: \"kubernetes.io/projected/6ca524b1-6cc4-4333-a4fe-724793248c10-kube-api-access-xtkzp\") pod \"nova-cell0-cell-mapping-9ftsf\" (UID: \"6ca524b1-6cc4-4333-a4fe-724793248c10\") " pod="openstack/nova-cell0-cell-mapping-9ftsf"
Nov 26 07:07:50 crc kubenswrapper[4492]: I1126 07:07:50.058610 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6ca524b1-6cc4-4333-a4fe-724793248c10-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-9ftsf\" (UID: \"6ca524b1-6cc4-4333-a4fe-724793248c10\") " pod="openstack/nova-cell0-cell-mapping-9ftsf"
Nov 26 07:07:50 crc kubenswrapper[4492]: I1126 07:07:50.063370 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"]
Nov 26 07:07:50 crc kubenswrapper[4492]: I1126 07:07:50.095778 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-9ftsf"
Nov 26 07:07:50 crc kubenswrapper[4492]: I1126 07:07:50.132681 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wmmqb\" (UniqueName: \"kubernetes.io/projected/1ad0cb8d-3495-4b1d-b959-1f1efbad9e39-kube-api-access-wmmqb\") pod \"nova-api-0\" (UID: \"1ad0cb8d-3495-4b1d-b959-1f1efbad9e39\") " pod="openstack/nova-api-0"
Nov 26 07:07:50 crc kubenswrapper[4492]: I1126 07:07:50.132728 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1ad0cb8d-3495-4b1d-b959-1f1efbad9e39-logs\") pod \"nova-api-0\" (UID: \"1ad0cb8d-3495-4b1d-b959-1f1efbad9e39\") " pod="openstack/nova-api-0"
Nov 26 07:07:50 crc kubenswrapper[4492]: I1126 07:07:50.132777 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pvdq4\" (UniqueName: \"kubernetes.io/projected/a7a8743c-1051-4955-86f7-9ddf8f356459-kube-api-access-pvdq4\") pod \"nova-cell1-novncproxy-0\" (UID: \"a7a8743c-1051-4955-86f7-9ddf8f356459\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 26 07:07:50 crc kubenswrapper[4492]: I1126 07:07:50.132840 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1ad0cb8d-3495-4b1d-b959-1f1efbad9e39-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"1ad0cb8d-3495-4b1d-b959-1f1efbad9e39\") " pod="openstack/nova-api-0"
Nov 26 07:07:50 crc kubenswrapper[4492]: I1126 07:07:50.132867 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a7a8743c-1051-4955-86f7-9ddf8f356459-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"a7a8743c-1051-4955-86f7-9ddf8f356459\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 26 07:07:50 crc kubenswrapper[4492]: I1126 07:07:50.132883 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1ad0cb8d-3495-4b1d-b959-1f1efbad9e39-config-data\") pod \"nova-api-0\" (UID: \"1ad0cb8d-3495-4b1d-b959-1f1efbad9e39\") " pod="openstack/nova-api-0"
Nov 26 07:07:50 crc kubenswrapper[4492]: I1126 07:07:50.132916 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a7a8743c-1051-4955-86f7-9ddf8f356459-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"a7a8743c-1051-4955-86f7-9ddf8f356459\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 26 07:07:50 crc kubenswrapper[4492]: I1126 07:07:50.240318 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1ad0cb8d-3495-4b1d-b959-1f1efbad9e39-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"1ad0cb8d-3495-4b1d-b959-1f1efbad9e39\") " pod="openstack/nova-api-0"
Nov 26 07:07:50 crc kubenswrapper[4492]: I1126 07:07:50.240411 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a7a8743c-1051-4955-86f7-9ddf8f356459-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"a7a8743c-1051-4955-86f7-9ddf8f356459\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 26 07:07:50 crc kubenswrapper[4492]: I1126 07:07:50.253956 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"]
Nov 26 07:07:50 crc kubenswrapper[4492]: I1126 07:07:50.256135 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1ad0cb8d-3495-4b1d-b959-1f1efbad9e39-config-data\") pod \"nova-api-0\" (UID: \"1ad0cb8d-3495-4b1d-b959-1f1efbad9e39\") " pod="openstack/nova-api-0"
Nov 26 07:07:50 crc kubenswrapper[4492]: I1126 07:07:50.256303 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a7a8743c-1051-4955-86f7-9ddf8f356459-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"a7a8743c-1051-4955-86f7-9ddf8f356459\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 26 07:07:50 crc kubenswrapper[4492]: I1126 07:07:50.256427 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wmmqb\" (UniqueName: \"kubernetes.io/projected/1ad0cb8d-3495-4b1d-b959-1f1efbad9e39-kube-api-access-wmmqb\") pod \"nova-api-0\" (UID: \"1ad0cb8d-3495-4b1d-b959-1f1efbad9e39\") " pod="openstack/nova-api-0"
Nov 26 07:07:50 crc kubenswrapper[4492]: I1126 07:07:50.256461 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1ad0cb8d-3495-4b1d-b959-1f1efbad9e39-logs\") pod \"nova-api-0\" (UID: \"1ad0cb8d-3495-4b1d-b959-1f1efbad9e39\") " pod="openstack/nova-api-0"
Nov 26 07:07:50 crc kubenswrapper[4492]: I1126 07:07:50.256567 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pvdq4\" (UniqueName: \"kubernetes.io/projected/a7a8743c-1051-4955-86f7-9ddf8f356459-kube-api-access-pvdq4\") pod \"nova-cell1-novncproxy-0\" (UID: \"a7a8743c-1051-4955-86f7-9ddf8f356459\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 26 07:07:50 crc kubenswrapper[4492]: I1126 07:07:50.264163 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Nov 26 07:07:50 crc kubenswrapper[4492]: I1126 07:07:50.269741 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data"
Nov 26 07:07:50 crc kubenswrapper[4492]: I1126 07:07:50.270201 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a7a8743c-1051-4955-86f7-9ddf8f356459-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"a7a8743c-1051-4955-86f7-9ddf8f356459\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 26 07:07:50 crc kubenswrapper[4492]: I1126 07:07:50.270565 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1ad0cb8d-3495-4b1d-b959-1f1efbad9e39-logs\") pod \"nova-api-0\" (UID: \"1ad0cb8d-3495-4b1d-b959-1f1efbad9e39\") " pod="openstack/nova-api-0"
Nov 26 07:07:50 crc kubenswrapper[4492]: I1126 07:07:50.277779 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1ad0cb8d-3495-4b1d-b959-1f1efbad9e39-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"1ad0cb8d-3495-4b1d-b959-1f1efbad9e39\") " pod="openstack/nova-api-0"
Nov 26 07:07:50 crc kubenswrapper[4492]: I1126 07:07:50.282634 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1ad0cb8d-3495-4b1d-b959-1f1efbad9e39-config-data\") pod \"nova-api-0\" (UID: \"1ad0cb8d-3495-4b1d-b959-1f1efbad9e39\") " pod="openstack/nova-api-0"
Nov 26 07:07:50 crc kubenswrapper[4492]: I1126 07:07:50.316618 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a7a8743c-1051-4955-86f7-9ddf8f356459-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"a7a8743c-1051-4955-86f7-9ddf8f356459\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 26 07:07:50 crc kubenswrapper[4492]: I1126 07:07:50.343496 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"]
Nov 26 07:07:50 crc kubenswrapper[4492]: I1126 07:07:50.355973 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wmmqb\" (UniqueName: \"kubernetes.io/projected/1ad0cb8d-3495-4b1d-b959-1f1efbad9e39-kube-api-access-wmmqb\") pod \"nova-api-0\" (UID: \"1ad0cb8d-3495-4b1d-b959-1f1efbad9e39\") " pod="openstack/nova-api-0"
Nov 26 07:07:50 crc kubenswrapper[4492]: I1126 07:07:50.360716 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8hv9n\" (UniqueName: \"kubernetes.io/projected/42b2ffed-86f2-432c-852f-a99a0a7a9f5a-kube-api-access-8hv9n\") pod \"nova-metadata-0\" (UID: \"42b2ffed-86f2-432c-852f-a99a0a7a9f5a\") " pod="openstack/nova-metadata-0"
Nov 26 07:07:50 crc kubenswrapper[4492]: I1126 07:07:50.360764 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/42b2ffed-86f2-432c-852f-a99a0a7a9f5a-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"42b2ffed-86f2-432c-852f-a99a0a7a9f5a\") " pod="openstack/nova-metadata-0"
Nov 26 07:07:50 crc kubenswrapper[4492]: I1126 07:07:50.360843 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/42b2ffed-86f2-432c-852f-a99a0a7a9f5a-config-data\") pod
\"nova-metadata-0\" (UID: \"42b2ffed-86f2-432c-852f-a99a0a7a9f5a\") " pod="openstack/nova-metadata-0" Nov 26 07:07:50 crc kubenswrapper[4492]: I1126 07:07:50.360960 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/42b2ffed-86f2-432c-852f-a99a0a7a9f5a-logs\") pod \"nova-metadata-0\" (UID: \"42b2ffed-86f2-432c-852f-a99a0a7a9f5a\") " pod="openstack/nova-metadata-0" Nov 26 07:07:50 crc kubenswrapper[4492]: I1126 07:07:50.371043 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pvdq4\" (UniqueName: \"kubernetes.io/projected/a7a8743c-1051-4955-86f7-9ddf8f356459-kube-api-access-pvdq4\") pod \"nova-cell1-novncproxy-0\" (UID: \"a7a8743c-1051-4955-86f7-9ddf8f356459\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 07:07:50 crc kubenswrapper[4492]: I1126 07:07:50.383123 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 07:07:50 crc kubenswrapper[4492]: I1126 07:07:50.385646 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 26 07:07:50 crc kubenswrapper[4492]: I1126 07:07:50.412947 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 26 07:07:50 crc kubenswrapper[4492]: I1126 07:07:50.413098 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 26 07:07:50 crc kubenswrapper[4492]: I1126 07:07:50.431298 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 26 07:07:50 crc kubenswrapper[4492]: I1126 07:07:50.467502 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 07:07:50 crc kubenswrapper[4492]: I1126 07:07:50.468515 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/42b2ffed-86f2-432c-852f-a99a0a7a9f5a-config-data\") pod \"nova-metadata-0\" (UID: \"42b2ffed-86f2-432c-852f-a99a0a7a9f5a\") " pod="openstack/nova-metadata-0" Nov 26 07:07:50 crc kubenswrapper[4492]: I1126 07:07:50.468678 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/42b2ffed-86f2-432c-852f-a99a0a7a9f5a-logs\") pod \"nova-metadata-0\" (UID: \"42b2ffed-86f2-432c-852f-a99a0a7a9f5a\") " pod="openstack/nova-metadata-0" Nov 26 07:07:50 crc kubenswrapper[4492]: I1126 07:07:50.468725 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8hv9n\" (UniqueName: \"kubernetes.io/projected/42b2ffed-86f2-432c-852f-a99a0a7a9f5a-kube-api-access-8hv9n\") pod \"nova-metadata-0\" (UID: \"42b2ffed-86f2-432c-852f-a99a0a7a9f5a\") " pod="openstack/nova-metadata-0" Nov 26 07:07:50 crc kubenswrapper[4492]: I1126 07:07:50.468747 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/42b2ffed-86f2-432c-852f-a99a0a7a9f5a-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"42b2ffed-86f2-432c-852f-a99a0a7a9f5a\") " pod="openstack/nova-metadata-0" Nov 26 07:07:50 crc kubenswrapper[4492]: I1126 07:07:50.470150 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-94fc6bf75-vk787"] Nov 26 07:07:50 crc kubenswrapper[4492]: I1126 07:07:50.478592 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-94fc6bf75-vk787" Nov 26 07:07:50 crc kubenswrapper[4492]: I1126 07:07:50.480546 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/42b2ffed-86f2-432c-852f-a99a0a7a9f5a-logs\") pod \"nova-metadata-0\" (UID: \"42b2ffed-86f2-432c-852f-a99a0a7a9f5a\") " pod="openstack/nova-metadata-0" Nov 26 07:07:50 crc kubenswrapper[4492]: I1126 07:07:50.482984 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/42b2ffed-86f2-432c-852f-a99a0a7a9f5a-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"42b2ffed-86f2-432c-852f-a99a0a7a9f5a\") " pod="openstack/nova-metadata-0" Nov 26 07:07:50 crc kubenswrapper[4492]: I1126 07:07:50.496641 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/42b2ffed-86f2-432c-852f-a99a0a7a9f5a-config-data\") pod \"nova-metadata-0\" (UID: \"42b2ffed-86f2-432c-852f-a99a0a7a9f5a\") " pod="openstack/nova-metadata-0" Nov 26 07:07:50 crc kubenswrapper[4492]: I1126 07:07:50.507512 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8hv9n\" (UniqueName: \"kubernetes.io/projected/42b2ffed-86f2-432c-852f-a99a0a7a9f5a-kube-api-access-8hv9n\") pod \"nova-metadata-0\" (UID: \"42b2ffed-86f2-432c-852f-a99a0a7a9f5a\") " pod="openstack/nova-metadata-0" Nov 26 07:07:50 crc kubenswrapper[4492]: I1126 07:07:50.529290 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-94fc6bf75-vk787"] Nov 26 07:07:50 crc kubenswrapper[4492]: I1126 07:07:50.572554 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jd9lj\" (UniqueName: \"kubernetes.io/projected/8b5a886b-7cbf-4031-8ef9-dd05641aa966-kube-api-access-jd9lj\") pod \"nova-scheduler-0\" (UID: \"8b5a886b-7cbf-4031-8ef9-dd05641aa966\") " pod="openstack/nova-scheduler-0" Nov 26 07:07:50 crc kubenswrapper[4492]: I1126 07:07:50.572823 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8b5a886b-7cbf-4031-8ef9-dd05641aa966-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"8b5a886b-7cbf-4031-8ef9-dd05641aa966\") " pod="openstack/nova-scheduler-0" Nov 26 07:07:50 crc kubenswrapper[4492]: I1126 07:07:50.572952 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8b5a886b-7cbf-4031-8ef9-dd05641aa966-config-data\") pod \"nova-scheduler-0\" (UID: \"8b5a886b-7cbf-4031-8ef9-dd05641aa966\") " pod="openstack/nova-scheduler-0" Nov 26 07:07:50 crc kubenswrapper[4492]: I1126 07:07:50.675503 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0ac796a9-7e9c-44f1-9fe4-0b7457c53334-ovsdbserver-nb\") pod \"dnsmasq-dns-94fc6bf75-vk787\" (UID: \"0ac796a9-7e9c-44f1-9fe4-0b7457c53334\") " pod="openstack/dnsmasq-dns-94fc6bf75-vk787" Nov 26 07:07:50 crc kubenswrapper[4492]: I1126 07:07:50.675582 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0ac796a9-7e9c-44f1-9fe4-0b7457c53334-dns-svc\") pod \"dnsmasq-dns-94fc6bf75-vk787\" (UID: 
\"0ac796a9-7e9c-44f1-9fe4-0b7457c53334\") " pod="openstack/dnsmasq-dns-94fc6bf75-vk787" Nov 26 07:07:50 crc kubenswrapper[4492]: I1126 07:07:50.675635 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jd9lj\" (UniqueName: \"kubernetes.io/projected/8b5a886b-7cbf-4031-8ef9-dd05641aa966-kube-api-access-jd9lj\") pod \"nova-scheduler-0\" (UID: \"8b5a886b-7cbf-4031-8ef9-dd05641aa966\") " pod="openstack/nova-scheduler-0" Nov 26 07:07:50 crc kubenswrapper[4492]: I1126 07:07:50.675656 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8b5a886b-7cbf-4031-8ef9-dd05641aa966-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"8b5a886b-7cbf-4031-8ef9-dd05641aa966\") " pod="openstack/nova-scheduler-0" Nov 26 07:07:50 crc kubenswrapper[4492]: I1126 07:07:50.675689 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0ac796a9-7e9c-44f1-9fe4-0b7457c53334-config\") pod \"dnsmasq-dns-94fc6bf75-vk787\" (UID: \"0ac796a9-7e9c-44f1-9fe4-0b7457c53334\") " pod="openstack/dnsmasq-dns-94fc6bf75-vk787" Nov 26 07:07:50 crc kubenswrapper[4492]: I1126 07:07:50.675731 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rzjrf\" (UniqueName: \"kubernetes.io/projected/0ac796a9-7e9c-44f1-9fe4-0b7457c53334-kube-api-access-rzjrf\") pod \"dnsmasq-dns-94fc6bf75-vk787\" (UID: \"0ac796a9-7e9c-44f1-9fe4-0b7457c53334\") " pod="openstack/dnsmasq-dns-94fc6bf75-vk787" Nov 26 07:07:50 crc kubenswrapper[4492]: I1126 07:07:50.675767 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8b5a886b-7cbf-4031-8ef9-dd05641aa966-config-data\") pod \"nova-scheduler-0\" (UID: \"8b5a886b-7cbf-4031-8ef9-dd05641aa966\") " pod="openstack/nova-scheduler-0" Nov 26 07:07:50 crc kubenswrapper[4492]: I1126 07:07:50.675786 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/0ac796a9-7e9c-44f1-9fe4-0b7457c53334-dns-swift-storage-0\") pod \"dnsmasq-dns-94fc6bf75-vk787\" (UID: \"0ac796a9-7e9c-44f1-9fe4-0b7457c53334\") " pod="openstack/dnsmasq-dns-94fc6bf75-vk787" Nov 26 07:07:50 crc kubenswrapper[4492]: I1126 07:07:50.675808 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0ac796a9-7e9c-44f1-9fe4-0b7457c53334-ovsdbserver-sb\") pod \"dnsmasq-dns-94fc6bf75-vk787\" (UID: \"0ac796a9-7e9c-44f1-9fe4-0b7457c53334\") " pod="openstack/dnsmasq-dns-94fc6bf75-vk787" Nov 26 07:07:50 crc kubenswrapper[4492]: I1126 07:07:50.692652 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8b5a886b-7cbf-4031-8ef9-dd05641aa966-config-data\") pod \"nova-scheduler-0\" (UID: \"8b5a886b-7cbf-4031-8ef9-dd05641aa966\") " pod="openstack/nova-scheduler-0" Nov 26 07:07:50 crc kubenswrapper[4492]: I1126 07:07:50.714238 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jd9lj\" (UniqueName: \"kubernetes.io/projected/8b5a886b-7cbf-4031-8ef9-dd05641aa966-kube-api-access-jd9lj\") pod \"nova-scheduler-0\" (UID: \"8b5a886b-7cbf-4031-8ef9-dd05641aa966\") " pod="openstack/nova-scheduler-0" Nov 26 
07:07:50 crc kubenswrapper[4492]: I1126 07:07:50.714606 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8b5a886b-7cbf-4031-8ef9-dd05641aa966-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"8b5a886b-7cbf-4031-8ef9-dd05641aa966\") " pod="openstack/nova-scheduler-0" Nov 26 07:07:50 crc kubenswrapper[4492]: I1126 07:07:50.767927 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 07:07:50 crc kubenswrapper[4492]: I1126 07:07:50.808777 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 26 07:07:50 crc kubenswrapper[4492]: I1126 07:07:50.809933 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0ac796a9-7e9c-44f1-9fe4-0b7457c53334-config\") pod \"dnsmasq-dns-94fc6bf75-vk787\" (UID: \"0ac796a9-7e9c-44f1-9fe4-0b7457c53334\") " pod="openstack/dnsmasq-dns-94fc6bf75-vk787" Nov 26 07:07:50 crc kubenswrapper[4492]: I1126 07:07:50.810085 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rzjrf\" (UniqueName: \"kubernetes.io/projected/0ac796a9-7e9c-44f1-9fe4-0b7457c53334-kube-api-access-rzjrf\") pod \"dnsmasq-dns-94fc6bf75-vk787\" (UID: \"0ac796a9-7e9c-44f1-9fe4-0b7457c53334\") " pod="openstack/dnsmasq-dns-94fc6bf75-vk787" Nov 26 07:07:50 crc kubenswrapper[4492]: I1126 07:07:50.810194 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/0ac796a9-7e9c-44f1-9fe4-0b7457c53334-dns-swift-storage-0\") pod \"dnsmasq-dns-94fc6bf75-vk787\" (UID: \"0ac796a9-7e9c-44f1-9fe4-0b7457c53334\") " pod="openstack/dnsmasq-dns-94fc6bf75-vk787" Nov 26 07:07:50 crc kubenswrapper[4492]: I1126 07:07:50.810226 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0ac796a9-7e9c-44f1-9fe4-0b7457c53334-ovsdbserver-sb\") pod \"dnsmasq-dns-94fc6bf75-vk787\" (UID: \"0ac796a9-7e9c-44f1-9fe4-0b7457c53334\") " pod="openstack/dnsmasq-dns-94fc6bf75-vk787" Nov 26 07:07:50 crc kubenswrapper[4492]: I1126 07:07:50.810321 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0ac796a9-7e9c-44f1-9fe4-0b7457c53334-ovsdbserver-nb\") pod \"dnsmasq-dns-94fc6bf75-vk787\" (UID: \"0ac796a9-7e9c-44f1-9fe4-0b7457c53334\") " pod="openstack/dnsmasq-dns-94fc6bf75-vk787" Nov 26 07:07:50 crc kubenswrapper[4492]: I1126 07:07:50.810416 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0ac796a9-7e9c-44f1-9fe4-0b7457c53334-dns-svc\") pod \"dnsmasq-dns-94fc6bf75-vk787\" (UID: \"0ac796a9-7e9c-44f1-9fe4-0b7457c53334\") " pod="openstack/dnsmasq-dns-94fc6bf75-vk787" Nov 26 07:07:50 crc kubenswrapper[4492]: I1126 07:07:50.811223 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0ac796a9-7e9c-44f1-9fe4-0b7457c53334-config\") pod \"dnsmasq-dns-94fc6bf75-vk787\" (UID: \"0ac796a9-7e9c-44f1-9fe4-0b7457c53334\") " pod="openstack/dnsmasq-dns-94fc6bf75-vk787" Nov 26 07:07:50 crc kubenswrapper[4492]: I1126 07:07:50.811924 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: 
\"kubernetes.io/configmap/0ac796a9-7e9c-44f1-9fe4-0b7457c53334-dns-svc\") pod \"dnsmasq-dns-94fc6bf75-vk787\" (UID: \"0ac796a9-7e9c-44f1-9fe4-0b7457c53334\") " pod="openstack/dnsmasq-dns-94fc6bf75-vk787" Nov 26 07:07:50 crc kubenswrapper[4492]: I1126 07:07:50.815880 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/0ac796a9-7e9c-44f1-9fe4-0b7457c53334-dns-swift-storage-0\") pod \"dnsmasq-dns-94fc6bf75-vk787\" (UID: \"0ac796a9-7e9c-44f1-9fe4-0b7457c53334\") " pod="openstack/dnsmasq-dns-94fc6bf75-vk787" Nov 26 07:07:50 crc kubenswrapper[4492]: I1126 07:07:50.817707 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0ac796a9-7e9c-44f1-9fe4-0b7457c53334-ovsdbserver-sb\") pod \"dnsmasq-dns-94fc6bf75-vk787\" (UID: \"0ac796a9-7e9c-44f1-9fe4-0b7457c53334\") " pod="openstack/dnsmasq-dns-94fc6bf75-vk787" Nov 26 07:07:50 crc kubenswrapper[4492]: I1126 07:07:50.818355 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0ac796a9-7e9c-44f1-9fe4-0b7457c53334-ovsdbserver-nb\") pod \"dnsmasq-dns-94fc6bf75-vk787\" (UID: \"0ac796a9-7e9c-44f1-9fe4-0b7457c53334\") " pod="openstack/dnsmasq-dns-94fc6bf75-vk787" Nov 26 07:07:50 crc kubenswrapper[4492]: I1126 07:07:50.839723 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rzjrf\" (UniqueName: \"kubernetes.io/projected/0ac796a9-7e9c-44f1-9fe4-0b7457c53334-kube-api-access-rzjrf\") pod \"dnsmasq-dns-94fc6bf75-vk787\" (UID: \"0ac796a9-7e9c-44f1-9fe4-0b7457c53334\") " pod="openstack/dnsmasq-dns-94fc6bf75-vk787" Nov 26 07:07:50 crc kubenswrapper[4492]: I1126 07:07:50.976162 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"35c37b78-2ced-4321-a6bf-8eabc82419df","Type":"ContainerStarted","Data":"e00afe7e2b6f9d1a75a317963c1a520104f0ff141e036d0910c2cce290b32e89"} Nov 26 07:07:51 crc kubenswrapper[4492]: I1126 07:07:51.138425 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-94fc6bf75-vk787" Nov 26 07:07:51 crc kubenswrapper[4492]: I1126 07:07:51.219152 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 26 07:07:51 crc kubenswrapper[4492]: I1126 07:07:51.343261 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-9ftsf"] Nov 26 07:07:51 crc kubenswrapper[4492]: I1126 07:07:51.462810 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 26 07:07:51 crc kubenswrapper[4492]: I1126 07:07:51.569874 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 07:07:51 crc kubenswrapper[4492]: I1126 07:07:51.721825 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 07:07:51 crc kubenswrapper[4492]: I1126 07:07:51.874436 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-94fc6bf75-vk787"] Nov 26 07:07:52 crc kubenswrapper[4492]: I1126 07:07:52.000545 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"a7a8743c-1051-4955-86f7-9ddf8f356459","Type":"ContainerStarted","Data":"2c1b0c751752e61554519adf8fe0cc813c7d43865b18db5265bed50d876bfcb2"} Nov 26 07:07:52 crc kubenswrapper[4492]: I1126 07:07:52.003574 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-9ftsf" event={"ID":"6ca524b1-6cc4-4333-a4fe-724793248c10","Type":"ContainerStarted","Data":"36aaf449a28c128ddd286a6f461f0647680fb7b4437971dfabdfb4417ce8432a"} Nov 26 07:07:52 crc kubenswrapper[4492]: I1126 07:07:52.003618 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-9ftsf" event={"ID":"6ca524b1-6cc4-4333-a4fe-724793248c10","Type":"ContainerStarted","Data":"6212acad36e0d49a49fad9e81394e34c3cb97275d4cbe8e950c13f1fe9c47f47"} Nov 26 07:07:52 crc kubenswrapper[4492]: I1126 07:07:52.014390 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-94fc6bf75-vk787" event={"ID":"0ac796a9-7e9c-44f1-9fe4-0b7457c53334","Type":"ContainerStarted","Data":"8c5ee183c02b662c29e381a1534ded3934ed7d5b6689e270f9638134e5c87141"} Nov 26 07:07:52 crc kubenswrapper[4492]: I1126 07:07:52.018412 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"42b2ffed-86f2-432c-852f-a99a0a7a9f5a","Type":"ContainerStarted","Data":"2d4fae5fe5a7a8a9a1d7794d4dba0a16baede9fa2baa804e642bce0ab973d9f1"} Nov 26 07:07:52 crc kubenswrapper[4492]: I1126 07:07:52.025283 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"8b5a886b-7cbf-4031-8ef9-dd05641aa966","Type":"ContainerStarted","Data":"5078d092631d2be6ed524e97230faf922e50347f876b361a785b57c58a693217"} Nov 26 07:07:52 crc kubenswrapper[4492]: I1126 07:07:52.029043 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"1ad0cb8d-3495-4b1d-b959-1f1efbad9e39","Type":"ContainerStarted","Data":"c00964ee6ed0e65f696f480ba3a668e781e46e52cb287a8a568cb8197376b38b"} Nov 26 07:07:52 crc kubenswrapper[4492]: I1126 07:07:52.037474 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-cell-mapping-9ftsf" podStartSLOduration=3.037450636 podStartE2EDuration="3.037450636s" podCreationTimestamp="2025-11-26 07:07:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 
00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:07:52.030125012 +0000 UTC m=+1167.914013311" watchObservedRunningTime="2025-11-26 07:07:52.037450636 +0000 UTC m=+1167.921338934" Nov 26 07:07:52 crc kubenswrapper[4492]: I1126 07:07:52.133790 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-db-sync-9nfbl"] Nov 26 07:07:52 crc kubenswrapper[4492]: I1126 07:07:52.135553 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-9nfbl" Nov 26 07:07:52 crc kubenswrapper[4492]: I1126 07:07:52.141799 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-scripts" Nov 26 07:07:52 crc kubenswrapper[4492]: I1126 07:07:52.142042 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Nov 26 07:07:52 crc kubenswrapper[4492]: I1126 07:07:52.183366 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-9nfbl"] Nov 26 07:07:52 crc kubenswrapper[4492]: I1126 07:07:52.257147 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6622ed2f-d8aa-4bc0-8a5a-73c0ed329035-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-9nfbl\" (UID: \"6622ed2f-d8aa-4bc0-8a5a-73c0ed329035\") " pod="openstack/nova-cell1-conductor-db-sync-9nfbl" Nov 26 07:07:52 crc kubenswrapper[4492]: I1126 07:07:52.257606 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6w9rp\" (UniqueName: \"kubernetes.io/projected/6622ed2f-d8aa-4bc0-8a5a-73c0ed329035-kube-api-access-6w9rp\") pod \"nova-cell1-conductor-db-sync-9nfbl\" (UID: \"6622ed2f-d8aa-4bc0-8a5a-73c0ed329035\") " pod="openstack/nova-cell1-conductor-db-sync-9nfbl" Nov 26 07:07:52 crc kubenswrapper[4492]: I1126 07:07:52.257708 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6622ed2f-d8aa-4bc0-8a5a-73c0ed329035-scripts\") pod \"nova-cell1-conductor-db-sync-9nfbl\" (UID: \"6622ed2f-d8aa-4bc0-8a5a-73c0ed329035\") " pod="openstack/nova-cell1-conductor-db-sync-9nfbl" Nov 26 07:07:52 crc kubenswrapper[4492]: I1126 07:07:52.257754 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6622ed2f-d8aa-4bc0-8a5a-73c0ed329035-config-data\") pod \"nova-cell1-conductor-db-sync-9nfbl\" (UID: \"6622ed2f-d8aa-4bc0-8a5a-73c0ed329035\") " pod="openstack/nova-cell1-conductor-db-sync-9nfbl" Nov 26 07:07:52 crc kubenswrapper[4492]: I1126 07:07:52.359931 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6w9rp\" (UniqueName: \"kubernetes.io/projected/6622ed2f-d8aa-4bc0-8a5a-73c0ed329035-kube-api-access-6w9rp\") pod \"nova-cell1-conductor-db-sync-9nfbl\" (UID: \"6622ed2f-d8aa-4bc0-8a5a-73c0ed329035\") " pod="openstack/nova-cell1-conductor-db-sync-9nfbl" Nov 26 07:07:52 crc kubenswrapper[4492]: I1126 07:07:52.360116 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6622ed2f-d8aa-4bc0-8a5a-73c0ed329035-scripts\") pod \"nova-cell1-conductor-db-sync-9nfbl\" (UID: \"6622ed2f-d8aa-4bc0-8a5a-73c0ed329035\") " pod="openstack/nova-cell1-conductor-db-sync-9nfbl" Nov 26 07:07:52 crc 
kubenswrapper[4492]: I1126 07:07:52.360214 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6622ed2f-d8aa-4bc0-8a5a-73c0ed329035-config-data\") pod \"nova-cell1-conductor-db-sync-9nfbl\" (UID: \"6622ed2f-d8aa-4bc0-8a5a-73c0ed329035\") " pod="openstack/nova-cell1-conductor-db-sync-9nfbl" Nov 26 07:07:52 crc kubenswrapper[4492]: I1126 07:07:52.360341 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6622ed2f-d8aa-4bc0-8a5a-73c0ed329035-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-9nfbl\" (UID: \"6622ed2f-d8aa-4bc0-8a5a-73c0ed329035\") " pod="openstack/nova-cell1-conductor-db-sync-9nfbl" Nov 26 07:07:52 crc kubenswrapper[4492]: I1126 07:07:52.364851 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6622ed2f-d8aa-4bc0-8a5a-73c0ed329035-config-data\") pod \"nova-cell1-conductor-db-sync-9nfbl\" (UID: \"6622ed2f-d8aa-4bc0-8a5a-73c0ed329035\") " pod="openstack/nova-cell1-conductor-db-sync-9nfbl" Nov 26 07:07:52 crc kubenswrapper[4492]: I1126 07:07:52.365584 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6622ed2f-d8aa-4bc0-8a5a-73c0ed329035-scripts\") pod \"nova-cell1-conductor-db-sync-9nfbl\" (UID: \"6622ed2f-d8aa-4bc0-8a5a-73c0ed329035\") " pod="openstack/nova-cell1-conductor-db-sync-9nfbl" Nov 26 07:07:52 crc kubenswrapper[4492]: I1126 07:07:52.370806 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6622ed2f-d8aa-4bc0-8a5a-73c0ed329035-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-9nfbl\" (UID: \"6622ed2f-d8aa-4bc0-8a5a-73c0ed329035\") " pod="openstack/nova-cell1-conductor-db-sync-9nfbl" Nov 26 07:07:52 crc kubenswrapper[4492]: I1126 07:07:52.387724 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6w9rp\" (UniqueName: \"kubernetes.io/projected/6622ed2f-d8aa-4bc0-8a5a-73c0ed329035-kube-api-access-6w9rp\") pod \"nova-cell1-conductor-db-sync-9nfbl\" (UID: \"6622ed2f-d8aa-4bc0-8a5a-73c0ed329035\") " pod="openstack/nova-cell1-conductor-db-sync-9nfbl" Nov 26 07:07:52 crc kubenswrapper[4492]: I1126 07:07:52.473466 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-9nfbl" Nov 26 07:07:53 crc kubenswrapper[4492]: I1126 07:07:53.065239 4492 generic.go:334] "Generic (PLEG): container finished" podID="0ac796a9-7e9c-44f1-9fe4-0b7457c53334" containerID="d5b721849adc77dfb7bfc630454eba4fdc40d719030ce4c33932fc67097dc95c" exitCode=0 Nov 26 07:07:53 crc kubenswrapper[4492]: I1126 07:07:53.065376 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-94fc6bf75-vk787" event={"ID":"0ac796a9-7e9c-44f1-9fe4-0b7457c53334","Type":"ContainerDied","Data":"d5b721849adc77dfb7bfc630454eba4fdc40d719030ce4c33932fc67097dc95c"} Nov 26 07:07:53 crc kubenswrapper[4492]: I1126 07:07:53.086222 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"35c37b78-2ced-4321-a6bf-8eabc82419df","Type":"ContainerStarted","Data":"1ad9adcbe7c8d67e57a89174b634954539fb2bab3376af76a9cd0f32de5e500c"} Nov 26 07:07:53 crc kubenswrapper[4492]: I1126 07:07:53.086382 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 26 07:07:53 crc kubenswrapper[4492]: I1126 07:07:53.123540 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-9nfbl"] Nov 26 07:07:53 crc kubenswrapper[4492]: I1126 07:07:53.124577 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=3.083047143 podStartE2EDuration="7.12456288s" podCreationTimestamp="2025-11-26 07:07:46 +0000 UTC" firstStartedPulling="2025-11-26 07:07:47.837089047 +0000 UTC m=+1163.720977334" lastFinishedPulling="2025-11-26 07:07:51.878604773 +0000 UTC m=+1167.762493071" observedRunningTime="2025-11-26 07:07:53.111038392 +0000 UTC m=+1168.994926691" watchObservedRunningTime="2025-11-26 07:07:53.12456288 +0000 UTC m=+1169.008451178" Nov 26 07:07:53 crc kubenswrapper[4492]: W1126 07:07:53.130102 4492 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6622ed2f_d8aa_4bc0_8a5a_73c0ed329035.slice/crio-116e4c8249a86759b3754230eb36a73fa9f3d03a107ed618f006fb0db6cb8dba WatchSource:0}: Error finding container 116e4c8249a86759b3754230eb36a73fa9f3d03a107ed618f006fb0db6cb8dba: Status 404 returned error can't find the container with id 116e4c8249a86759b3754230eb36a73fa9f3d03a107ed618f006fb0db6cb8dba Nov 26 07:07:54 crc kubenswrapper[4492]: I1126 07:07:54.094475 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-9nfbl" event={"ID":"6622ed2f-d8aa-4bc0-8a5a-73c0ed329035","Type":"ContainerStarted","Data":"c85b3534b450b9c74a8b2d7534da2321ddb780d2e2d451888bc5f927c5b4a6ba"} Nov 26 07:07:54 crc kubenswrapper[4492]: I1126 07:07:54.094783 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-9nfbl" event={"ID":"6622ed2f-d8aa-4bc0-8a5a-73c0ed329035","Type":"ContainerStarted","Data":"116e4c8249a86759b3754230eb36a73fa9f3d03a107ed618f006fb0db6cb8dba"} Nov 26 07:07:54 crc kubenswrapper[4492]: I1126 07:07:54.097635 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-94fc6bf75-vk787" event={"ID":"0ac796a9-7e9c-44f1-9fe4-0b7457c53334","Type":"ContainerStarted","Data":"5c14b2b57b88803924fbc9e792a07ca17cb71fd423df6a0ba4792ff589a485f9"} Nov 26 07:07:54 crc kubenswrapper[4492]: I1126 07:07:54.097781 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-94fc6bf75-vk787" 
Nov 26 07:07:54 crc kubenswrapper[4492]: I1126 07:07:54.116803 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-db-sync-9nfbl" podStartSLOduration=2.116781305 podStartE2EDuration="2.116781305s" podCreationTimestamp="2025-11-26 07:07:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:07:54.113907342 +0000 UTC m=+1169.997795641" watchObservedRunningTime="2025-11-26 07:07:54.116781305 +0000 UTC m=+1170.000669604" Nov 26 07:07:54 crc kubenswrapper[4492]: I1126 07:07:54.140682 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-94fc6bf75-vk787" podStartSLOduration=4.140658255 podStartE2EDuration="4.140658255s" podCreationTimestamp="2025-11-26 07:07:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:07:54.129905781 +0000 UTC m=+1170.013794079" watchObservedRunningTime="2025-11-26 07:07:54.140658255 +0000 UTC m=+1170.024546554" Nov 26 07:07:54 crc kubenswrapper[4492]: I1126 07:07:54.366920 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 07:07:54 crc kubenswrapper[4492]: I1126 07:07:54.478688 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 26 07:07:56 crc kubenswrapper[4492]: I1126 07:07:56.119367 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"a7a8743c-1051-4955-86f7-9ddf8f356459","Type":"ContainerStarted","Data":"ba807ec63122983bedcd63e4d0c1e99299347c9db49d2d90d2c437c43ce13a5c"} Nov 26 07:07:56 crc kubenswrapper[4492]: I1126 07:07:56.119982 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-novncproxy-0" podUID="a7a8743c-1051-4955-86f7-9ddf8f356459" containerName="nova-cell1-novncproxy-novncproxy" containerID="cri-o://ba807ec63122983bedcd63e4d0c1e99299347c9db49d2d90d2c437c43ce13a5c" gracePeriod=30 Nov 26 07:07:56 crc kubenswrapper[4492]: I1126 07:07:56.124018 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"42b2ffed-86f2-432c-852f-a99a0a7a9f5a","Type":"ContainerStarted","Data":"2f5fc03d258c6fae5cddbc0fec970166ab2eeae4f712fc6fb00be2a0f85b0e7c"} Nov 26 07:07:56 crc kubenswrapper[4492]: I1126 07:07:56.125833 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"8b5a886b-7cbf-4031-8ef9-dd05641aa966","Type":"ContainerStarted","Data":"4a7d6092891a8dcd6fe680de9d3c46dc98ccfc65267acd1b08a997939cf065d3"} Nov 26 07:07:56 crc kubenswrapper[4492]: I1126 07:07:56.128505 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"1ad0cb8d-3495-4b1d-b959-1f1efbad9e39","Type":"ContainerStarted","Data":"19a2c80feaee79a44f965236318e73861b4c1ce749b49142eaa6c42f09495735"} Nov 26 07:07:56 crc kubenswrapper[4492]: I1126 07:07:56.137821 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.70977948 podStartE2EDuration="7.137804343s" podCreationTimestamp="2025-11-26 07:07:49 +0000 UTC" firstStartedPulling="2025-11-26 07:07:51.249325841 +0000 UTC m=+1167.133214139" lastFinishedPulling="2025-11-26 07:07:55.677350704 +0000 UTC m=+1171.561239002" observedRunningTime="2025-11-26 07:07:56.131746964 +0000 
UTC m=+1172.015635262" watchObservedRunningTime="2025-11-26 07:07:56.137804343 +0000 UTC m=+1172.021692641" Nov 26 07:07:56 crc kubenswrapper[4492]: I1126 07:07:56.156277 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.181171171 podStartE2EDuration="6.156263918s" podCreationTimestamp="2025-11-26 07:07:50 +0000 UTC" firstStartedPulling="2025-11-26 07:07:51.730501228 +0000 UTC m=+1167.614389525" lastFinishedPulling="2025-11-26 07:07:55.705593984 +0000 UTC m=+1171.589482272" observedRunningTime="2025-11-26 07:07:56.148648731 +0000 UTC m=+1172.032537029" watchObservedRunningTime="2025-11-26 07:07:56.156263918 +0000 UTC m=+1172.040152206" Nov 26 07:07:57 crc kubenswrapper[4492]: I1126 07:07:57.143293 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"42b2ffed-86f2-432c-852f-a99a0a7a9f5a","Type":"ContainerStarted","Data":"72fd7dc264ad84069e01d097b0d93eb12c31cd5cef61a42a9aa08339b485eec6"} Nov 26 07:07:57 crc kubenswrapper[4492]: I1126 07:07:57.143802 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="42b2ffed-86f2-432c-852f-a99a0a7a9f5a" containerName="nova-metadata-log" containerID="cri-o://2f5fc03d258c6fae5cddbc0fec970166ab2eeae4f712fc6fb00be2a0f85b0e7c" gracePeriod=30 Nov 26 07:07:57 crc kubenswrapper[4492]: I1126 07:07:57.144198 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="42b2ffed-86f2-432c-852f-a99a0a7a9f5a" containerName="nova-metadata-metadata" containerID="cri-o://72fd7dc264ad84069e01d097b0d93eb12c31cd5cef61a42a9aa08339b485eec6" gracePeriod=30 Nov 26 07:07:57 crc kubenswrapper[4492]: I1126 07:07:57.152999 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"1ad0cb8d-3495-4b1d-b959-1f1efbad9e39","Type":"ContainerStarted","Data":"b7f08f0a232c42764e8fcfd4eca65c847ef8d2f7649ddcb29ad0e7dd3708c513"} Nov 26 07:07:57 crc kubenswrapper[4492]: I1126 07:07:57.179814 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=3.083622778 podStartE2EDuration="7.179763956s" podCreationTimestamp="2025-11-26 07:07:50 +0000 UTC" firstStartedPulling="2025-11-26 07:07:51.609530884 +0000 UTC m=+1167.493419182" lastFinishedPulling="2025-11-26 07:07:55.705672062 +0000 UTC m=+1171.589560360" observedRunningTime="2025-11-26 07:07:57.177780316 +0000 UTC m=+1173.061668614" watchObservedRunningTime="2025-11-26 07:07:57.179763956 +0000 UTC m=+1173.063652254" Nov 26 07:07:57 crc kubenswrapper[4492]: I1126 07:07:57.202293 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=4.019030391 podStartE2EDuration="8.202278625s" podCreationTimestamp="2025-11-26 07:07:49 +0000 UTC" firstStartedPulling="2025-11-26 07:07:51.525965585 +0000 UTC m=+1167.409853883" lastFinishedPulling="2025-11-26 07:07:55.709213819 +0000 UTC m=+1171.593102117" observedRunningTime="2025-11-26 07:07:57.190522272 +0000 UTC m=+1173.074410570" watchObservedRunningTime="2025-11-26 07:07:57.202278625 +0000 UTC m=+1173.086166923" Nov 26 07:07:57 crc kubenswrapper[4492]: I1126 07:07:57.831617 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 07:07:58 crc kubenswrapper[4492]: I1126 07:07:58.007143 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/42b2ffed-86f2-432c-852f-a99a0a7a9f5a-logs\") pod \"42b2ffed-86f2-432c-852f-a99a0a7a9f5a\" (UID: \"42b2ffed-86f2-432c-852f-a99a0a7a9f5a\") " Nov 26 07:07:58 crc kubenswrapper[4492]: I1126 07:07:58.007291 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/42b2ffed-86f2-432c-852f-a99a0a7a9f5a-config-data\") pod \"42b2ffed-86f2-432c-852f-a99a0a7a9f5a\" (UID: \"42b2ffed-86f2-432c-852f-a99a0a7a9f5a\") " Nov 26 07:07:58 crc kubenswrapper[4492]: I1126 07:07:58.007546 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8hv9n\" (UniqueName: \"kubernetes.io/projected/42b2ffed-86f2-432c-852f-a99a0a7a9f5a-kube-api-access-8hv9n\") pod \"42b2ffed-86f2-432c-852f-a99a0a7a9f5a\" (UID: \"42b2ffed-86f2-432c-852f-a99a0a7a9f5a\") " Nov 26 07:07:58 crc kubenswrapper[4492]: I1126 07:07:58.007553 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/42b2ffed-86f2-432c-852f-a99a0a7a9f5a-logs" (OuterVolumeSpecName: "logs") pod "42b2ffed-86f2-432c-852f-a99a0a7a9f5a" (UID: "42b2ffed-86f2-432c-852f-a99a0a7a9f5a"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:07:58 crc kubenswrapper[4492]: I1126 07:07:58.008156 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/42b2ffed-86f2-432c-852f-a99a0a7a9f5a-combined-ca-bundle\") pod \"42b2ffed-86f2-432c-852f-a99a0a7a9f5a\" (UID: \"42b2ffed-86f2-432c-852f-a99a0a7a9f5a\") " Nov 26 07:07:58 crc kubenswrapper[4492]: I1126 07:07:58.008636 4492 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/42b2ffed-86f2-432c-852f-a99a0a7a9f5a-logs\") on node \"crc\" DevicePath \"\"" Nov 26 07:07:58 crc kubenswrapper[4492]: I1126 07:07:58.014439 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/42b2ffed-86f2-432c-852f-a99a0a7a9f5a-kube-api-access-8hv9n" (OuterVolumeSpecName: "kube-api-access-8hv9n") pod "42b2ffed-86f2-432c-852f-a99a0a7a9f5a" (UID: "42b2ffed-86f2-432c-852f-a99a0a7a9f5a"). InnerVolumeSpecName "kube-api-access-8hv9n". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:07:58 crc kubenswrapper[4492]: I1126 07:07:58.032745 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/42b2ffed-86f2-432c-852f-a99a0a7a9f5a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "42b2ffed-86f2-432c-852f-a99a0a7a9f5a" (UID: "42b2ffed-86f2-432c-852f-a99a0a7a9f5a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:07:58 crc kubenswrapper[4492]: I1126 07:07:58.047118 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/42b2ffed-86f2-432c-852f-a99a0a7a9f5a-config-data" (OuterVolumeSpecName: "config-data") pod "42b2ffed-86f2-432c-852f-a99a0a7a9f5a" (UID: "42b2ffed-86f2-432c-852f-a99a0a7a9f5a"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:07:58 crc kubenswrapper[4492]: I1126 07:07:58.109981 4492 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/42b2ffed-86f2-432c-852f-a99a0a7a9f5a-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 07:07:58 crc kubenswrapper[4492]: I1126 07:07:58.110010 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8hv9n\" (UniqueName: \"kubernetes.io/projected/42b2ffed-86f2-432c-852f-a99a0a7a9f5a-kube-api-access-8hv9n\") on node \"crc\" DevicePath \"\"" Nov 26 07:07:58 crc kubenswrapper[4492]: I1126 07:07:58.110022 4492 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/42b2ffed-86f2-432c-852f-a99a0a7a9f5a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:07:58 crc kubenswrapper[4492]: I1126 07:07:58.160370 4492 generic.go:334] "Generic (PLEG): container finished" podID="42b2ffed-86f2-432c-852f-a99a0a7a9f5a" containerID="72fd7dc264ad84069e01d097b0d93eb12c31cd5cef61a42a9aa08339b485eec6" exitCode=0 Nov 26 07:07:58 crc kubenswrapper[4492]: I1126 07:07:58.160402 4492 generic.go:334] "Generic (PLEG): container finished" podID="42b2ffed-86f2-432c-852f-a99a0a7a9f5a" containerID="2f5fc03d258c6fae5cddbc0fec970166ab2eeae4f712fc6fb00be2a0f85b0e7c" exitCode=143 Nov 26 07:07:58 crc kubenswrapper[4492]: I1126 07:07:58.161162 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 07:07:58 crc kubenswrapper[4492]: I1126 07:07:58.163244 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"42b2ffed-86f2-432c-852f-a99a0a7a9f5a","Type":"ContainerDied","Data":"72fd7dc264ad84069e01d097b0d93eb12c31cd5cef61a42a9aa08339b485eec6"} Nov 26 07:07:58 crc kubenswrapper[4492]: I1126 07:07:58.163298 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"42b2ffed-86f2-432c-852f-a99a0a7a9f5a","Type":"ContainerDied","Data":"2f5fc03d258c6fae5cddbc0fec970166ab2eeae4f712fc6fb00be2a0f85b0e7c"} Nov 26 07:07:58 crc kubenswrapper[4492]: I1126 07:07:58.163311 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"42b2ffed-86f2-432c-852f-a99a0a7a9f5a","Type":"ContainerDied","Data":"2d4fae5fe5a7a8a9a1d7794d4dba0a16baede9fa2baa804e642bce0ab973d9f1"} Nov 26 07:07:58 crc kubenswrapper[4492]: I1126 07:07:58.163362 4492 scope.go:117] "RemoveContainer" containerID="72fd7dc264ad84069e01d097b0d93eb12c31cd5cef61a42a9aa08339b485eec6" Nov 26 07:07:58 crc kubenswrapper[4492]: I1126 07:07:58.197860 4492 scope.go:117] "RemoveContainer" containerID="2f5fc03d258c6fae5cddbc0fec970166ab2eeae4f712fc6fb00be2a0f85b0e7c" Nov 26 07:07:58 crc kubenswrapper[4492]: I1126 07:07:58.201060 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 07:07:58 crc kubenswrapper[4492]: I1126 07:07:58.208345 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 07:07:58 crc kubenswrapper[4492]: I1126 07:07:58.225991 4492 scope.go:117] "RemoveContainer" containerID="72fd7dc264ad84069e01d097b0d93eb12c31cd5cef61a42a9aa08339b485eec6" Nov 26 07:07:58 crc kubenswrapper[4492]: E1126 07:07:58.226485 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"72fd7dc264ad84069e01d097b0d93eb12c31cd5cef61a42a9aa08339b485eec6\": 
container with ID starting with 72fd7dc264ad84069e01d097b0d93eb12c31cd5cef61a42a9aa08339b485eec6 not found: ID does not exist" containerID="72fd7dc264ad84069e01d097b0d93eb12c31cd5cef61a42a9aa08339b485eec6" Nov 26 07:07:58 crc kubenswrapper[4492]: I1126 07:07:58.226531 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"72fd7dc264ad84069e01d097b0d93eb12c31cd5cef61a42a9aa08339b485eec6"} err="failed to get container status \"72fd7dc264ad84069e01d097b0d93eb12c31cd5cef61a42a9aa08339b485eec6\": rpc error: code = NotFound desc = could not find container \"72fd7dc264ad84069e01d097b0d93eb12c31cd5cef61a42a9aa08339b485eec6\": container with ID starting with 72fd7dc264ad84069e01d097b0d93eb12c31cd5cef61a42a9aa08339b485eec6 not found: ID does not exist" Nov 26 07:07:58 crc kubenswrapper[4492]: I1126 07:07:58.226562 4492 scope.go:117] "RemoveContainer" containerID="2f5fc03d258c6fae5cddbc0fec970166ab2eeae4f712fc6fb00be2a0f85b0e7c" Nov 26 07:07:58 crc kubenswrapper[4492]: E1126 07:07:58.226896 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2f5fc03d258c6fae5cddbc0fec970166ab2eeae4f712fc6fb00be2a0f85b0e7c\": container with ID starting with 2f5fc03d258c6fae5cddbc0fec970166ab2eeae4f712fc6fb00be2a0f85b0e7c not found: ID does not exist" containerID="2f5fc03d258c6fae5cddbc0fec970166ab2eeae4f712fc6fb00be2a0f85b0e7c" Nov 26 07:07:58 crc kubenswrapper[4492]: I1126 07:07:58.226929 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2f5fc03d258c6fae5cddbc0fec970166ab2eeae4f712fc6fb00be2a0f85b0e7c"} err="failed to get container status \"2f5fc03d258c6fae5cddbc0fec970166ab2eeae4f712fc6fb00be2a0f85b0e7c\": rpc error: code = NotFound desc = could not find container \"2f5fc03d258c6fae5cddbc0fec970166ab2eeae4f712fc6fb00be2a0f85b0e7c\": container with ID starting with 2f5fc03d258c6fae5cddbc0fec970166ab2eeae4f712fc6fb00be2a0f85b0e7c not found: ID does not exist" Nov 26 07:07:58 crc kubenswrapper[4492]: I1126 07:07:58.226959 4492 scope.go:117] "RemoveContainer" containerID="72fd7dc264ad84069e01d097b0d93eb12c31cd5cef61a42a9aa08339b485eec6" Nov 26 07:07:58 crc kubenswrapper[4492]: I1126 07:07:58.227243 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"72fd7dc264ad84069e01d097b0d93eb12c31cd5cef61a42a9aa08339b485eec6"} err="failed to get container status \"72fd7dc264ad84069e01d097b0d93eb12c31cd5cef61a42a9aa08339b485eec6\": rpc error: code = NotFound desc = could not find container \"72fd7dc264ad84069e01d097b0d93eb12c31cd5cef61a42a9aa08339b485eec6\": container with ID starting with 72fd7dc264ad84069e01d097b0d93eb12c31cd5cef61a42a9aa08339b485eec6 not found: ID does not exist" Nov 26 07:07:58 crc kubenswrapper[4492]: I1126 07:07:58.227280 4492 scope.go:117] "RemoveContainer" containerID="2f5fc03d258c6fae5cddbc0fec970166ab2eeae4f712fc6fb00be2a0f85b0e7c" Nov 26 07:07:58 crc kubenswrapper[4492]: I1126 07:07:58.227589 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2f5fc03d258c6fae5cddbc0fec970166ab2eeae4f712fc6fb00be2a0f85b0e7c"} err="failed to get container status \"2f5fc03d258c6fae5cddbc0fec970166ab2eeae4f712fc6fb00be2a0f85b0e7c\": rpc error: code = NotFound desc = could not find container \"2f5fc03d258c6fae5cddbc0fec970166ab2eeae4f712fc6fb00be2a0f85b0e7c\": container with ID starting with 2f5fc03d258c6fae5cddbc0fec970166ab2eeae4f712fc6fb00be2a0f85b0e7c not 
found: ID does not exist" Nov 26 07:07:58 crc kubenswrapper[4492]: I1126 07:07:58.232991 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 26 07:07:58 crc kubenswrapper[4492]: E1126 07:07:58.233492 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="42b2ffed-86f2-432c-852f-a99a0a7a9f5a" containerName="nova-metadata-metadata" Nov 26 07:07:58 crc kubenswrapper[4492]: I1126 07:07:58.233512 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="42b2ffed-86f2-432c-852f-a99a0a7a9f5a" containerName="nova-metadata-metadata" Nov 26 07:07:58 crc kubenswrapper[4492]: E1126 07:07:58.233553 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="42b2ffed-86f2-432c-852f-a99a0a7a9f5a" containerName="nova-metadata-log" Nov 26 07:07:58 crc kubenswrapper[4492]: I1126 07:07:58.233560 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="42b2ffed-86f2-432c-852f-a99a0a7a9f5a" containerName="nova-metadata-log" Nov 26 07:07:58 crc kubenswrapper[4492]: I1126 07:07:58.233750 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="42b2ffed-86f2-432c-852f-a99a0a7a9f5a" containerName="nova-metadata-log" Nov 26 07:07:58 crc kubenswrapper[4492]: I1126 07:07:58.233769 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="42b2ffed-86f2-432c-852f-a99a0a7a9f5a" containerName="nova-metadata-metadata" Nov 26 07:07:58 crc kubenswrapper[4492]: I1126 07:07:58.234834 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 07:07:58 crc kubenswrapper[4492]: I1126 07:07:58.237829 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 26 07:07:58 crc kubenswrapper[4492]: I1126 07:07:58.238031 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Nov 26 07:07:58 crc kubenswrapper[4492]: I1126 07:07:58.263128 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 07:07:58 crc kubenswrapper[4492]: I1126 07:07:58.418206 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8f71ed76-fe8e-428b-811f-458e61ba2fc8-logs\") pod \"nova-metadata-0\" (UID: \"8f71ed76-fe8e-428b-811f-458e61ba2fc8\") " pod="openstack/nova-metadata-0" Nov 26 07:07:58 crc kubenswrapper[4492]: I1126 07:07:58.418693 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8f71ed76-fe8e-428b-811f-458e61ba2fc8-config-data\") pod \"nova-metadata-0\" (UID: \"8f71ed76-fe8e-428b-811f-458e61ba2fc8\") " pod="openstack/nova-metadata-0" Nov 26 07:07:58 crc kubenswrapper[4492]: I1126 07:07:58.418752 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2wtqg\" (UniqueName: \"kubernetes.io/projected/8f71ed76-fe8e-428b-811f-458e61ba2fc8-kube-api-access-2wtqg\") pod \"nova-metadata-0\" (UID: \"8f71ed76-fe8e-428b-811f-458e61ba2fc8\") " pod="openstack/nova-metadata-0" Nov 26 07:07:58 crc kubenswrapper[4492]: I1126 07:07:58.418786 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8f71ed76-fe8e-428b-811f-458e61ba2fc8-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"8f71ed76-fe8e-428b-811f-458e61ba2fc8\") " 
pod="openstack/nova-metadata-0" Nov 26 07:07:58 crc kubenswrapper[4492]: I1126 07:07:58.418868 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/8f71ed76-fe8e-428b-811f-458e61ba2fc8-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"8f71ed76-fe8e-428b-811f-458e61ba2fc8\") " pod="openstack/nova-metadata-0" Nov 26 07:07:58 crc kubenswrapper[4492]: I1126 07:07:58.450527 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="42b2ffed-86f2-432c-852f-a99a0a7a9f5a" path="/var/lib/kubelet/pods/42b2ffed-86f2-432c-852f-a99a0a7a9f5a/volumes" Nov 26 07:07:58 crc kubenswrapper[4492]: I1126 07:07:58.521313 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8f71ed76-fe8e-428b-811f-458e61ba2fc8-config-data\") pod \"nova-metadata-0\" (UID: \"8f71ed76-fe8e-428b-811f-458e61ba2fc8\") " pod="openstack/nova-metadata-0" Nov 26 07:07:58 crc kubenswrapper[4492]: I1126 07:07:58.521376 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2wtqg\" (UniqueName: \"kubernetes.io/projected/8f71ed76-fe8e-428b-811f-458e61ba2fc8-kube-api-access-2wtqg\") pod \"nova-metadata-0\" (UID: \"8f71ed76-fe8e-428b-811f-458e61ba2fc8\") " pod="openstack/nova-metadata-0" Nov 26 07:07:58 crc kubenswrapper[4492]: I1126 07:07:58.521418 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8f71ed76-fe8e-428b-811f-458e61ba2fc8-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"8f71ed76-fe8e-428b-811f-458e61ba2fc8\") " pod="openstack/nova-metadata-0" Nov 26 07:07:58 crc kubenswrapper[4492]: I1126 07:07:58.522447 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/8f71ed76-fe8e-428b-811f-458e61ba2fc8-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"8f71ed76-fe8e-428b-811f-458e61ba2fc8\") " pod="openstack/nova-metadata-0" Nov 26 07:07:58 crc kubenswrapper[4492]: I1126 07:07:58.522644 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8f71ed76-fe8e-428b-811f-458e61ba2fc8-logs\") pod \"nova-metadata-0\" (UID: \"8f71ed76-fe8e-428b-811f-458e61ba2fc8\") " pod="openstack/nova-metadata-0" Nov 26 07:07:58 crc kubenswrapper[4492]: I1126 07:07:58.523421 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8f71ed76-fe8e-428b-811f-458e61ba2fc8-logs\") pod \"nova-metadata-0\" (UID: \"8f71ed76-fe8e-428b-811f-458e61ba2fc8\") " pod="openstack/nova-metadata-0" Nov 26 07:07:58 crc kubenswrapper[4492]: I1126 07:07:58.526312 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/8f71ed76-fe8e-428b-811f-458e61ba2fc8-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"8f71ed76-fe8e-428b-811f-458e61ba2fc8\") " pod="openstack/nova-metadata-0" Nov 26 07:07:58 crc kubenswrapper[4492]: I1126 07:07:58.528246 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8f71ed76-fe8e-428b-811f-458e61ba2fc8-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"8f71ed76-fe8e-428b-811f-458e61ba2fc8\") " 
pod="openstack/nova-metadata-0" Nov 26 07:07:58 crc kubenswrapper[4492]: I1126 07:07:58.534726 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8f71ed76-fe8e-428b-811f-458e61ba2fc8-config-data\") pod \"nova-metadata-0\" (UID: \"8f71ed76-fe8e-428b-811f-458e61ba2fc8\") " pod="openstack/nova-metadata-0" Nov 26 07:07:58 crc kubenswrapper[4492]: I1126 07:07:58.540184 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2wtqg\" (UniqueName: \"kubernetes.io/projected/8f71ed76-fe8e-428b-811f-458e61ba2fc8-kube-api-access-2wtqg\") pod \"nova-metadata-0\" (UID: \"8f71ed76-fe8e-428b-811f-458e61ba2fc8\") " pod="openstack/nova-metadata-0" Nov 26 07:07:58 crc kubenswrapper[4492]: I1126 07:07:58.562507 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 07:07:59 crc kubenswrapper[4492]: I1126 07:07:59.036788 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 07:07:59 crc kubenswrapper[4492]: I1126 07:07:59.171926 4492 generic.go:334] "Generic (PLEG): container finished" podID="6622ed2f-d8aa-4bc0-8a5a-73c0ed329035" containerID="c85b3534b450b9c74a8b2d7534da2321ddb780d2e2d451888bc5f927c5b4a6ba" exitCode=0 Nov 26 07:07:59 crc kubenswrapper[4492]: I1126 07:07:59.172033 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-9nfbl" event={"ID":"6622ed2f-d8aa-4bc0-8a5a-73c0ed329035","Type":"ContainerDied","Data":"c85b3534b450b9c74a8b2d7534da2321ddb780d2e2d451888bc5f927c5b4a6ba"} Nov 26 07:07:59 crc kubenswrapper[4492]: I1126 07:07:59.175823 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"8f71ed76-fe8e-428b-811f-458e61ba2fc8","Type":"ContainerStarted","Data":"380bf1d13df40d050021c59123c972f556a41b72903a51c1b4f04b79e2b40e44"} Nov 26 07:08:00 crc kubenswrapper[4492]: I1126 07:08:00.188351 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"8f71ed76-fe8e-428b-811f-458e61ba2fc8","Type":"ContainerStarted","Data":"18fff44e7f8edb80c2d457b9cfa095985bea102d920008838d87b64fb3c07cb5"} Nov 26 07:08:00 crc kubenswrapper[4492]: I1126 07:08:00.188696 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"8f71ed76-fe8e-428b-811f-458e61ba2fc8","Type":"ContainerStarted","Data":"a4924a7bce4f45a5694044c534cf5ede7f0152992d7f0e1bac3870bb6daade46"} Nov 26 07:08:00 crc kubenswrapper[4492]: I1126 07:08:00.220131 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.220111621 podStartE2EDuration="2.220111621s" podCreationTimestamp="2025-11-26 07:07:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:08:00.209597022 +0000 UTC m=+1176.093485310" watchObservedRunningTime="2025-11-26 07:08:00.220111621 +0000 UTC m=+1176.103999919" Nov 26 07:08:00 crc kubenswrapper[4492]: I1126 07:08:00.413641 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Nov 26 07:08:00 crc kubenswrapper[4492]: I1126 07:08:00.433249 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 26 07:08:00 crc kubenswrapper[4492]: I1126 07:08:00.433324 4492 kubelet.go:2542] "SyncLoop (probe)" 
probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 26 07:08:00 crc kubenswrapper[4492]: I1126 07:08:00.502013 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-9nfbl" Nov 26 07:08:00 crc kubenswrapper[4492]: I1126 07:08:00.669123 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6622ed2f-d8aa-4bc0-8a5a-73c0ed329035-combined-ca-bundle\") pod \"6622ed2f-d8aa-4bc0-8a5a-73c0ed329035\" (UID: \"6622ed2f-d8aa-4bc0-8a5a-73c0ed329035\") " Nov 26 07:08:00 crc kubenswrapper[4492]: I1126 07:08:00.669202 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6622ed2f-d8aa-4bc0-8a5a-73c0ed329035-scripts\") pod \"6622ed2f-d8aa-4bc0-8a5a-73c0ed329035\" (UID: \"6622ed2f-d8aa-4bc0-8a5a-73c0ed329035\") " Nov 26 07:08:00 crc kubenswrapper[4492]: I1126 07:08:00.669235 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6w9rp\" (UniqueName: \"kubernetes.io/projected/6622ed2f-d8aa-4bc0-8a5a-73c0ed329035-kube-api-access-6w9rp\") pod \"6622ed2f-d8aa-4bc0-8a5a-73c0ed329035\" (UID: \"6622ed2f-d8aa-4bc0-8a5a-73c0ed329035\") " Nov 26 07:08:00 crc kubenswrapper[4492]: I1126 07:08:00.669310 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6622ed2f-d8aa-4bc0-8a5a-73c0ed329035-config-data\") pod \"6622ed2f-d8aa-4bc0-8a5a-73c0ed329035\" (UID: \"6622ed2f-d8aa-4bc0-8a5a-73c0ed329035\") " Nov 26 07:08:00 crc kubenswrapper[4492]: I1126 07:08:00.676971 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6622ed2f-d8aa-4bc0-8a5a-73c0ed329035-scripts" (OuterVolumeSpecName: "scripts") pod "6622ed2f-d8aa-4bc0-8a5a-73c0ed329035" (UID: "6622ed2f-d8aa-4bc0-8a5a-73c0ed329035"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:08:00 crc kubenswrapper[4492]: I1126 07:08:00.677557 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6622ed2f-d8aa-4bc0-8a5a-73c0ed329035-kube-api-access-6w9rp" (OuterVolumeSpecName: "kube-api-access-6w9rp") pod "6622ed2f-d8aa-4bc0-8a5a-73c0ed329035" (UID: "6622ed2f-d8aa-4bc0-8a5a-73c0ed329035"). InnerVolumeSpecName "kube-api-access-6w9rp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:08:00 crc kubenswrapper[4492]: I1126 07:08:00.709478 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6622ed2f-d8aa-4bc0-8a5a-73c0ed329035-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6622ed2f-d8aa-4bc0-8a5a-73c0ed329035" (UID: "6622ed2f-d8aa-4bc0-8a5a-73c0ed329035"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:08:00 crc kubenswrapper[4492]: I1126 07:08:00.722661 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6622ed2f-d8aa-4bc0-8a5a-73c0ed329035-config-data" (OuterVolumeSpecName: "config-data") pod "6622ed2f-d8aa-4bc0-8a5a-73c0ed329035" (UID: "6622ed2f-d8aa-4bc0-8a5a-73c0ed329035"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:08:00 crc kubenswrapper[4492]: I1126 07:08:00.774987 4492 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6622ed2f-d8aa-4bc0-8a5a-73c0ed329035-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 07:08:00 crc kubenswrapper[4492]: I1126 07:08:00.775017 4492 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6622ed2f-d8aa-4bc0-8a5a-73c0ed329035-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:08:00 crc kubenswrapper[4492]: I1126 07:08:00.775029 4492 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6622ed2f-d8aa-4bc0-8a5a-73c0ed329035-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 07:08:00 crc kubenswrapper[4492]: I1126 07:08:00.775039 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6w9rp\" (UniqueName: \"kubernetes.io/projected/6622ed2f-d8aa-4bc0-8a5a-73c0ed329035-kube-api-access-6w9rp\") on node \"crc\" DevicePath \"\"" Nov 26 07:08:00 crc kubenswrapper[4492]: I1126 07:08:00.810392 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Nov 26 07:08:00 crc kubenswrapper[4492]: I1126 07:08:00.811593 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 26 07:08:00 crc kubenswrapper[4492]: I1126 07:08:00.839444 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Nov 26 07:08:01 crc kubenswrapper[4492]: I1126 07:08:01.140245 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-94fc6bf75-vk787" Nov 26 07:08:01 crc kubenswrapper[4492]: I1126 07:08:01.193472 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-74d7dcb8f-sgmxq"] Nov 26 07:08:01 crc kubenswrapper[4492]: I1126 07:08:01.193729 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-74d7dcb8f-sgmxq" podUID="d0edf899-a089-4aca-8ad3-d8e6f8b8cc14" containerName="dnsmasq-dns" containerID="cri-o://3195d622466a8c8147de6e39591bbec307d6b9523bbcfc2ea741d0b8bcd7dc3a" gracePeriod=10 Nov 26 07:08:01 crc kubenswrapper[4492]: I1126 07:08:01.232309 4492 generic.go:334] "Generic (PLEG): container finished" podID="6ca524b1-6cc4-4333-a4fe-724793248c10" containerID="36aaf449a28c128ddd286a6f461f0647680fb7b4437971dfabdfb4417ce8432a" exitCode=0 Nov 26 07:08:01 crc kubenswrapper[4492]: I1126 07:08:01.232391 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-9ftsf" event={"ID":"6ca524b1-6cc4-4333-a4fe-724793248c10","Type":"ContainerDied","Data":"36aaf449a28c128ddd286a6f461f0647680fb7b4437971dfabdfb4417ce8432a"} Nov 26 07:08:01 crc kubenswrapper[4492]: I1126 07:08:01.250759 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-9nfbl" Nov 26 07:08:01 crc kubenswrapper[4492]: I1126 07:08:01.251661 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-9nfbl" event={"ID":"6622ed2f-d8aa-4bc0-8a5a-73c0ed329035","Type":"ContainerDied","Data":"116e4c8249a86759b3754230eb36a73fa9f3d03a107ed618f006fb0db6cb8dba"} Nov 26 07:08:01 crc kubenswrapper[4492]: I1126 07:08:01.251696 4492 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="116e4c8249a86759b3754230eb36a73fa9f3d03a107ed618f006fb0db6cb8dba" Nov 26 07:08:01 crc kubenswrapper[4492]: I1126 07:08:01.296675 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Nov 26 07:08:01 crc kubenswrapper[4492]: I1126 07:08:01.361073 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 26 07:08:01 crc kubenswrapper[4492]: E1126 07:08:01.361478 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6622ed2f-d8aa-4bc0-8a5a-73c0ed329035" containerName="nova-cell1-conductor-db-sync" Nov 26 07:08:01 crc kubenswrapper[4492]: I1126 07:08:01.361497 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="6622ed2f-d8aa-4bc0-8a5a-73c0ed329035" containerName="nova-cell1-conductor-db-sync" Nov 26 07:08:01 crc kubenswrapper[4492]: I1126 07:08:01.361709 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="6622ed2f-d8aa-4bc0-8a5a-73c0ed329035" containerName="nova-cell1-conductor-db-sync" Nov 26 07:08:01 crc kubenswrapper[4492]: I1126 07:08:01.362453 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 26 07:08:01 crc kubenswrapper[4492]: I1126 07:08:01.365094 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Nov 26 07:08:01 crc kubenswrapper[4492]: I1126 07:08:01.388290 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 26 07:08:01 crc kubenswrapper[4492]: I1126 07:08:01.407164 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f403bbb4-0c46-4b60-b7a2-78a0238e91e9-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"f403bbb4-0c46-4b60-b7a2-78a0238e91e9\") " pod="openstack/nova-cell1-conductor-0" Nov 26 07:08:01 crc kubenswrapper[4492]: I1126 07:08:01.407247 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sscl6\" (UniqueName: \"kubernetes.io/projected/f403bbb4-0c46-4b60-b7a2-78a0238e91e9-kube-api-access-sscl6\") pod \"nova-cell1-conductor-0\" (UID: \"f403bbb4-0c46-4b60-b7a2-78a0238e91e9\") " pod="openstack/nova-cell1-conductor-0" Nov 26 07:08:01 crc kubenswrapper[4492]: I1126 07:08:01.407284 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f403bbb4-0c46-4b60-b7a2-78a0238e91e9-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"f403bbb4-0c46-4b60-b7a2-78a0238e91e9\") " pod="openstack/nova-cell1-conductor-0" Nov 26 07:08:01 crc kubenswrapper[4492]: I1126 07:08:01.509767 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f403bbb4-0c46-4b60-b7a2-78a0238e91e9-combined-ca-bundle\") pod 
\"nova-cell1-conductor-0\" (UID: \"f403bbb4-0c46-4b60-b7a2-78a0238e91e9\") " pod="openstack/nova-cell1-conductor-0" Nov 26 07:08:01 crc kubenswrapper[4492]: I1126 07:08:01.509831 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sscl6\" (UniqueName: \"kubernetes.io/projected/f403bbb4-0c46-4b60-b7a2-78a0238e91e9-kube-api-access-sscl6\") pod \"nova-cell1-conductor-0\" (UID: \"f403bbb4-0c46-4b60-b7a2-78a0238e91e9\") " pod="openstack/nova-cell1-conductor-0" Nov 26 07:08:01 crc kubenswrapper[4492]: I1126 07:08:01.509866 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f403bbb4-0c46-4b60-b7a2-78a0238e91e9-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"f403bbb4-0c46-4b60-b7a2-78a0238e91e9\") " pod="openstack/nova-cell1-conductor-0" Nov 26 07:08:01 crc kubenswrapper[4492]: I1126 07:08:01.514142 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f403bbb4-0c46-4b60-b7a2-78a0238e91e9-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"f403bbb4-0c46-4b60-b7a2-78a0238e91e9\") " pod="openstack/nova-cell1-conductor-0" Nov 26 07:08:01 crc kubenswrapper[4492]: I1126 07:08:01.516509 4492 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="1ad0cb8d-3495-4b1d-b959-1f1efbad9e39" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.202:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 26 07:08:01 crc kubenswrapper[4492]: I1126 07:08:01.516830 4492 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="1ad0cb8d-3495-4b1d-b959-1f1efbad9e39" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.202:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 26 07:08:01 crc kubenswrapper[4492]: I1126 07:08:01.528718 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f403bbb4-0c46-4b60-b7a2-78a0238e91e9-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"f403bbb4-0c46-4b60-b7a2-78a0238e91e9\") " pod="openstack/nova-cell1-conductor-0" Nov 26 07:08:01 crc kubenswrapper[4492]: I1126 07:08:01.552625 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sscl6\" (UniqueName: \"kubernetes.io/projected/f403bbb4-0c46-4b60-b7a2-78a0238e91e9-kube-api-access-sscl6\") pod \"nova-cell1-conductor-0\" (UID: \"f403bbb4-0c46-4b60-b7a2-78a0238e91e9\") " pod="openstack/nova-cell1-conductor-0" Nov 26 07:08:01 crc kubenswrapper[4492]: I1126 07:08:01.687596 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-74d7dcb8f-sgmxq" Nov 26 07:08:01 crc kubenswrapper[4492]: I1126 07:08:01.700554 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 26 07:08:01 crc kubenswrapper[4492]: I1126 07:08:01.717527 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d0edf899-a089-4aca-8ad3-d8e6f8b8cc14-dns-swift-storage-0\") pod \"d0edf899-a089-4aca-8ad3-d8e6f8b8cc14\" (UID: \"d0edf899-a089-4aca-8ad3-d8e6f8b8cc14\") " Nov 26 07:08:01 crc kubenswrapper[4492]: I1126 07:08:01.717576 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d0edf899-a089-4aca-8ad3-d8e6f8b8cc14-config\") pod \"d0edf899-a089-4aca-8ad3-d8e6f8b8cc14\" (UID: \"d0edf899-a089-4aca-8ad3-d8e6f8b8cc14\") " Nov 26 07:08:01 crc kubenswrapper[4492]: I1126 07:08:01.717657 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d0edf899-a089-4aca-8ad3-d8e6f8b8cc14-ovsdbserver-nb\") pod \"d0edf899-a089-4aca-8ad3-d8e6f8b8cc14\" (UID: \"d0edf899-a089-4aca-8ad3-d8e6f8b8cc14\") " Nov 26 07:08:01 crc kubenswrapper[4492]: I1126 07:08:01.717753 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d0edf899-a089-4aca-8ad3-d8e6f8b8cc14-dns-svc\") pod \"d0edf899-a089-4aca-8ad3-d8e6f8b8cc14\" (UID: \"d0edf899-a089-4aca-8ad3-d8e6f8b8cc14\") " Nov 26 07:08:01 crc kubenswrapper[4492]: I1126 07:08:01.717800 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nxv5q\" (UniqueName: \"kubernetes.io/projected/d0edf899-a089-4aca-8ad3-d8e6f8b8cc14-kube-api-access-nxv5q\") pod \"d0edf899-a089-4aca-8ad3-d8e6f8b8cc14\" (UID: \"d0edf899-a089-4aca-8ad3-d8e6f8b8cc14\") " Nov 26 07:08:01 crc kubenswrapper[4492]: I1126 07:08:01.717839 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d0edf899-a089-4aca-8ad3-d8e6f8b8cc14-ovsdbserver-sb\") pod \"d0edf899-a089-4aca-8ad3-d8e6f8b8cc14\" (UID: \"d0edf899-a089-4aca-8ad3-d8e6f8b8cc14\") " Nov 26 07:08:01 crc kubenswrapper[4492]: I1126 07:08:01.751420 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d0edf899-a089-4aca-8ad3-d8e6f8b8cc14-kube-api-access-nxv5q" (OuterVolumeSpecName: "kube-api-access-nxv5q") pod "d0edf899-a089-4aca-8ad3-d8e6f8b8cc14" (UID: "d0edf899-a089-4aca-8ad3-d8e6f8b8cc14"). InnerVolumeSpecName "kube-api-access-nxv5q". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:08:01 crc kubenswrapper[4492]: I1126 07:08:01.780495 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d0edf899-a089-4aca-8ad3-d8e6f8b8cc14-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "d0edf899-a089-4aca-8ad3-d8e6f8b8cc14" (UID: "d0edf899-a089-4aca-8ad3-d8e6f8b8cc14"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:08:01 crc kubenswrapper[4492]: I1126 07:08:01.800673 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d0edf899-a089-4aca-8ad3-d8e6f8b8cc14-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "d0edf899-a089-4aca-8ad3-d8e6f8b8cc14" (UID: "d0edf899-a089-4aca-8ad3-d8e6f8b8cc14"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:08:01 crc kubenswrapper[4492]: I1126 07:08:01.821163 4492 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d0edf899-a089-4aca-8ad3-d8e6f8b8cc14-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 07:08:01 crc kubenswrapper[4492]: I1126 07:08:01.821215 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nxv5q\" (UniqueName: \"kubernetes.io/projected/d0edf899-a089-4aca-8ad3-d8e6f8b8cc14-kube-api-access-nxv5q\") on node \"crc\" DevicePath \"\"" Nov 26 07:08:01 crc kubenswrapper[4492]: I1126 07:08:01.821229 4492 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d0edf899-a089-4aca-8ad3-d8e6f8b8cc14-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 26 07:08:01 crc kubenswrapper[4492]: I1126 07:08:01.828742 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d0edf899-a089-4aca-8ad3-d8e6f8b8cc14-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "d0edf899-a089-4aca-8ad3-d8e6f8b8cc14" (UID: "d0edf899-a089-4aca-8ad3-d8e6f8b8cc14"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:08:01 crc kubenswrapper[4492]: I1126 07:08:01.829721 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d0edf899-a089-4aca-8ad3-d8e6f8b8cc14-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "d0edf899-a089-4aca-8ad3-d8e6f8b8cc14" (UID: "d0edf899-a089-4aca-8ad3-d8e6f8b8cc14"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:08:01 crc kubenswrapper[4492]: I1126 07:08:01.855824 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d0edf899-a089-4aca-8ad3-d8e6f8b8cc14-config" (OuterVolumeSpecName: "config") pod "d0edf899-a089-4aca-8ad3-d8e6f8b8cc14" (UID: "d0edf899-a089-4aca-8ad3-d8e6f8b8cc14"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:08:01 crc kubenswrapper[4492]: I1126 07:08:01.921923 4492 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d0edf899-a089-4aca-8ad3-d8e6f8b8cc14-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 26 07:08:01 crc kubenswrapper[4492]: I1126 07:08:01.921964 4492 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d0edf899-a089-4aca-8ad3-d8e6f8b8cc14-config\") on node \"crc\" DevicePath \"\"" Nov 26 07:08:01 crc kubenswrapper[4492]: I1126 07:08:01.921975 4492 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d0edf899-a089-4aca-8ad3-d8e6f8b8cc14-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 26 07:08:02 crc kubenswrapper[4492]: I1126 07:08:02.258656 4492 generic.go:334] "Generic (PLEG): container finished" podID="d0edf899-a089-4aca-8ad3-d8e6f8b8cc14" containerID="3195d622466a8c8147de6e39591bbec307d6b9523bbcfc2ea741d0b8bcd7dc3a" exitCode=0 Nov 26 07:08:02 crc kubenswrapper[4492]: I1126 07:08:02.258877 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-74d7dcb8f-sgmxq" event={"ID":"d0edf899-a089-4aca-8ad3-d8e6f8b8cc14","Type":"ContainerDied","Data":"3195d622466a8c8147de6e39591bbec307d6b9523bbcfc2ea741d0b8bcd7dc3a"} Nov 26 07:08:02 crc kubenswrapper[4492]: I1126 07:08:02.258957 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-74d7dcb8f-sgmxq" event={"ID":"d0edf899-a089-4aca-8ad3-d8e6f8b8cc14","Type":"ContainerDied","Data":"d29b4479f6f3b9325622d7e437409b90c32911b70f6add57b8711428951cf6b3"} Nov 26 07:08:02 crc kubenswrapper[4492]: I1126 07:08:02.258983 4492 scope.go:117] "RemoveContainer" containerID="3195d622466a8c8147de6e39591bbec307d6b9523bbcfc2ea741d0b8bcd7dc3a" Nov 26 07:08:02 crc kubenswrapper[4492]: I1126 07:08:02.259222 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-74d7dcb8f-sgmxq" Nov 26 07:08:02 crc kubenswrapper[4492]: I1126 07:08:02.334977 4492 scope.go:117] "RemoveContainer" containerID="f02aa7f846834abe0c9f8f3dd208d46c5ced272ebc593add3e506378268ec9da" Nov 26 07:08:02 crc kubenswrapper[4492]: I1126 07:08:02.372226 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 26 07:08:02 crc kubenswrapper[4492]: I1126 07:08:02.398135 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-74d7dcb8f-sgmxq"] Nov 26 07:08:02 crc kubenswrapper[4492]: I1126 07:08:02.412981 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-74d7dcb8f-sgmxq"] Nov 26 07:08:02 crc kubenswrapper[4492]: I1126 07:08:02.420236 4492 scope.go:117] "RemoveContainer" containerID="3195d622466a8c8147de6e39591bbec307d6b9523bbcfc2ea741d0b8bcd7dc3a" Nov 26 07:08:02 crc kubenswrapper[4492]: E1126 07:08:02.421445 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3195d622466a8c8147de6e39591bbec307d6b9523bbcfc2ea741d0b8bcd7dc3a\": container with ID starting with 3195d622466a8c8147de6e39591bbec307d6b9523bbcfc2ea741d0b8bcd7dc3a not found: ID does not exist" containerID="3195d622466a8c8147de6e39591bbec307d6b9523bbcfc2ea741d0b8bcd7dc3a" Nov 26 07:08:02 crc kubenswrapper[4492]: I1126 07:08:02.421474 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3195d622466a8c8147de6e39591bbec307d6b9523bbcfc2ea741d0b8bcd7dc3a"} err="failed to get container status \"3195d622466a8c8147de6e39591bbec307d6b9523bbcfc2ea741d0b8bcd7dc3a\": rpc error: code = NotFound desc = could not find container \"3195d622466a8c8147de6e39591bbec307d6b9523bbcfc2ea741d0b8bcd7dc3a\": container with ID starting with 3195d622466a8c8147de6e39591bbec307d6b9523bbcfc2ea741d0b8bcd7dc3a not found: ID does not exist" Nov 26 07:08:02 crc kubenswrapper[4492]: I1126 07:08:02.421500 4492 scope.go:117] "RemoveContainer" containerID="f02aa7f846834abe0c9f8f3dd208d46c5ced272ebc593add3e506378268ec9da" Nov 26 07:08:02 crc kubenswrapper[4492]: E1126 07:08:02.421884 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f02aa7f846834abe0c9f8f3dd208d46c5ced272ebc593add3e506378268ec9da\": container with ID starting with f02aa7f846834abe0c9f8f3dd208d46c5ced272ebc593add3e506378268ec9da not found: ID does not exist" containerID="f02aa7f846834abe0c9f8f3dd208d46c5ced272ebc593add3e506378268ec9da" Nov 26 07:08:02 crc kubenswrapper[4492]: I1126 07:08:02.421924 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f02aa7f846834abe0c9f8f3dd208d46c5ced272ebc593add3e506378268ec9da"} err="failed to get container status \"f02aa7f846834abe0c9f8f3dd208d46c5ced272ebc593add3e506378268ec9da\": rpc error: code = NotFound desc = could not find container \"f02aa7f846834abe0c9f8f3dd208d46c5ced272ebc593add3e506378268ec9da\": container with ID starting with f02aa7f846834abe0c9f8f3dd208d46c5ced272ebc593add3e506378268ec9da not found: ID does not exist" Nov 26 07:08:02 crc kubenswrapper[4492]: I1126 07:08:02.459897 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d0edf899-a089-4aca-8ad3-d8e6f8b8cc14" path="/var/lib/kubelet/pods/d0edf899-a089-4aca-8ad3-d8e6f8b8cc14/volumes" Nov 26 07:08:02 crc kubenswrapper[4492]: I1126 07:08:02.613205 4492 util.go:48] "No ready sandbox 
for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-9ftsf" Nov 26 07:08:02 crc kubenswrapper[4492]: I1126 07:08:02.750720 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6ca524b1-6cc4-4333-a4fe-724793248c10-scripts\") pod \"6ca524b1-6cc4-4333-a4fe-724793248c10\" (UID: \"6ca524b1-6cc4-4333-a4fe-724793248c10\") " Nov 26 07:08:02 crc kubenswrapper[4492]: I1126 07:08:02.750929 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6ca524b1-6cc4-4333-a4fe-724793248c10-combined-ca-bundle\") pod \"6ca524b1-6cc4-4333-a4fe-724793248c10\" (UID: \"6ca524b1-6cc4-4333-a4fe-724793248c10\") " Nov 26 07:08:02 crc kubenswrapper[4492]: I1126 07:08:02.751125 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xtkzp\" (UniqueName: \"kubernetes.io/projected/6ca524b1-6cc4-4333-a4fe-724793248c10-kube-api-access-xtkzp\") pod \"6ca524b1-6cc4-4333-a4fe-724793248c10\" (UID: \"6ca524b1-6cc4-4333-a4fe-724793248c10\") " Nov 26 07:08:02 crc kubenswrapper[4492]: I1126 07:08:02.751232 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6ca524b1-6cc4-4333-a4fe-724793248c10-config-data\") pod \"6ca524b1-6cc4-4333-a4fe-724793248c10\" (UID: \"6ca524b1-6cc4-4333-a4fe-724793248c10\") " Nov 26 07:08:02 crc kubenswrapper[4492]: I1126 07:08:02.760533 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ca524b1-6cc4-4333-a4fe-724793248c10-scripts" (OuterVolumeSpecName: "scripts") pod "6ca524b1-6cc4-4333-a4fe-724793248c10" (UID: "6ca524b1-6cc4-4333-a4fe-724793248c10"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:08:02 crc kubenswrapper[4492]: I1126 07:08:02.760865 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ca524b1-6cc4-4333-a4fe-724793248c10-kube-api-access-xtkzp" (OuterVolumeSpecName: "kube-api-access-xtkzp") pod "6ca524b1-6cc4-4333-a4fe-724793248c10" (UID: "6ca524b1-6cc4-4333-a4fe-724793248c10"). InnerVolumeSpecName "kube-api-access-xtkzp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:08:02 crc kubenswrapper[4492]: I1126 07:08:02.778558 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ca524b1-6cc4-4333-a4fe-724793248c10-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6ca524b1-6cc4-4333-a4fe-724793248c10" (UID: "6ca524b1-6cc4-4333-a4fe-724793248c10"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:08:02 crc kubenswrapper[4492]: I1126 07:08:02.800117 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ca524b1-6cc4-4333-a4fe-724793248c10-config-data" (OuterVolumeSpecName: "config-data") pod "6ca524b1-6cc4-4333-a4fe-724793248c10" (UID: "6ca524b1-6cc4-4333-a4fe-724793248c10"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:08:02 crc kubenswrapper[4492]: I1126 07:08:02.854396 4492 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6ca524b1-6cc4-4333-a4fe-724793248c10-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:08:02 crc kubenswrapper[4492]: I1126 07:08:02.854507 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xtkzp\" (UniqueName: \"kubernetes.io/projected/6ca524b1-6cc4-4333-a4fe-724793248c10-kube-api-access-xtkzp\") on node \"crc\" DevicePath \"\"" Nov 26 07:08:02 crc kubenswrapper[4492]: I1126 07:08:02.854574 4492 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6ca524b1-6cc4-4333-a4fe-724793248c10-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 07:08:02 crc kubenswrapper[4492]: I1126 07:08:02.854638 4492 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6ca524b1-6cc4-4333-a4fe-724793248c10-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 07:08:03 crc kubenswrapper[4492]: I1126 07:08:03.271802 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-9ftsf" event={"ID":"6ca524b1-6cc4-4333-a4fe-724793248c10","Type":"ContainerDied","Data":"6212acad36e0d49a49fad9e81394e34c3cb97275d4cbe8e950c13f1fe9c47f47"} Nov 26 07:08:03 crc kubenswrapper[4492]: I1126 07:08:03.272257 4492 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6212acad36e0d49a49fad9e81394e34c3cb97275d4cbe8e950c13f1fe9c47f47" Nov 26 07:08:03 crc kubenswrapper[4492]: I1126 07:08:03.271846 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-9ftsf" Nov 26 07:08:03 crc kubenswrapper[4492]: I1126 07:08:03.275706 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"f403bbb4-0c46-4b60-b7a2-78a0238e91e9","Type":"ContainerStarted","Data":"44bc9ca0006e72b368a242e03aa0b70f877957bc18699dd130e56f0dfc06e15c"} Nov 26 07:08:03 crc kubenswrapper[4492]: I1126 07:08:03.275772 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"f403bbb4-0c46-4b60-b7a2-78a0238e91e9","Type":"ContainerStarted","Data":"b544b6e0b8431fa3111a8b994ef1818bf599afdce20f0d220face9c20210124e"} Nov 26 07:08:03 crc kubenswrapper[4492]: I1126 07:08:03.275823 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-conductor-0" Nov 26 07:08:03 crc kubenswrapper[4492]: I1126 07:08:03.311018 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-0" podStartSLOduration=2.310998463 podStartE2EDuration="2.310998463s" podCreationTimestamp="2025-11-26 07:08:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:08:03.300001517 +0000 UTC m=+1179.183889815" watchObservedRunningTime="2025-11-26 07:08:03.310998463 +0000 UTC m=+1179.194886761" Nov 26 07:08:03 crc kubenswrapper[4492]: I1126 07:08:03.458007 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 26 07:08:03 crc kubenswrapper[4492]: I1126 07:08:03.458274 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="1ad0cb8d-3495-4b1d-b959-1f1efbad9e39" 
containerName="nova-api-log" containerID="cri-o://19a2c80feaee79a44f965236318e73861b4c1ce749b49142eaa6c42f09495735" gracePeriod=30 Nov 26 07:08:03 crc kubenswrapper[4492]: I1126 07:08:03.458488 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="1ad0cb8d-3495-4b1d-b959-1f1efbad9e39" containerName="nova-api-api" containerID="cri-o://b7f08f0a232c42764e8fcfd4eca65c847ef8d2f7649ddcb29ad0e7dd3708c513" gracePeriod=30 Nov 26 07:08:03 crc kubenswrapper[4492]: I1126 07:08:03.466706 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 07:08:03 crc kubenswrapper[4492]: I1126 07:08:03.532641 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 07:08:03 crc kubenswrapper[4492]: I1126 07:08:03.532863 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="8f71ed76-fe8e-428b-811f-458e61ba2fc8" containerName="nova-metadata-log" containerID="cri-o://a4924a7bce4f45a5694044c534cf5ede7f0152992d7f0e1bac3870bb6daade46" gracePeriod=30 Nov 26 07:08:03 crc kubenswrapper[4492]: I1126 07:08:03.533601 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="8f71ed76-fe8e-428b-811f-458e61ba2fc8" containerName="nova-metadata-metadata" containerID="cri-o://18fff44e7f8edb80c2d457b9cfa095985bea102d920008838d87b64fb3c07cb5" gracePeriod=30 Nov 26 07:08:03 crc kubenswrapper[4492]: I1126 07:08:03.563862 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 26 07:08:03 crc kubenswrapper[4492]: I1126 07:08:03.563930 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 26 07:08:04 crc kubenswrapper[4492]: I1126 07:08:04.117706 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 07:08:04 crc kubenswrapper[4492]: I1126 07:08:04.187817 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8f71ed76-fe8e-428b-811f-458e61ba2fc8-combined-ca-bundle\") pod \"8f71ed76-fe8e-428b-811f-458e61ba2fc8\" (UID: \"8f71ed76-fe8e-428b-811f-458e61ba2fc8\") " Nov 26 07:08:04 crc kubenswrapper[4492]: I1126 07:08:04.187947 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/8f71ed76-fe8e-428b-811f-458e61ba2fc8-nova-metadata-tls-certs\") pod \"8f71ed76-fe8e-428b-811f-458e61ba2fc8\" (UID: \"8f71ed76-fe8e-428b-811f-458e61ba2fc8\") " Nov 26 07:08:04 crc kubenswrapper[4492]: I1126 07:08:04.187982 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2wtqg\" (UniqueName: \"kubernetes.io/projected/8f71ed76-fe8e-428b-811f-458e61ba2fc8-kube-api-access-2wtqg\") pod \"8f71ed76-fe8e-428b-811f-458e61ba2fc8\" (UID: \"8f71ed76-fe8e-428b-811f-458e61ba2fc8\") " Nov 26 07:08:04 crc kubenswrapper[4492]: I1126 07:08:04.188067 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8f71ed76-fe8e-428b-811f-458e61ba2fc8-logs\") pod \"8f71ed76-fe8e-428b-811f-458e61ba2fc8\" (UID: \"8f71ed76-fe8e-428b-811f-458e61ba2fc8\") " Nov 26 07:08:04 crc kubenswrapper[4492]: I1126 07:08:04.188100 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8f71ed76-fe8e-428b-811f-458e61ba2fc8-config-data\") pod \"8f71ed76-fe8e-428b-811f-458e61ba2fc8\" (UID: \"8f71ed76-fe8e-428b-811f-458e61ba2fc8\") " Nov 26 07:08:04 crc kubenswrapper[4492]: I1126 07:08:04.191663 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f71ed76-fe8e-428b-811f-458e61ba2fc8-logs" (OuterVolumeSpecName: "logs") pod "8f71ed76-fe8e-428b-811f-458e61ba2fc8" (UID: "8f71ed76-fe8e-428b-811f-458e61ba2fc8"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:08:04 crc kubenswrapper[4492]: I1126 07:08:04.195024 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f71ed76-fe8e-428b-811f-458e61ba2fc8-kube-api-access-2wtqg" (OuterVolumeSpecName: "kube-api-access-2wtqg") pod "8f71ed76-fe8e-428b-811f-458e61ba2fc8" (UID: "8f71ed76-fe8e-428b-811f-458e61ba2fc8"). InnerVolumeSpecName "kube-api-access-2wtqg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:08:04 crc kubenswrapper[4492]: I1126 07:08:04.219782 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f71ed76-fe8e-428b-811f-458e61ba2fc8-config-data" (OuterVolumeSpecName: "config-data") pod "8f71ed76-fe8e-428b-811f-458e61ba2fc8" (UID: "8f71ed76-fe8e-428b-811f-458e61ba2fc8"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:08:04 crc kubenswrapper[4492]: I1126 07:08:04.235227 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f71ed76-fe8e-428b-811f-458e61ba2fc8-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8f71ed76-fe8e-428b-811f-458e61ba2fc8" (UID: "8f71ed76-fe8e-428b-811f-458e61ba2fc8"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:08:04 crc kubenswrapper[4492]: I1126 07:08:04.263460 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f71ed76-fe8e-428b-811f-458e61ba2fc8-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "8f71ed76-fe8e-428b-811f-458e61ba2fc8" (UID: "8f71ed76-fe8e-428b-811f-458e61ba2fc8"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:08:04 crc kubenswrapper[4492]: I1126 07:08:04.290994 4492 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/8f71ed76-fe8e-428b-811f-458e61ba2fc8-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 07:08:04 crc kubenswrapper[4492]: I1126 07:08:04.291020 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2wtqg\" (UniqueName: \"kubernetes.io/projected/8f71ed76-fe8e-428b-811f-458e61ba2fc8-kube-api-access-2wtqg\") on node \"crc\" DevicePath \"\"" Nov 26 07:08:04 crc kubenswrapper[4492]: I1126 07:08:04.291053 4492 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8f71ed76-fe8e-428b-811f-458e61ba2fc8-logs\") on node \"crc\" DevicePath \"\"" Nov 26 07:08:04 crc kubenswrapper[4492]: I1126 07:08:04.291066 4492 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8f71ed76-fe8e-428b-811f-458e61ba2fc8-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 07:08:04 crc kubenswrapper[4492]: I1126 07:08:04.291077 4492 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8f71ed76-fe8e-428b-811f-458e61ba2fc8-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:08:04 crc kubenswrapper[4492]: I1126 07:08:04.295249 4492 generic.go:334] "Generic (PLEG): container finished" podID="1ad0cb8d-3495-4b1d-b959-1f1efbad9e39" containerID="19a2c80feaee79a44f965236318e73861b4c1ce749b49142eaa6c42f09495735" exitCode=143 Nov 26 07:08:04 crc kubenswrapper[4492]: I1126 07:08:04.295325 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"1ad0cb8d-3495-4b1d-b959-1f1efbad9e39","Type":"ContainerDied","Data":"19a2c80feaee79a44f965236318e73861b4c1ce749b49142eaa6c42f09495735"} Nov 26 07:08:04 crc kubenswrapper[4492]: I1126 07:08:04.296988 4492 generic.go:334] "Generic (PLEG): container finished" podID="8f71ed76-fe8e-428b-811f-458e61ba2fc8" containerID="18fff44e7f8edb80c2d457b9cfa095985bea102d920008838d87b64fb3c07cb5" exitCode=0 Nov 26 07:08:04 crc kubenswrapper[4492]: I1126 07:08:04.297004 4492 generic.go:334] "Generic (PLEG): container finished" podID="8f71ed76-fe8e-428b-811f-458e61ba2fc8" containerID="a4924a7bce4f45a5694044c534cf5ede7f0152992d7f0e1bac3870bb6daade46" exitCode=143 Nov 26 07:08:04 crc kubenswrapper[4492]: I1126 07:08:04.298468 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 07:08:04 crc kubenswrapper[4492]: I1126 07:08:04.299781 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"8f71ed76-fe8e-428b-811f-458e61ba2fc8","Type":"ContainerDied","Data":"18fff44e7f8edb80c2d457b9cfa095985bea102d920008838d87b64fb3c07cb5"} Nov 26 07:08:04 crc kubenswrapper[4492]: I1126 07:08:04.299901 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"8f71ed76-fe8e-428b-811f-458e61ba2fc8","Type":"ContainerDied","Data":"a4924a7bce4f45a5694044c534cf5ede7f0152992d7f0e1bac3870bb6daade46"} Nov 26 07:08:04 crc kubenswrapper[4492]: I1126 07:08:04.299918 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"8f71ed76-fe8e-428b-811f-458e61ba2fc8","Type":"ContainerDied","Data":"380bf1d13df40d050021c59123c972f556a41b72903a51c1b4f04b79e2b40e44"} Nov 26 07:08:04 crc kubenswrapper[4492]: I1126 07:08:04.300016 4492 scope.go:117] "RemoveContainer" containerID="18fff44e7f8edb80c2d457b9cfa095985bea102d920008838d87b64fb3c07cb5" Nov 26 07:08:04 crc kubenswrapper[4492]: I1126 07:08:04.299958 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="8b5a886b-7cbf-4031-8ef9-dd05641aa966" containerName="nova-scheduler-scheduler" containerID="cri-o://4a7d6092891a8dcd6fe680de9d3c46dc98ccfc65267acd1b08a997939cf065d3" gracePeriod=30 Nov 26 07:08:04 crc kubenswrapper[4492]: I1126 07:08:04.360087 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 07:08:04 crc kubenswrapper[4492]: I1126 07:08:04.373275 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 07:08:04 crc kubenswrapper[4492]: I1126 07:08:04.394827 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 26 07:08:04 crc kubenswrapper[4492]: E1126 07:08:04.395602 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6ca524b1-6cc4-4333-a4fe-724793248c10" containerName="nova-manage" Nov 26 07:08:04 crc kubenswrapper[4492]: I1126 07:08:04.395622 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="6ca524b1-6cc4-4333-a4fe-724793248c10" containerName="nova-manage" Nov 26 07:08:04 crc kubenswrapper[4492]: E1126 07:08:04.395638 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d0edf899-a089-4aca-8ad3-d8e6f8b8cc14" containerName="init" Nov 26 07:08:04 crc kubenswrapper[4492]: I1126 07:08:04.395645 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="d0edf899-a089-4aca-8ad3-d8e6f8b8cc14" containerName="init" Nov 26 07:08:04 crc kubenswrapper[4492]: E1126 07:08:04.395658 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8f71ed76-fe8e-428b-811f-458e61ba2fc8" containerName="nova-metadata-metadata" Nov 26 07:08:04 crc kubenswrapper[4492]: I1126 07:08:04.395664 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="8f71ed76-fe8e-428b-811f-458e61ba2fc8" containerName="nova-metadata-metadata" Nov 26 07:08:04 crc kubenswrapper[4492]: E1126 07:08:04.395715 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8f71ed76-fe8e-428b-811f-458e61ba2fc8" containerName="nova-metadata-log" Nov 26 07:08:04 crc kubenswrapper[4492]: I1126 07:08:04.395724 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="8f71ed76-fe8e-428b-811f-458e61ba2fc8" containerName="nova-metadata-log" Nov 26 07:08:04 crc 
kubenswrapper[4492]: E1126 07:08:04.395740 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d0edf899-a089-4aca-8ad3-d8e6f8b8cc14" containerName="dnsmasq-dns" Nov 26 07:08:04 crc kubenswrapper[4492]: I1126 07:08:04.395746 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="d0edf899-a089-4aca-8ad3-d8e6f8b8cc14" containerName="dnsmasq-dns" Nov 26 07:08:04 crc kubenswrapper[4492]: I1126 07:08:04.400996 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="8f71ed76-fe8e-428b-811f-458e61ba2fc8" containerName="nova-metadata-metadata" Nov 26 07:08:04 crc kubenswrapper[4492]: I1126 07:08:04.401020 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="6ca524b1-6cc4-4333-a4fe-724793248c10" containerName="nova-manage" Nov 26 07:08:04 crc kubenswrapper[4492]: I1126 07:08:04.401037 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="8f71ed76-fe8e-428b-811f-458e61ba2fc8" containerName="nova-metadata-log" Nov 26 07:08:04 crc kubenswrapper[4492]: I1126 07:08:04.401054 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="d0edf899-a089-4aca-8ad3-d8e6f8b8cc14" containerName="dnsmasq-dns" Nov 26 07:08:04 crc kubenswrapper[4492]: I1126 07:08:04.404146 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 07:08:04 crc kubenswrapper[4492]: I1126 07:08:04.405717 4492 scope.go:117] "RemoveContainer" containerID="a4924a7bce4f45a5694044c534cf5ede7f0152992d7f0e1bac3870bb6daade46" Nov 26 07:08:04 crc kubenswrapper[4492]: I1126 07:08:04.406028 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 07:08:04 crc kubenswrapper[4492]: I1126 07:08:04.408313 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 26 07:08:04 crc kubenswrapper[4492]: I1126 07:08:04.408542 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Nov 26 07:08:04 crc kubenswrapper[4492]: I1126 07:08:04.450023 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f71ed76-fe8e-428b-811f-458e61ba2fc8" path="/var/lib/kubelet/pods/8f71ed76-fe8e-428b-811f-458e61ba2fc8/volumes" Nov 26 07:08:04 crc kubenswrapper[4492]: I1126 07:08:04.453321 4492 scope.go:117] "RemoveContainer" containerID="18fff44e7f8edb80c2d457b9cfa095985bea102d920008838d87b64fb3c07cb5" Nov 26 07:08:04 crc kubenswrapper[4492]: E1126 07:08:04.454016 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"18fff44e7f8edb80c2d457b9cfa095985bea102d920008838d87b64fb3c07cb5\": container with ID starting with 18fff44e7f8edb80c2d457b9cfa095985bea102d920008838d87b64fb3c07cb5 not found: ID does not exist" containerID="18fff44e7f8edb80c2d457b9cfa095985bea102d920008838d87b64fb3c07cb5" Nov 26 07:08:04 crc kubenswrapper[4492]: I1126 07:08:04.454066 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"18fff44e7f8edb80c2d457b9cfa095985bea102d920008838d87b64fb3c07cb5"} err="failed to get container status \"18fff44e7f8edb80c2d457b9cfa095985bea102d920008838d87b64fb3c07cb5\": rpc error: code = NotFound desc = could not find container \"18fff44e7f8edb80c2d457b9cfa095985bea102d920008838d87b64fb3c07cb5\": container with ID starting with 18fff44e7f8edb80c2d457b9cfa095985bea102d920008838d87b64fb3c07cb5 not found: ID does not exist" Nov 26 07:08:04 crc 
kubenswrapper[4492]: I1126 07:08:04.454097 4492 scope.go:117] "RemoveContainer" containerID="a4924a7bce4f45a5694044c534cf5ede7f0152992d7f0e1bac3870bb6daade46" Nov 26 07:08:04 crc kubenswrapper[4492]: E1126 07:08:04.454468 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a4924a7bce4f45a5694044c534cf5ede7f0152992d7f0e1bac3870bb6daade46\": container with ID starting with a4924a7bce4f45a5694044c534cf5ede7f0152992d7f0e1bac3870bb6daade46 not found: ID does not exist" containerID="a4924a7bce4f45a5694044c534cf5ede7f0152992d7f0e1bac3870bb6daade46" Nov 26 07:08:04 crc kubenswrapper[4492]: I1126 07:08:04.454509 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a4924a7bce4f45a5694044c534cf5ede7f0152992d7f0e1bac3870bb6daade46"} err="failed to get container status \"a4924a7bce4f45a5694044c534cf5ede7f0152992d7f0e1bac3870bb6daade46\": rpc error: code = NotFound desc = could not find container \"a4924a7bce4f45a5694044c534cf5ede7f0152992d7f0e1bac3870bb6daade46\": container with ID starting with a4924a7bce4f45a5694044c534cf5ede7f0152992d7f0e1bac3870bb6daade46 not found: ID does not exist" Nov 26 07:08:04 crc kubenswrapper[4492]: I1126 07:08:04.454522 4492 scope.go:117] "RemoveContainer" containerID="18fff44e7f8edb80c2d457b9cfa095985bea102d920008838d87b64fb3c07cb5" Nov 26 07:08:04 crc kubenswrapper[4492]: I1126 07:08:04.454873 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"18fff44e7f8edb80c2d457b9cfa095985bea102d920008838d87b64fb3c07cb5"} err="failed to get container status \"18fff44e7f8edb80c2d457b9cfa095985bea102d920008838d87b64fb3c07cb5\": rpc error: code = NotFound desc = could not find container \"18fff44e7f8edb80c2d457b9cfa095985bea102d920008838d87b64fb3c07cb5\": container with ID starting with 18fff44e7f8edb80c2d457b9cfa095985bea102d920008838d87b64fb3c07cb5 not found: ID does not exist" Nov 26 07:08:04 crc kubenswrapper[4492]: I1126 07:08:04.454902 4492 scope.go:117] "RemoveContainer" containerID="a4924a7bce4f45a5694044c534cf5ede7f0152992d7f0e1bac3870bb6daade46" Nov 26 07:08:04 crc kubenswrapper[4492]: I1126 07:08:04.455218 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a4924a7bce4f45a5694044c534cf5ede7f0152992d7f0e1bac3870bb6daade46"} err="failed to get container status \"a4924a7bce4f45a5694044c534cf5ede7f0152992d7f0e1bac3870bb6daade46\": rpc error: code = NotFound desc = could not find container \"a4924a7bce4f45a5694044c534cf5ede7f0152992d7f0e1bac3870bb6daade46\": container with ID starting with a4924a7bce4f45a5694044c534cf5ede7f0152992d7f0e1bac3870bb6daade46 not found: ID does not exist" Nov 26 07:08:04 crc kubenswrapper[4492]: I1126 07:08:04.600257 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ace53bbf-c004-4557-ad30-eac2ac4f64ea-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"ace53bbf-c004-4557-ad30-eac2ac4f64ea\") " pod="openstack/nova-metadata-0" Nov 26 07:08:04 crc kubenswrapper[4492]: I1126 07:08:04.600816 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ace53bbf-c004-4557-ad30-eac2ac4f64ea-logs\") pod \"nova-metadata-0\" (UID: \"ace53bbf-c004-4557-ad30-eac2ac4f64ea\") " pod="openstack/nova-metadata-0" Nov 26 07:08:04 crc kubenswrapper[4492]: 
I1126 07:08:04.600944 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g4hbj\" (UniqueName: \"kubernetes.io/projected/ace53bbf-c004-4557-ad30-eac2ac4f64ea-kube-api-access-g4hbj\") pod \"nova-metadata-0\" (UID: \"ace53bbf-c004-4557-ad30-eac2ac4f64ea\") " pod="openstack/nova-metadata-0" Nov 26 07:08:04 crc kubenswrapper[4492]: I1126 07:08:04.601056 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/ace53bbf-c004-4557-ad30-eac2ac4f64ea-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"ace53bbf-c004-4557-ad30-eac2ac4f64ea\") " pod="openstack/nova-metadata-0" Nov 26 07:08:04 crc kubenswrapper[4492]: I1126 07:08:04.601319 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ace53bbf-c004-4557-ad30-eac2ac4f64ea-config-data\") pod \"nova-metadata-0\" (UID: \"ace53bbf-c004-4557-ad30-eac2ac4f64ea\") " pod="openstack/nova-metadata-0" Nov 26 07:08:04 crc kubenswrapper[4492]: I1126 07:08:04.702848 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ace53bbf-c004-4557-ad30-eac2ac4f64ea-config-data\") pod \"nova-metadata-0\" (UID: \"ace53bbf-c004-4557-ad30-eac2ac4f64ea\") " pod="openstack/nova-metadata-0" Nov 26 07:08:04 crc kubenswrapper[4492]: I1126 07:08:04.703006 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ace53bbf-c004-4557-ad30-eac2ac4f64ea-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"ace53bbf-c004-4557-ad30-eac2ac4f64ea\") " pod="openstack/nova-metadata-0" Nov 26 07:08:04 crc kubenswrapper[4492]: I1126 07:08:04.703131 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ace53bbf-c004-4557-ad30-eac2ac4f64ea-logs\") pod \"nova-metadata-0\" (UID: \"ace53bbf-c004-4557-ad30-eac2ac4f64ea\") " pod="openstack/nova-metadata-0" Nov 26 07:08:04 crc kubenswrapper[4492]: I1126 07:08:04.703202 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g4hbj\" (UniqueName: \"kubernetes.io/projected/ace53bbf-c004-4557-ad30-eac2ac4f64ea-kube-api-access-g4hbj\") pod \"nova-metadata-0\" (UID: \"ace53bbf-c004-4557-ad30-eac2ac4f64ea\") " pod="openstack/nova-metadata-0" Nov 26 07:08:04 crc kubenswrapper[4492]: I1126 07:08:04.703262 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/ace53bbf-c004-4557-ad30-eac2ac4f64ea-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"ace53bbf-c004-4557-ad30-eac2ac4f64ea\") " pod="openstack/nova-metadata-0" Nov 26 07:08:04 crc kubenswrapper[4492]: I1126 07:08:04.703728 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ace53bbf-c004-4557-ad30-eac2ac4f64ea-logs\") pod \"nova-metadata-0\" (UID: \"ace53bbf-c004-4557-ad30-eac2ac4f64ea\") " pod="openstack/nova-metadata-0" Nov 26 07:08:04 crc kubenswrapper[4492]: I1126 07:08:04.706918 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ace53bbf-c004-4557-ad30-eac2ac4f64ea-config-data\") pod \"nova-metadata-0\" (UID: 
\"ace53bbf-c004-4557-ad30-eac2ac4f64ea\") " pod="openstack/nova-metadata-0" Nov 26 07:08:04 crc kubenswrapper[4492]: I1126 07:08:04.707052 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/ace53bbf-c004-4557-ad30-eac2ac4f64ea-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"ace53bbf-c004-4557-ad30-eac2ac4f64ea\") " pod="openstack/nova-metadata-0" Nov 26 07:08:04 crc kubenswrapper[4492]: I1126 07:08:04.712968 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ace53bbf-c004-4557-ad30-eac2ac4f64ea-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"ace53bbf-c004-4557-ad30-eac2ac4f64ea\") " pod="openstack/nova-metadata-0" Nov 26 07:08:04 crc kubenswrapper[4492]: I1126 07:08:04.718080 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g4hbj\" (UniqueName: \"kubernetes.io/projected/ace53bbf-c004-4557-ad30-eac2ac4f64ea-kube-api-access-g4hbj\") pod \"nova-metadata-0\" (UID: \"ace53bbf-c004-4557-ad30-eac2ac4f64ea\") " pod="openstack/nova-metadata-0" Nov 26 07:08:04 crc kubenswrapper[4492]: I1126 07:08:04.728782 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 07:08:05 crc kubenswrapper[4492]: I1126 07:08:05.172990 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 07:08:05 crc kubenswrapper[4492]: W1126 07:08:05.188557 4492 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podace53bbf_c004_4557_ad30_eac2ac4f64ea.slice/crio-7c88123ee23029eb1d855406828df9b392a8268ae13d6e0c36b1cf3e32c6468b WatchSource:0}: Error finding container 7c88123ee23029eb1d855406828df9b392a8268ae13d6e0c36b1cf3e32c6468b: Status 404 returned error can't find the container with id 7c88123ee23029eb1d855406828df9b392a8268ae13d6e0c36b1cf3e32c6468b Nov 26 07:08:05 crc kubenswrapper[4492]: I1126 07:08:05.310212 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"ace53bbf-c004-4557-ad30-eac2ac4f64ea","Type":"ContainerStarted","Data":"7c88123ee23029eb1d855406828df9b392a8268ae13d6e0c36b1cf3e32c6468b"} Nov 26 07:08:05 crc kubenswrapper[4492]: E1126 07:08:05.812531 4492 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="4a7d6092891a8dcd6fe680de9d3c46dc98ccfc65267acd1b08a997939cf065d3" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 26 07:08:05 crc kubenswrapper[4492]: E1126 07:08:05.815136 4492 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="4a7d6092891a8dcd6fe680de9d3c46dc98ccfc65267acd1b08a997939cf065d3" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 26 07:08:05 crc kubenswrapper[4492]: E1126 07:08:05.816390 4492 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="4a7d6092891a8dcd6fe680de9d3c46dc98ccfc65267acd1b08a997939cf065d3" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 26 07:08:05 crc 
kubenswrapper[4492]: E1126 07:08:05.816428 4492 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="8b5a886b-7cbf-4031-8ef9-dd05641aa966" containerName="nova-scheduler-scheduler" Nov 26 07:08:06 crc kubenswrapper[4492]: I1126 07:08:06.325012 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"ace53bbf-c004-4557-ad30-eac2ac4f64ea","Type":"ContainerStarted","Data":"0138605cdc97dbe5fb3f81b092a81eb2f47abe4a1cc8364fbf79f3a20b46e247"} Nov 26 07:08:06 crc kubenswrapper[4492]: I1126 07:08:06.325093 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"ace53bbf-c004-4557-ad30-eac2ac4f64ea","Type":"ContainerStarted","Data":"66b07abe56febf6635ffeae57fc486a01b07c25ca17c7bd948b10f27cae68455"} Nov 26 07:08:06 crc kubenswrapper[4492]: I1126 07:08:06.354952 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.354925067 podStartE2EDuration="2.354925067s" podCreationTimestamp="2025-11-26 07:08:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:08:06.342038149 +0000 UTC m=+1182.225926446" watchObservedRunningTime="2025-11-26 07:08:06.354925067 +0000 UTC m=+1182.238813365" Nov 26 07:08:07 crc kubenswrapper[4492]: I1126 07:08:07.032784 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 26 07:08:07 crc kubenswrapper[4492]: I1126 07:08:07.158496 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1ad0cb8d-3495-4b1d-b959-1f1efbad9e39-combined-ca-bundle\") pod \"1ad0cb8d-3495-4b1d-b959-1f1efbad9e39\" (UID: \"1ad0cb8d-3495-4b1d-b959-1f1efbad9e39\") " Nov 26 07:08:07 crc kubenswrapper[4492]: I1126 07:08:07.158578 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1ad0cb8d-3495-4b1d-b959-1f1efbad9e39-config-data\") pod \"1ad0cb8d-3495-4b1d-b959-1f1efbad9e39\" (UID: \"1ad0cb8d-3495-4b1d-b959-1f1efbad9e39\") " Nov 26 07:08:07 crc kubenswrapper[4492]: I1126 07:08:07.158650 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1ad0cb8d-3495-4b1d-b959-1f1efbad9e39-logs\") pod \"1ad0cb8d-3495-4b1d-b959-1f1efbad9e39\" (UID: \"1ad0cb8d-3495-4b1d-b959-1f1efbad9e39\") " Nov 26 07:08:07 crc kubenswrapper[4492]: I1126 07:08:07.158847 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wmmqb\" (UniqueName: \"kubernetes.io/projected/1ad0cb8d-3495-4b1d-b959-1f1efbad9e39-kube-api-access-wmmqb\") pod \"1ad0cb8d-3495-4b1d-b959-1f1efbad9e39\" (UID: \"1ad0cb8d-3495-4b1d-b959-1f1efbad9e39\") " Nov 26 07:08:07 crc kubenswrapper[4492]: I1126 07:08:07.159602 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1ad0cb8d-3495-4b1d-b959-1f1efbad9e39-logs" (OuterVolumeSpecName: "logs") pod "1ad0cb8d-3495-4b1d-b959-1f1efbad9e39" (UID: "1ad0cb8d-3495-4b1d-b959-1f1efbad9e39"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:08:07 crc kubenswrapper[4492]: I1126 07:08:07.166742 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1ad0cb8d-3495-4b1d-b959-1f1efbad9e39-kube-api-access-wmmqb" (OuterVolumeSpecName: "kube-api-access-wmmqb") pod "1ad0cb8d-3495-4b1d-b959-1f1efbad9e39" (UID: "1ad0cb8d-3495-4b1d-b959-1f1efbad9e39"). InnerVolumeSpecName "kube-api-access-wmmqb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:08:07 crc kubenswrapper[4492]: I1126 07:08:07.184668 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1ad0cb8d-3495-4b1d-b959-1f1efbad9e39-config-data" (OuterVolumeSpecName: "config-data") pod "1ad0cb8d-3495-4b1d-b959-1f1efbad9e39" (UID: "1ad0cb8d-3495-4b1d-b959-1f1efbad9e39"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:08:07 crc kubenswrapper[4492]: I1126 07:08:07.185165 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1ad0cb8d-3495-4b1d-b959-1f1efbad9e39-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1ad0cb8d-3495-4b1d-b959-1f1efbad9e39" (UID: "1ad0cb8d-3495-4b1d-b959-1f1efbad9e39"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:08:07 crc kubenswrapper[4492]: I1126 07:08:07.262015 4492 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1ad0cb8d-3495-4b1d-b959-1f1efbad9e39-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:08:07 crc kubenswrapper[4492]: I1126 07:08:07.262043 4492 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1ad0cb8d-3495-4b1d-b959-1f1efbad9e39-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 07:08:07 crc kubenswrapper[4492]: I1126 07:08:07.262055 4492 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1ad0cb8d-3495-4b1d-b959-1f1efbad9e39-logs\") on node \"crc\" DevicePath \"\"" Nov 26 07:08:07 crc kubenswrapper[4492]: I1126 07:08:07.262065 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wmmqb\" (UniqueName: \"kubernetes.io/projected/1ad0cb8d-3495-4b1d-b959-1f1efbad9e39-kube-api-access-wmmqb\") on node \"crc\" DevicePath \"\"" Nov 26 07:08:07 crc kubenswrapper[4492]: I1126 07:08:07.336815 4492 generic.go:334] "Generic (PLEG): container finished" podID="1ad0cb8d-3495-4b1d-b959-1f1efbad9e39" containerID="b7f08f0a232c42764e8fcfd4eca65c847ef8d2f7649ddcb29ad0e7dd3708c513" exitCode=0 Nov 26 07:08:07 crc kubenswrapper[4492]: I1126 07:08:07.338339 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 26 07:08:07 crc kubenswrapper[4492]: I1126 07:08:07.340255 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"1ad0cb8d-3495-4b1d-b959-1f1efbad9e39","Type":"ContainerDied","Data":"b7f08f0a232c42764e8fcfd4eca65c847ef8d2f7649ddcb29ad0e7dd3708c513"} Nov 26 07:08:07 crc kubenswrapper[4492]: I1126 07:08:07.340327 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"1ad0cb8d-3495-4b1d-b959-1f1efbad9e39","Type":"ContainerDied","Data":"c00964ee6ed0e65f696f480ba3a668e781e46e52cb287a8a568cb8197376b38b"} Nov 26 07:08:07 crc kubenswrapper[4492]: I1126 07:08:07.340358 4492 scope.go:117] "RemoveContainer" containerID="b7f08f0a232c42764e8fcfd4eca65c847ef8d2f7649ddcb29ad0e7dd3708c513" Nov 26 07:08:07 crc kubenswrapper[4492]: I1126 07:08:07.376272 4492 scope.go:117] "RemoveContainer" containerID="19a2c80feaee79a44f965236318e73861b4c1ce749b49142eaa6c42f09495735" Nov 26 07:08:07 crc kubenswrapper[4492]: I1126 07:08:07.394516 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 26 07:08:07 crc kubenswrapper[4492]: I1126 07:08:07.404654 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 26 07:08:07 crc kubenswrapper[4492]: I1126 07:08:07.412475 4492 scope.go:117] "RemoveContainer" containerID="b7f08f0a232c42764e8fcfd4eca65c847ef8d2f7649ddcb29ad0e7dd3708c513" Nov 26 07:08:07 crc kubenswrapper[4492]: E1126 07:08:07.412921 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b7f08f0a232c42764e8fcfd4eca65c847ef8d2f7649ddcb29ad0e7dd3708c513\": container with ID starting with b7f08f0a232c42764e8fcfd4eca65c847ef8d2f7649ddcb29ad0e7dd3708c513 not found: ID does not exist" containerID="b7f08f0a232c42764e8fcfd4eca65c847ef8d2f7649ddcb29ad0e7dd3708c513" Nov 26 07:08:07 crc kubenswrapper[4492]: I1126 07:08:07.413000 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b7f08f0a232c42764e8fcfd4eca65c847ef8d2f7649ddcb29ad0e7dd3708c513"} err="failed to get container status \"b7f08f0a232c42764e8fcfd4eca65c847ef8d2f7649ddcb29ad0e7dd3708c513\": rpc error: code = NotFound desc = could not find container \"b7f08f0a232c42764e8fcfd4eca65c847ef8d2f7649ddcb29ad0e7dd3708c513\": container with ID starting with b7f08f0a232c42764e8fcfd4eca65c847ef8d2f7649ddcb29ad0e7dd3708c513 not found: ID does not exist" Nov 26 07:08:07 crc kubenswrapper[4492]: I1126 07:08:07.413030 4492 scope.go:117] "RemoveContainer" containerID="19a2c80feaee79a44f965236318e73861b4c1ce749b49142eaa6c42f09495735" Nov 26 07:08:07 crc kubenswrapper[4492]: E1126 07:08:07.413401 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"19a2c80feaee79a44f965236318e73861b4c1ce749b49142eaa6c42f09495735\": container with ID starting with 19a2c80feaee79a44f965236318e73861b4c1ce749b49142eaa6c42f09495735 not found: ID does not exist" containerID="19a2c80feaee79a44f965236318e73861b4c1ce749b49142eaa6c42f09495735" Nov 26 07:08:07 crc kubenswrapper[4492]: I1126 07:08:07.413442 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"19a2c80feaee79a44f965236318e73861b4c1ce749b49142eaa6c42f09495735"} err="failed to get container status \"19a2c80feaee79a44f965236318e73861b4c1ce749b49142eaa6c42f09495735\": rpc error: code = NotFound desc = could not 
find container \"19a2c80feaee79a44f965236318e73861b4c1ce749b49142eaa6c42f09495735\": container with ID starting with 19a2c80feaee79a44f965236318e73861b4c1ce749b49142eaa6c42f09495735 not found: ID does not exist" Nov 26 07:08:07 crc kubenswrapper[4492]: I1126 07:08:07.426031 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 26 07:08:07 crc kubenswrapper[4492]: E1126 07:08:07.426479 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1ad0cb8d-3495-4b1d-b959-1f1efbad9e39" containerName="nova-api-log" Nov 26 07:08:07 crc kubenswrapper[4492]: I1126 07:08:07.426499 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="1ad0cb8d-3495-4b1d-b959-1f1efbad9e39" containerName="nova-api-log" Nov 26 07:08:07 crc kubenswrapper[4492]: E1126 07:08:07.426527 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1ad0cb8d-3495-4b1d-b959-1f1efbad9e39" containerName="nova-api-api" Nov 26 07:08:07 crc kubenswrapper[4492]: I1126 07:08:07.426534 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="1ad0cb8d-3495-4b1d-b959-1f1efbad9e39" containerName="nova-api-api" Nov 26 07:08:07 crc kubenswrapper[4492]: I1126 07:08:07.426710 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="1ad0cb8d-3495-4b1d-b959-1f1efbad9e39" containerName="nova-api-api" Nov 26 07:08:07 crc kubenswrapper[4492]: I1126 07:08:07.426743 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="1ad0cb8d-3495-4b1d-b959-1f1efbad9e39" containerName="nova-api-log" Nov 26 07:08:07 crc kubenswrapper[4492]: I1126 07:08:07.428735 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 26 07:08:07 crc kubenswrapper[4492]: I1126 07:08:07.430944 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 26 07:08:07 crc kubenswrapper[4492]: I1126 07:08:07.436053 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 26 07:08:07 crc kubenswrapper[4492]: I1126 07:08:07.576054 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b1344418-ee16-445c-9a21-0cdd9f4e480d-logs\") pod \"nova-api-0\" (UID: \"b1344418-ee16-445c-9a21-0cdd9f4e480d\") " pod="openstack/nova-api-0" Nov 26 07:08:07 crc kubenswrapper[4492]: I1126 07:08:07.576244 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b1344418-ee16-445c-9a21-0cdd9f4e480d-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"b1344418-ee16-445c-9a21-0cdd9f4e480d\") " pod="openstack/nova-api-0" Nov 26 07:08:07 crc kubenswrapper[4492]: I1126 07:08:07.576440 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v4vjq\" (UniqueName: \"kubernetes.io/projected/b1344418-ee16-445c-9a21-0cdd9f4e480d-kube-api-access-v4vjq\") pod \"nova-api-0\" (UID: \"b1344418-ee16-445c-9a21-0cdd9f4e480d\") " pod="openstack/nova-api-0" Nov 26 07:08:07 crc kubenswrapper[4492]: I1126 07:08:07.577162 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b1344418-ee16-445c-9a21-0cdd9f4e480d-config-data\") pod \"nova-api-0\" (UID: \"b1344418-ee16-445c-9a21-0cdd9f4e480d\") " pod="openstack/nova-api-0" Nov 26 07:08:07 crc kubenswrapper[4492]: I1126 07:08:07.680996 
4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v4vjq\" (UniqueName: \"kubernetes.io/projected/b1344418-ee16-445c-9a21-0cdd9f4e480d-kube-api-access-v4vjq\") pod \"nova-api-0\" (UID: \"b1344418-ee16-445c-9a21-0cdd9f4e480d\") " pod="openstack/nova-api-0" Nov 26 07:08:07 crc kubenswrapper[4492]: I1126 07:08:07.681067 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b1344418-ee16-445c-9a21-0cdd9f4e480d-config-data\") pod \"nova-api-0\" (UID: \"b1344418-ee16-445c-9a21-0cdd9f4e480d\") " pod="openstack/nova-api-0" Nov 26 07:08:07 crc kubenswrapper[4492]: I1126 07:08:07.681144 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b1344418-ee16-445c-9a21-0cdd9f4e480d-logs\") pod \"nova-api-0\" (UID: \"b1344418-ee16-445c-9a21-0cdd9f4e480d\") " pod="openstack/nova-api-0" Nov 26 07:08:07 crc kubenswrapper[4492]: I1126 07:08:07.681217 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b1344418-ee16-445c-9a21-0cdd9f4e480d-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"b1344418-ee16-445c-9a21-0cdd9f4e480d\") " pod="openstack/nova-api-0" Nov 26 07:08:07 crc kubenswrapper[4492]: I1126 07:08:07.682296 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b1344418-ee16-445c-9a21-0cdd9f4e480d-logs\") pod \"nova-api-0\" (UID: \"b1344418-ee16-445c-9a21-0cdd9f4e480d\") " pod="openstack/nova-api-0" Nov 26 07:08:07 crc kubenswrapper[4492]: I1126 07:08:07.693496 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b1344418-ee16-445c-9a21-0cdd9f4e480d-config-data\") pod \"nova-api-0\" (UID: \"b1344418-ee16-445c-9a21-0cdd9f4e480d\") " pod="openstack/nova-api-0" Nov 26 07:08:07 crc kubenswrapper[4492]: I1126 07:08:07.699138 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b1344418-ee16-445c-9a21-0cdd9f4e480d-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"b1344418-ee16-445c-9a21-0cdd9f4e480d\") " pod="openstack/nova-api-0" Nov 26 07:08:07 crc kubenswrapper[4492]: I1126 07:08:07.730618 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v4vjq\" (UniqueName: \"kubernetes.io/projected/b1344418-ee16-445c-9a21-0cdd9f4e480d-kube-api-access-v4vjq\") pod \"nova-api-0\" (UID: \"b1344418-ee16-445c-9a21-0cdd9f4e480d\") " pod="openstack/nova-api-0" Nov 26 07:08:07 crc kubenswrapper[4492]: I1126 07:08:07.780252 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 26 07:08:07 crc kubenswrapper[4492]: I1126 07:08:07.782065 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8b5a886b-7cbf-4031-8ef9-dd05641aa966-combined-ca-bundle\") pod \"8b5a886b-7cbf-4031-8ef9-dd05641aa966\" (UID: \"8b5a886b-7cbf-4031-8ef9-dd05641aa966\") " Nov 26 07:08:07 crc kubenswrapper[4492]: I1126 07:08:07.782159 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8b5a886b-7cbf-4031-8ef9-dd05641aa966-config-data\") pod \"8b5a886b-7cbf-4031-8ef9-dd05641aa966\" (UID: \"8b5a886b-7cbf-4031-8ef9-dd05641aa966\") " Nov 26 07:08:07 crc kubenswrapper[4492]: I1126 07:08:07.782217 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jd9lj\" (UniqueName: \"kubernetes.io/projected/8b5a886b-7cbf-4031-8ef9-dd05641aa966-kube-api-access-jd9lj\") pod \"8b5a886b-7cbf-4031-8ef9-dd05641aa966\" (UID: \"8b5a886b-7cbf-4031-8ef9-dd05641aa966\") " Nov 26 07:08:07 crc kubenswrapper[4492]: I1126 07:08:07.786691 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8b5a886b-7cbf-4031-8ef9-dd05641aa966-kube-api-access-jd9lj" (OuterVolumeSpecName: "kube-api-access-jd9lj") pod "8b5a886b-7cbf-4031-8ef9-dd05641aa966" (UID: "8b5a886b-7cbf-4031-8ef9-dd05641aa966"). InnerVolumeSpecName "kube-api-access-jd9lj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:08:07 crc kubenswrapper[4492]: I1126 07:08:07.802096 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 26 07:08:07 crc kubenswrapper[4492]: I1126 07:08:07.815387 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8b5a886b-7cbf-4031-8ef9-dd05641aa966-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8b5a886b-7cbf-4031-8ef9-dd05641aa966" (UID: "8b5a886b-7cbf-4031-8ef9-dd05641aa966"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:08:07 crc kubenswrapper[4492]: I1126 07:08:07.823945 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8b5a886b-7cbf-4031-8ef9-dd05641aa966-config-data" (OuterVolumeSpecName: "config-data") pod "8b5a886b-7cbf-4031-8ef9-dd05641aa966" (UID: "8b5a886b-7cbf-4031-8ef9-dd05641aa966"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:08:07 crc kubenswrapper[4492]: I1126 07:08:07.884285 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jd9lj\" (UniqueName: \"kubernetes.io/projected/8b5a886b-7cbf-4031-8ef9-dd05641aa966-kube-api-access-jd9lj\") on node \"crc\" DevicePath \"\"" Nov 26 07:08:07 crc kubenswrapper[4492]: I1126 07:08:07.884315 4492 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8b5a886b-7cbf-4031-8ef9-dd05641aa966-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:08:07 crc kubenswrapper[4492]: I1126 07:08:07.884325 4492 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8b5a886b-7cbf-4031-8ef9-dd05641aa966-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 07:08:08 crc kubenswrapper[4492]: I1126 07:08:08.231876 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 26 07:08:08 crc kubenswrapper[4492]: I1126 07:08:08.350783 4492 generic.go:334] "Generic (PLEG): container finished" podID="8b5a886b-7cbf-4031-8ef9-dd05641aa966" containerID="4a7d6092891a8dcd6fe680de9d3c46dc98ccfc65267acd1b08a997939cf065d3" exitCode=0 Nov 26 07:08:08 crc kubenswrapper[4492]: I1126 07:08:08.350845 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 26 07:08:08 crc kubenswrapper[4492]: I1126 07:08:08.350899 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"8b5a886b-7cbf-4031-8ef9-dd05641aa966","Type":"ContainerDied","Data":"4a7d6092891a8dcd6fe680de9d3c46dc98ccfc65267acd1b08a997939cf065d3"} Nov 26 07:08:08 crc kubenswrapper[4492]: I1126 07:08:08.350953 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"8b5a886b-7cbf-4031-8ef9-dd05641aa966","Type":"ContainerDied","Data":"5078d092631d2be6ed524e97230faf922e50347f876b361a785b57c58a693217"} Nov 26 07:08:08 crc kubenswrapper[4492]: I1126 07:08:08.350978 4492 scope.go:117] "RemoveContainer" containerID="4a7d6092891a8dcd6fe680de9d3c46dc98ccfc65267acd1b08a997939cf065d3" Nov 26 07:08:08 crc kubenswrapper[4492]: I1126 07:08:08.355963 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"b1344418-ee16-445c-9a21-0cdd9f4e480d","Type":"ContainerStarted","Data":"7b58b21823e9b214b0962298647844bd0938929acc14cb39e613faf96fb1d1be"} Nov 26 07:08:08 crc kubenswrapper[4492]: I1126 07:08:08.379066 4492 scope.go:117] "RemoveContainer" containerID="4a7d6092891a8dcd6fe680de9d3c46dc98ccfc65267acd1b08a997939cf065d3" Nov 26 07:08:08 crc kubenswrapper[4492]: E1126 07:08:08.379533 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4a7d6092891a8dcd6fe680de9d3c46dc98ccfc65267acd1b08a997939cf065d3\": container with ID starting with 4a7d6092891a8dcd6fe680de9d3c46dc98ccfc65267acd1b08a997939cf065d3 not found: ID does not exist" containerID="4a7d6092891a8dcd6fe680de9d3c46dc98ccfc65267acd1b08a997939cf065d3" Nov 26 07:08:08 crc kubenswrapper[4492]: I1126 07:08:08.379597 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4a7d6092891a8dcd6fe680de9d3c46dc98ccfc65267acd1b08a997939cf065d3"} err="failed to get container status \"4a7d6092891a8dcd6fe680de9d3c46dc98ccfc65267acd1b08a997939cf065d3\": rpc error: code = NotFound desc = could not find 
container \"4a7d6092891a8dcd6fe680de9d3c46dc98ccfc65267acd1b08a997939cf065d3\": container with ID starting with 4a7d6092891a8dcd6fe680de9d3c46dc98ccfc65267acd1b08a997939cf065d3 not found: ID does not exist" Nov 26 07:08:08 crc kubenswrapper[4492]: I1126 07:08:08.380439 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 07:08:08 crc kubenswrapper[4492]: I1126 07:08:08.386626 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 07:08:08 crc kubenswrapper[4492]: I1126 07:08:08.412505 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 07:08:08 crc kubenswrapper[4492]: E1126 07:08:08.413141 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8b5a886b-7cbf-4031-8ef9-dd05641aa966" containerName="nova-scheduler-scheduler" Nov 26 07:08:08 crc kubenswrapper[4492]: I1126 07:08:08.413182 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="8b5a886b-7cbf-4031-8ef9-dd05641aa966" containerName="nova-scheduler-scheduler" Nov 26 07:08:08 crc kubenswrapper[4492]: I1126 07:08:08.413415 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="8b5a886b-7cbf-4031-8ef9-dd05641aa966" containerName="nova-scheduler-scheduler" Nov 26 07:08:08 crc kubenswrapper[4492]: I1126 07:08:08.414306 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 26 07:08:08 crc kubenswrapper[4492]: I1126 07:08:08.416747 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 26 07:08:08 crc kubenswrapper[4492]: I1126 07:08:08.421596 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 07:08:08 crc kubenswrapper[4492]: I1126 07:08:08.461237 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1ad0cb8d-3495-4b1d-b959-1f1efbad9e39" path="/var/lib/kubelet/pods/1ad0cb8d-3495-4b1d-b959-1f1efbad9e39/volumes" Nov 26 07:08:08 crc kubenswrapper[4492]: I1126 07:08:08.462278 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8b5a886b-7cbf-4031-8ef9-dd05641aa966" path="/var/lib/kubelet/pods/8b5a886b-7cbf-4031-8ef9-dd05641aa966/volumes" Nov 26 07:08:08 crc kubenswrapper[4492]: I1126 07:08:08.503573 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sqwpb\" (UniqueName: \"kubernetes.io/projected/53275682-2daa-41b3-a4b8-daaf0156e239-kube-api-access-sqwpb\") pod \"nova-scheduler-0\" (UID: \"53275682-2daa-41b3-a4b8-daaf0156e239\") " pod="openstack/nova-scheduler-0" Nov 26 07:08:08 crc kubenswrapper[4492]: I1126 07:08:08.503791 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/53275682-2daa-41b3-a4b8-daaf0156e239-config-data\") pod \"nova-scheduler-0\" (UID: \"53275682-2daa-41b3-a4b8-daaf0156e239\") " pod="openstack/nova-scheduler-0" Nov 26 07:08:08 crc kubenswrapper[4492]: I1126 07:08:08.503983 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/53275682-2daa-41b3-a4b8-daaf0156e239-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"53275682-2daa-41b3-a4b8-daaf0156e239\") " pod="openstack/nova-scheduler-0" Nov 26 07:08:08 crc kubenswrapper[4492]: I1126 07:08:08.605571 4492 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/53275682-2daa-41b3-a4b8-daaf0156e239-config-data\") pod \"nova-scheduler-0\" (UID: \"53275682-2daa-41b3-a4b8-daaf0156e239\") " pod="openstack/nova-scheduler-0" Nov 26 07:08:08 crc kubenswrapper[4492]: I1126 07:08:08.605626 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/53275682-2daa-41b3-a4b8-daaf0156e239-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"53275682-2daa-41b3-a4b8-daaf0156e239\") " pod="openstack/nova-scheduler-0" Nov 26 07:08:08 crc kubenswrapper[4492]: I1126 07:08:08.605713 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sqwpb\" (UniqueName: \"kubernetes.io/projected/53275682-2daa-41b3-a4b8-daaf0156e239-kube-api-access-sqwpb\") pod \"nova-scheduler-0\" (UID: \"53275682-2daa-41b3-a4b8-daaf0156e239\") " pod="openstack/nova-scheduler-0" Nov 26 07:08:08 crc kubenswrapper[4492]: I1126 07:08:08.610061 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/53275682-2daa-41b3-a4b8-daaf0156e239-config-data\") pod \"nova-scheduler-0\" (UID: \"53275682-2daa-41b3-a4b8-daaf0156e239\") " pod="openstack/nova-scheduler-0" Nov 26 07:08:08 crc kubenswrapper[4492]: I1126 07:08:08.614904 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/53275682-2daa-41b3-a4b8-daaf0156e239-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"53275682-2daa-41b3-a4b8-daaf0156e239\") " pod="openstack/nova-scheduler-0" Nov 26 07:08:08 crc kubenswrapper[4492]: I1126 07:08:08.622697 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sqwpb\" (UniqueName: \"kubernetes.io/projected/53275682-2daa-41b3-a4b8-daaf0156e239-kube-api-access-sqwpb\") pod \"nova-scheduler-0\" (UID: \"53275682-2daa-41b3-a4b8-daaf0156e239\") " pod="openstack/nova-scheduler-0" Nov 26 07:08:08 crc kubenswrapper[4492]: I1126 07:08:08.740132 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 26 07:08:09 crc kubenswrapper[4492]: I1126 07:08:09.172086 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 07:08:09 crc kubenswrapper[4492]: W1126 07:08:09.174132 4492 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod53275682_2daa_41b3_a4b8_daaf0156e239.slice/crio-7d7e206f4ac7559857fa00e63a15c6dfebbcd19eb483065a853268acfe0625cd WatchSource:0}: Error finding container 7d7e206f4ac7559857fa00e63a15c6dfebbcd19eb483065a853268acfe0625cd: Status 404 returned error can't find the container with id 7d7e206f4ac7559857fa00e63a15c6dfebbcd19eb483065a853268acfe0625cd Nov 26 07:08:09 crc kubenswrapper[4492]: I1126 07:08:09.367993 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"53275682-2daa-41b3-a4b8-daaf0156e239","Type":"ContainerStarted","Data":"9450d16206557faeaf972aa2373cfef2520cbfd4b9b87cdf6cdee7a4b2dcef58"} Nov 26 07:08:09 crc kubenswrapper[4492]: I1126 07:08:09.368272 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"53275682-2daa-41b3-a4b8-daaf0156e239","Type":"ContainerStarted","Data":"7d7e206f4ac7559857fa00e63a15c6dfebbcd19eb483065a853268acfe0625cd"} Nov 26 07:08:09 crc kubenswrapper[4492]: I1126 07:08:09.371112 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"b1344418-ee16-445c-9a21-0cdd9f4e480d","Type":"ContainerStarted","Data":"d85ac88c9a96cd8ac1fc0f6278eae523ac5756acacfa81f1d6c6ac34fcffb3bd"} Nov 26 07:08:09 crc kubenswrapper[4492]: I1126 07:08:09.371140 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"b1344418-ee16-445c-9a21-0cdd9f4e480d","Type":"ContainerStarted","Data":"849cf020df0f00d7e36abdbb1c0d4309bb8344fa24557443e09adc9202715e8a"} Nov 26 07:08:09 crc kubenswrapper[4492]: I1126 07:08:09.388029 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=1.388017552 podStartE2EDuration="1.388017552s" podCreationTimestamp="2025-11-26 07:08:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:08:09.383285394 +0000 UTC m=+1185.267173692" watchObservedRunningTime="2025-11-26 07:08:09.388017552 +0000 UTC m=+1185.271905850" Nov 26 07:08:09 crc kubenswrapper[4492]: I1126 07:08:09.404111 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.404093846 podStartE2EDuration="2.404093846s" podCreationTimestamp="2025-11-26 07:08:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:08:09.402294092 +0000 UTC m=+1185.286182390" watchObservedRunningTime="2025-11-26 07:08:09.404093846 +0000 UTC m=+1185.287982134" Nov 26 07:08:09 crc kubenswrapper[4492]: I1126 07:08:09.729888 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 26 07:08:09 crc kubenswrapper[4492]: I1126 07:08:09.729955 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 26 07:08:11 crc kubenswrapper[4492]: I1126 07:08:11.729277 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openstack/nova-cell1-conductor-0" Nov 26 07:08:13 crc kubenswrapper[4492]: I1126 07:08:13.740248 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 26 07:08:14 crc kubenswrapper[4492]: I1126 07:08:14.729551 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 26 07:08:14 crc kubenswrapper[4492]: I1126 07:08:14.729870 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 26 07:08:15 crc kubenswrapper[4492]: I1126 07:08:15.746317 4492 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="ace53bbf-c004-4557-ad30-eac2ac4f64ea" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.209:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 26 07:08:15 crc kubenswrapper[4492]: I1126 07:08:15.746354 4492 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="ace53bbf-c004-4557-ad30-eac2ac4f64ea" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.209:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 26 07:08:17 crc kubenswrapper[4492]: I1126 07:08:17.412695 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Nov 26 07:08:17 crc kubenswrapper[4492]: I1126 07:08:17.803316 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 26 07:08:17 crc kubenswrapper[4492]: I1126 07:08:17.803394 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 26 07:08:18 crc kubenswrapper[4492]: I1126 07:08:18.740901 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Nov 26 07:08:18 crc kubenswrapper[4492]: I1126 07:08:18.764032 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Nov 26 07:08:18 crc kubenswrapper[4492]: I1126 07:08:18.886873 4492 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="b1344418-ee16-445c-9a21-0cdd9f4e480d" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.210:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 26 07:08:18 crc kubenswrapper[4492]: I1126 07:08:18.887205 4492 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="b1344418-ee16-445c-9a21-0cdd9f4e480d" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.210:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 26 07:08:19 crc kubenswrapper[4492]: I1126 07:08:19.516902 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Nov 26 07:08:21 crc kubenswrapper[4492]: I1126 07:08:21.048476 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 26 07:08:21 crc kubenswrapper[4492]: I1126 07:08:21.049128 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/kube-state-metrics-0" podUID="40ff3c7f-2bdf-42be-bcde-659ad3f15ca5" containerName="kube-state-metrics" containerID="cri-o://cf76ef8746e897a754e3bb429b3630689f062ed5210357362a117677199e0e4d" gracePeriod=30 Nov 26 
07:08:21 crc kubenswrapper[4492]: I1126 07:08:21.470979 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 26 07:08:21 crc kubenswrapper[4492]: I1126 07:08:21.482086 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pjsxl\" (UniqueName: \"kubernetes.io/projected/40ff3c7f-2bdf-42be-bcde-659ad3f15ca5-kube-api-access-pjsxl\") pod \"40ff3c7f-2bdf-42be-bcde-659ad3f15ca5\" (UID: \"40ff3c7f-2bdf-42be-bcde-659ad3f15ca5\") " Nov 26 07:08:21 crc kubenswrapper[4492]: I1126 07:08:21.492236 4492 generic.go:334] "Generic (PLEG): container finished" podID="40ff3c7f-2bdf-42be-bcde-659ad3f15ca5" containerID="cf76ef8746e897a754e3bb429b3630689f062ed5210357362a117677199e0e4d" exitCode=2 Nov 26 07:08:21 crc kubenswrapper[4492]: I1126 07:08:21.492291 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"40ff3c7f-2bdf-42be-bcde-659ad3f15ca5","Type":"ContainerDied","Data":"cf76ef8746e897a754e3bb429b3630689f062ed5210357362a117677199e0e4d"} Nov 26 07:08:21 crc kubenswrapper[4492]: I1126 07:08:21.492323 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"40ff3c7f-2bdf-42be-bcde-659ad3f15ca5","Type":"ContainerDied","Data":"97755a404ec6ec5660768195301d3ec736371bff2f40f1e29193dca68bf14e09"} Nov 26 07:08:21 crc kubenswrapper[4492]: I1126 07:08:21.492343 4492 scope.go:117] "RemoveContainer" containerID="cf76ef8746e897a754e3bb429b3630689f062ed5210357362a117677199e0e4d" Nov 26 07:08:21 crc kubenswrapper[4492]: I1126 07:08:21.492467 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 26 07:08:21 crc kubenswrapper[4492]: I1126 07:08:21.492804 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/40ff3c7f-2bdf-42be-bcde-659ad3f15ca5-kube-api-access-pjsxl" (OuterVolumeSpecName: "kube-api-access-pjsxl") pod "40ff3c7f-2bdf-42be-bcde-659ad3f15ca5" (UID: "40ff3c7f-2bdf-42be-bcde-659ad3f15ca5"). InnerVolumeSpecName "kube-api-access-pjsxl". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:08:21 crc kubenswrapper[4492]: I1126 07:08:21.547476 4492 scope.go:117] "RemoveContainer" containerID="cf76ef8746e897a754e3bb429b3630689f062ed5210357362a117677199e0e4d" Nov 26 07:08:21 crc kubenswrapper[4492]: E1126 07:08:21.548222 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cf76ef8746e897a754e3bb429b3630689f062ed5210357362a117677199e0e4d\": container with ID starting with cf76ef8746e897a754e3bb429b3630689f062ed5210357362a117677199e0e4d not found: ID does not exist" containerID="cf76ef8746e897a754e3bb429b3630689f062ed5210357362a117677199e0e4d" Nov 26 07:08:21 crc kubenswrapper[4492]: I1126 07:08:21.548265 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cf76ef8746e897a754e3bb429b3630689f062ed5210357362a117677199e0e4d"} err="failed to get container status \"cf76ef8746e897a754e3bb429b3630689f062ed5210357362a117677199e0e4d\": rpc error: code = NotFound desc = could not find container \"cf76ef8746e897a754e3bb429b3630689f062ed5210357362a117677199e0e4d\": container with ID starting with cf76ef8746e897a754e3bb429b3630689f062ed5210357362a117677199e0e4d not found: ID does not exist" Nov 26 07:08:21 crc kubenswrapper[4492]: I1126 07:08:21.589804 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pjsxl\" (UniqueName: \"kubernetes.io/projected/40ff3c7f-2bdf-42be-bcde-659ad3f15ca5-kube-api-access-pjsxl\") on node \"crc\" DevicePath \"\"" Nov 26 07:08:21 crc kubenswrapper[4492]: I1126 07:08:21.831017 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 26 07:08:21 crc kubenswrapper[4492]: I1126 07:08:21.843738 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 26 07:08:21 crc kubenswrapper[4492]: I1126 07:08:21.850805 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Nov 26 07:08:21 crc kubenswrapper[4492]: E1126 07:08:21.851222 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="40ff3c7f-2bdf-42be-bcde-659ad3f15ca5" containerName="kube-state-metrics" Nov 26 07:08:21 crc kubenswrapper[4492]: I1126 07:08:21.851241 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="40ff3c7f-2bdf-42be-bcde-659ad3f15ca5" containerName="kube-state-metrics" Nov 26 07:08:21 crc kubenswrapper[4492]: I1126 07:08:21.851427 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="40ff3c7f-2bdf-42be-bcde-659ad3f15ca5" containerName="kube-state-metrics" Nov 26 07:08:21 crc kubenswrapper[4492]: I1126 07:08:21.852133 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 26 07:08:21 crc kubenswrapper[4492]: I1126 07:08:21.854127 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-kube-state-metrics-svc" Nov 26 07:08:21 crc kubenswrapper[4492]: I1126 07:08:21.854701 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"kube-state-metrics-tls-config" Nov 26 07:08:21 crc kubenswrapper[4492]: I1126 07:08:21.860354 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 26 07:08:21 crc kubenswrapper[4492]: I1126 07:08:21.897301 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hvtd9\" (UniqueName: \"kubernetes.io/projected/9d721534-74ac-4ea4-9583-f96ca7e8f61c-kube-api-access-hvtd9\") pod \"kube-state-metrics-0\" (UID: \"9d721534-74ac-4ea4-9583-f96ca7e8f61c\") " pod="openstack/kube-state-metrics-0" Nov 26 07:08:21 crc kubenswrapper[4492]: I1126 07:08:21.897590 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/9d721534-74ac-4ea4-9583-f96ca7e8f61c-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"9d721534-74ac-4ea4-9583-f96ca7e8f61c\") " pod="openstack/kube-state-metrics-0" Nov 26 07:08:21 crc kubenswrapper[4492]: I1126 07:08:21.897705 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d721534-74ac-4ea4-9583-f96ca7e8f61c-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"9d721534-74ac-4ea4-9583-f96ca7e8f61c\") " pod="openstack/kube-state-metrics-0" Nov 26 07:08:21 crc kubenswrapper[4492]: I1126 07:08:21.897884 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/9d721534-74ac-4ea4-9583-f96ca7e8f61c-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"9d721534-74ac-4ea4-9583-f96ca7e8f61c\") " pod="openstack/kube-state-metrics-0" Nov 26 07:08:21 crc kubenswrapper[4492]: I1126 07:08:21.999598 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/9d721534-74ac-4ea4-9583-f96ca7e8f61c-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"9d721534-74ac-4ea4-9583-f96ca7e8f61c\") " pod="openstack/kube-state-metrics-0" Nov 26 07:08:21 crc kubenswrapper[4492]: I1126 07:08:21.999823 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hvtd9\" (UniqueName: \"kubernetes.io/projected/9d721534-74ac-4ea4-9583-f96ca7e8f61c-kube-api-access-hvtd9\") pod \"kube-state-metrics-0\" (UID: \"9d721534-74ac-4ea4-9583-f96ca7e8f61c\") " pod="openstack/kube-state-metrics-0" Nov 26 07:08:21 crc kubenswrapper[4492]: I1126 07:08:21.999948 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/9d721534-74ac-4ea4-9583-f96ca7e8f61c-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"9d721534-74ac-4ea4-9583-f96ca7e8f61c\") " pod="openstack/kube-state-metrics-0" Nov 26 07:08:22 crc kubenswrapper[4492]: I1126 07:08:22.000028 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" 
(UniqueName: \"kubernetes.io/secret/9d721534-74ac-4ea4-9583-f96ca7e8f61c-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"9d721534-74ac-4ea4-9583-f96ca7e8f61c\") " pod="openstack/kube-state-metrics-0" Nov 26 07:08:22 crc kubenswrapper[4492]: I1126 07:08:22.003836 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/9d721534-74ac-4ea4-9583-f96ca7e8f61c-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"9d721534-74ac-4ea4-9583-f96ca7e8f61c\") " pod="openstack/kube-state-metrics-0" Nov 26 07:08:22 crc kubenswrapper[4492]: I1126 07:08:22.004540 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d721534-74ac-4ea4-9583-f96ca7e8f61c-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"9d721534-74ac-4ea4-9583-f96ca7e8f61c\") " pod="openstack/kube-state-metrics-0" Nov 26 07:08:22 crc kubenswrapper[4492]: I1126 07:08:22.005505 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/9d721534-74ac-4ea4-9583-f96ca7e8f61c-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"9d721534-74ac-4ea4-9583-f96ca7e8f61c\") " pod="openstack/kube-state-metrics-0" Nov 26 07:08:22 crc kubenswrapper[4492]: I1126 07:08:22.016739 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hvtd9\" (UniqueName: \"kubernetes.io/projected/9d721534-74ac-4ea4-9583-f96ca7e8f61c-kube-api-access-hvtd9\") pod \"kube-state-metrics-0\" (UID: \"9d721534-74ac-4ea4-9583-f96ca7e8f61c\") " pod="openstack/kube-state-metrics-0" Nov 26 07:08:22 crc kubenswrapper[4492]: I1126 07:08:22.175367 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 26 07:08:22 crc kubenswrapper[4492]: I1126 07:08:22.450046 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="40ff3c7f-2bdf-42be-bcde-659ad3f15ca5" path="/var/lib/kubelet/pods/40ff3c7f-2bdf-42be-bcde-659ad3f15ca5/volumes" Nov 26 07:08:22 crc kubenswrapper[4492]: I1126 07:08:22.605677 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 26 07:08:22 crc kubenswrapper[4492]: I1126 07:08:22.768377 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 26 07:08:22 crc kubenswrapper[4492]: I1126 07:08:22.768710 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="35c37b78-2ced-4321-a6bf-8eabc82419df" containerName="ceilometer-central-agent" containerID="cri-o://4b1e6d21df7c142f00e46814f5efcf0417852d9643f40d338ae4f1861da90647" gracePeriod=30 Nov 26 07:08:22 crc kubenswrapper[4492]: I1126 07:08:22.768802 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="35c37b78-2ced-4321-a6bf-8eabc82419df" containerName="proxy-httpd" containerID="cri-o://1ad9adcbe7c8d67e57a89174b634954539fb2bab3376af76a9cd0f32de5e500c" gracePeriod=30 Nov 26 07:08:22 crc kubenswrapper[4492]: I1126 07:08:22.768858 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="35c37b78-2ced-4321-a6bf-8eabc82419df" containerName="sg-core" containerID="cri-o://e00afe7e2b6f9d1a75a317963c1a520104f0ff141e036d0910c2cce290b32e89" gracePeriod=30 Nov 26 07:08:22 crc kubenswrapper[4492]: I1126 07:08:22.768884 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="35c37b78-2ced-4321-a6bf-8eabc82419df" containerName="ceilometer-notification-agent" containerID="cri-o://c58b01de2f49ba772ad3b14425f8759863c0e5d4f8c61961592cbd89d97b5153" gracePeriod=30 Nov 26 07:08:23 crc kubenswrapper[4492]: I1126 07:08:23.518655 4492 generic.go:334] "Generic (PLEG): container finished" podID="35c37b78-2ced-4321-a6bf-8eabc82419df" containerID="1ad9adcbe7c8d67e57a89174b634954539fb2bab3376af76a9cd0f32de5e500c" exitCode=0 Nov 26 07:08:23 crc kubenswrapper[4492]: I1126 07:08:23.518966 4492 generic.go:334] "Generic (PLEG): container finished" podID="35c37b78-2ced-4321-a6bf-8eabc82419df" containerID="e00afe7e2b6f9d1a75a317963c1a520104f0ff141e036d0910c2cce290b32e89" exitCode=2 Nov 26 07:08:23 crc kubenswrapper[4492]: I1126 07:08:23.518978 4492 generic.go:334] "Generic (PLEG): container finished" podID="35c37b78-2ced-4321-a6bf-8eabc82419df" containerID="4b1e6d21df7c142f00e46814f5efcf0417852d9643f40d338ae4f1861da90647" exitCode=0 Nov 26 07:08:23 crc kubenswrapper[4492]: I1126 07:08:23.518875 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"35c37b78-2ced-4321-a6bf-8eabc82419df","Type":"ContainerDied","Data":"1ad9adcbe7c8d67e57a89174b634954539fb2bab3376af76a9cd0f32de5e500c"} Nov 26 07:08:23 crc kubenswrapper[4492]: I1126 07:08:23.519070 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"35c37b78-2ced-4321-a6bf-8eabc82419df","Type":"ContainerDied","Data":"e00afe7e2b6f9d1a75a317963c1a520104f0ff141e036d0910c2cce290b32e89"} Nov 26 07:08:23 crc kubenswrapper[4492]: I1126 07:08:23.519096 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"35c37b78-2ced-4321-a6bf-8eabc82419df","Type":"ContainerDied","Data":"4b1e6d21df7c142f00e46814f5efcf0417852d9643f40d338ae4f1861da90647"} Nov 26 07:08:23 crc kubenswrapper[4492]: I1126 07:08:23.520221 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"9d721534-74ac-4ea4-9583-f96ca7e8f61c","Type":"ContainerStarted","Data":"67c205bebc23bbc2dc4d8f06a1372b10c21936fe9d86105fbfee316b9f063b6b"} Nov 26 07:08:23 crc kubenswrapper[4492]: I1126 07:08:23.520271 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"9d721534-74ac-4ea4-9583-f96ca7e8f61c","Type":"ContainerStarted","Data":"2fcd3278852761664bda29746666a6d0acf08c030bb248ae63e187120bb31215"} Nov 26 07:08:23 crc kubenswrapper[4492]: I1126 07:08:23.520771 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Nov 26 07:08:23 crc kubenswrapper[4492]: I1126 07:08:23.544165 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=2.249156217 podStartE2EDuration="2.544152622s" podCreationTimestamp="2025-11-26 07:08:21 +0000 UTC" firstStartedPulling="2025-11-26 07:08:22.614618598 +0000 UTC m=+1198.498506886" lastFinishedPulling="2025-11-26 07:08:22.909614993 +0000 UTC m=+1198.793503291" observedRunningTime="2025-11-26 07:08:23.538367175 +0000 UTC m=+1199.422255473" watchObservedRunningTime="2025-11-26 07:08:23.544152622 +0000 UTC m=+1199.428040910" Nov 26 07:08:24 crc kubenswrapper[4492]: I1126 07:08:24.740334 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 26 07:08:24 crc kubenswrapper[4492]: I1126 07:08:24.742948 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 26 07:08:24 crc kubenswrapper[4492]: I1126 07:08:24.755346 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 26 07:08:25 crc kubenswrapper[4492]: I1126 07:08:25.546462 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 26 07:08:26 crc kubenswrapper[4492]: I1126 07:08:26.498965 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 26 07:08:26 crc kubenswrapper[4492]: I1126 07:08:26.555529 4492 generic.go:334] "Generic (PLEG): container finished" podID="a7a8743c-1051-4955-86f7-9ddf8f356459" containerID="ba807ec63122983bedcd63e4d0c1e99299347c9db49d2d90d2c437c43ce13a5c" exitCode=137 Nov 26 07:08:26 crc kubenswrapper[4492]: I1126 07:08:26.555590 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 26 07:08:26 crc kubenswrapper[4492]: I1126 07:08:26.555636 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"a7a8743c-1051-4955-86f7-9ddf8f356459","Type":"ContainerDied","Data":"ba807ec63122983bedcd63e4d0c1e99299347c9db49d2d90d2c437c43ce13a5c"} Nov 26 07:08:26 crc kubenswrapper[4492]: I1126 07:08:26.555688 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"a7a8743c-1051-4955-86f7-9ddf8f356459","Type":"ContainerDied","Data":"2c1b0c751752e61554519adf8fe0cc813c7d43865b18db5265bed50d876bfcb2"} Nov 26 07:08:26 crc kubenswrapper[4492]: I1126 07:08:26.555712 4492 scope.go:117] "RemoveContainer" containerID="ba807ec63122983bedcd63e4d0c1e99299347c9db49d2d90d2c437c43ce13a5c" Nov 26 07:08:26 crc kubenswrapper[4492]: I1126 07:08:26.573786 4492 scope.go:117] "RemoveContainer" containerID="ba807ec63122983bedcd63e4d0c1e99299347c9db49d2d90d2c437c43ce13a5c" Nov 26 07:08:26 crc kubenswrapper[4492]: E1126 07:08:26.574280 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ba807ec63122983bedcd63e4d0c1e99299347c9db49d2d90d2c437c43ce13a5c\": container with ID starting with ba807ec63122983bedcd63e4d0c1e99299347c9db49d2d90d2c437c43ce13a5c not found: ID does not exist" containerID="ba807ec63122983bedcd63e4d0c1e99299347c9db49d2d90d2c437c43ce13a5c" Nov 26 07:08:26 crc kubenswrapper[4492]: I1126 07:08:26.574321 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ba807ec63122983bedcd63e4d0c1e99299347c9db49d2d90d2c437c43ce13a5c"} err="failed to get container status \"ba807ec63122983bedcd63e4d0c1e99299347c9db49d2d90d2c437c43ce13a5c\": rpc error: code = NotFound desc = could not find container \"ba807ec63122983bedcd63e4d0c1e99299347c9db49d2d90d2c437c43ce13a5c\": container with ID starting with ba807ec63122983bedcd63e4d0c1e99299347c9db49d2d90d2c437c43ce13a5c not found: ID does not exist" Nov 26 07:08:26 crc kubenswrapper[4492]: I1126 07:08:26.613960 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a7a8743c-1051-4955-86f7-9ddf8f356459-combined-ca-bundle\") pod \"a7a8743c-1051-4955-86f7-9ddf8f356459\" (UID: \"a7a8743c-1051-4955-86f7-9ddf8f356459\") " Nov 26 07:08:26 crc kubenswrapper[4492]: I1126 07:08:26.614327 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a7a8743c-1051-4955-86f7-9ddf8f356459-config-data\") pod \"a7a8743c-1051-4955-86f7-9ddf8f356459\" (UID: \"a7a8743c-1051-4955-86f7-9ddf8f356459\") " Nov 26 07:08:26 crc kubenswrapper[4492]: I1126 07:08:26.614459 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pvdq4\" (UniqueName: \"kubernetes.io/projected/a7a8743c-1051-4955-86f7-9ddf8f356459-kube-api-access-pvdq4\") pod \"a7a8743c-1051-4955-86f7-9ddf8f356459\" (UID: \"a7a8743c-1051-4955-86f7-9ddf8f356459\") " Nov 26 07:08:26 crc kubenswrapper[4492]: I1126 07:08:26.628295 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a7a8743c-1051-4955-86f7-9ddf8f356459-kube-api-access-pvdq4" (OuterVolumeSpecName: "kube-api-access-pvdq4") pod "a7a8743c-1051-4955-86f7-9ddf8f356459" (UID: "a7a8743c-1051-4955-86f7-9ddf8f356459"). 
InnerVolumeSpecName "kube-api-access-pvdq4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:08:26 crc kubenswrapper[4492]: I1126 07:08:26.637770 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a7a8743c-1051-4955-86f7-9ddf8f356459-config-data" (OuterVolumeSpecName: "config-data") pod "a7a8743c-1051-4955-86f7-9ddf8f356459" (UID: "a7a8743c-1051-4955-86f7-9ddf8f356459"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:08:26 crc kubenswrapper[4492]: I1126 07:08:26.638823 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a7a8743c-1051-4955-86f7-9ddf8f356459-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a7a8743c-1051-4955-86f7-9ddf8f356459" (UID: "a7a8743c-1051-4955-86f7-9ddf8f356459"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:08:26 crc kubenswrapper[4492]: I1126 07:08:26.719884 4492 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a7a8743c-1051-4955-86f7-9ddf8f356459-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 07:08:26 crc kubenswrapper[4492]: I1126 07:08:26.719933 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pvdq4\" (UniqueName: \"kubernetes.io/projected/a7a8743c-1051-4955-86f7-9ddf8f356459-kube-api-access-pvdq4\") on node \"crc\" DevicePath \"\"" Nov 26 07:08:26 crc kubenswrapper[4492]: I1126 07:08:26.719962 4492 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a7a8743c-1051-4955-86f7-9ddf8f356459-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:08:26 crc kubenswrapper[4492]: I1126 07:08:26.908905 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 26 07:08:26 crc kubenswrapper[4492]: I1126 07:08:26.939786 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 26 07:08:26 crc kubenswrapper[4492]: I1126 07:08:26.962111 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 26 07:08:26 crc kubenswrapper[4492]: E1126 07:08:26.962595 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a7a8743c-1051-4955-86f7-9ddf8f356459" containerName="nova-cell1-novncproxy-novncproxy" Nov 26 07:08:26 crc kubenswrapper[4492]: I1126 07:08:26.962609 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="a7a8743c-1051-4955-86f7-9ddf8f356459" containerName="nova-cell1-novncproxy-novncproxy" Nov 26 07:08:26 crc kubenswrapper[4492]: I1126 07:08:26.962825 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="a7a8743c-1051-4955-86f7-9ddf8f356459" containerName="nova-cell1-novncproxy-novncproxy" Nov 26 07:08:26 crc kubenswrapper[4492]: I1126 07:08:26.963627 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 26 07:08:26 crc kubenswrapper[4492]: I1126 07:08:26.966872 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-vencrypt" Nov 26 07:08:26 crc kubenswrapper[4492]: I1126 07:08:26.966908 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Nov 26 07:08:26 crc kubenswrapper[4492]: I1126 07:08:26.967669 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-public-svc" Nov 26 07:08:26 crc kubenswrapper[4492]: I1126 07:08:26.969425 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 26 07:08:27 crc kubenswrapper[4492]: I1126 07:08:27.035853 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-499gk\" (UniqueName: \"kubernetes.io/projected/fdc4ca0e-c4ee-4d95-9823-badf81e0a49a-kube-api-access-499gk\") pod \"nova-cell1-novncproxy-0\" (UID: \"fdc4ca0e-c4ee-4d95-9823-badf81e0a49a\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 07:08:27 crc kubenswrapper[4492]: I1126 07:08:27.035919 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/fdc4ca0e-c4ee-4d95-9823-badf81e0a49a-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"fdc4ca0e-c4ee-4d95-9823-badf81e0a49a\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 07:08:27 crc kubenswrapper[4492]: I1126 07:08:27.035993 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fdc4ca0e-c4ee-4d95-9823-badf81e0a49a-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"fdc4ca0e-c4ee-4d95-9823-badf81e0a49a\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 07:08:27 crc kubenswrapper[4492]: I1126 07:08:27.036023 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/fdc4ca0e-c4ee-4d95-9823-badf81e0a49a-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"fdc4ca0e-c4ee-4d95-9823-badf81e0a49a\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 07:08:27 crc kubenswrapper[4492]: I1126 07:08:27.036138 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fdc4ca0e-c4ee-4d95-9823-badf81e0a49a-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"fdc4ca0e-c4ee-4d95-9823-badf81e0a49a\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 07:08:27 crc kubenswrapper[4492]: I1126 07:08:27.137564 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-499gk\" (UniqueName: \"kubernetes.io/projected/fdc4ca0e-c4ee-4d95-9823-badf81e0a49a-kube-api-access-499gk\") pod \"nova-cell1-novncproxy-0\" (UID: \"fdc4ca0e-c4ee-4d95-9823-badf81e0a49a\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 07:08:27 crc kubenswrapper[4492]: I1126 07:08:27.137645 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/fdc4ca0e-c4ee-4d95-9823-badf81e0a49a-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"fdc4ca0e-c4ee-4d95-9823-badf81e0a49a\") " 
pod="openstack/nova-cell1-novncproxy-0" Nov 26 07:08:27 crc kubenswrapper[4492]: I1126 07:08:27.137697 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fdc4ca0e-c4ee-4d95-9823-badf81e0a49a-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"fdc4ca0e-c4ee-4d95-9823-badf81e0a49a\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 07:08:27 crc kubenswrapper[4492]: I1126 07:08:27.137748 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/fdc4ca0e-c4ee-4d95-9823-badf81e0a49a-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"fdc4ca0e-c4ee-4d95-9823-badf81e0a49a\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 07:08:27 crc kubenswrapper[4492]: I1126 07:08:27.138512 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fdc4ca0e-c4ee-4d95-9823-badf81e0a49a-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"fdc4ca0e-c4ee-4d95-9823-badf81e0a49a\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 07:08:27 crc kubenswrapper[4492]: I1126 07:08:27.144077 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/fdc4ca0e-c4ee-4d95-9823-badf81e0a49a-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"fdc4ca0e-c4ee-4d95-9823-badf81e0a49a\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 07:08:27 crc kubenswrapper[4492]: I1126 07:08:27.144503 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fdc4ca0e-c4ee-4d95-9823-badf81e0a49a-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"fdc4ca0e-c4ee-4d95-9823-badf81e0a49a\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 07:08:27 crc kubenswrapper[4492]: I1126 07:08:27.144529 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/fdc4ca0e-c4ee-4d95-9823-badf81e0a49a-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"fdc4ca0e-c4ee-4d95-9823-badf81e0a49a\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 07:08:27 crc kubenswrapper[4492]: I1126 07:08:27.145212 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fdc4ca0e-c4ee-4d95-9823-badf81e0a49a-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"fdc4ca0e-c4ee-4d95-9823-badf81e0a49a\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 07:08:27 crc kubenswrapper[4492]: I1126 07:08:27.155415 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-499gk\" (UniqueName: \"kubernetes.io/projected/fdc4ca0e-c4ee-4d95-9823-badf81e0a49a-kube-api-access-499gk\") pod \"nova-cell1-novncproxy-0\" (UID: \"fdc4ca0e-c4ee-4d95-9823-badf81e0a49a\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 07:08:27 crc kubenswrapper[4492]: I1126 07:08:27.283763 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 26 07:08:27 crc kubenswrapper[4492]: I1126 07:08:27.764640 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 26 07:08:27 crc kubenswrapper[4492]: I1126 07:08:27.806788 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 26 07:08:27 crc kubenswrapper[4492]: I1126 07:08:27.807556 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 26 07:08:27 crc kubenswrapper[4492]: I1126 07:08:27.808052 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 26 07:08:27 crc kubenswrapper[4492]: I1126 07:08:27.816615 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 26 07:08:28 crc kubenswrapper[4492]: I1126 07:08:28.020654 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 26 07:08:28 crc kubenswrapper[4492]: I1126 07:08:28.059860 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/35c37b78-2ced-4321-a6bf-8eabc82419df-scripts\") pod \"35c37b78-2ced-4321-a6bf-8eabc82419df\" (UID: \"35c37b78-2ced-4321-a6bf-8eabc82419df\") " Nov 26 07:08:28 crc kubenswrapper[4492]: I1126 07:08:28.059947 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/35c37b78-2ced-4321-a6bf-8eabc82419df-combined-ca-bundle\") pod \"35c37b78-2ced-4321-a6bf-8eabc82419df\" (UID: \"35c37b78-2ced-4321-a6bf-8eabc82419df\") " Nov 26 07:08:28 crc kubenswrapper[4492]: I1126 07:08:28.059988 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-srx2j\" (UniqueName: \"kubernetes.io/projected/35c37b78-2ced-4321-a6bf-8eabc82419df-kube-api-access-srx2j\") pod \"35c37b78-2ced-4321-a6bf-8eabc82419df\" (UID: \"35c37b78-2ced-4321-a6bf-8eabc82419df\") " Nov 26 07:08:28 crc kubenswrapper[4492]: I1126 07:08:28.060014 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/35c37b78-2ced-4321-a6bf-8eabc82419df-run-httpd\") pod \"35c37b78-2ced-4321-a6bf-8eabc82419df\" (UID: \"35c37b78-2ced-4321-a6bf-8eabc82419df\") " Nov 26 07:08:28 crc kubenswrapper[4492]: I1126 07:08:28.060052 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/35c37b78-2ced-4321-a6bf-8eabc82419df-log-httpd\") pod \"35c37b78-2ced-4321-a6bf-8eabc82419df\" (UID: \"35c37b78-2ced-4321-a6bf-8eabc82419df\") " Nov 26 07:08:28 crc kubenswrapper[4492]: I1126 07:08:28.060076 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/35c37b78-2ced-4321-a6bf-8eabc82419df-config-data\") pod \"35c37b78-2ced-4321-a6bf-8eabc82419df\" (UID: \"35c37b78-2ced-4321-a6bf-8eabc82419df\") " Nov 26 07:08:28 crc kubenswrapper[4492]: I1126 07:08:28.060125 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/35c37b78-2ced-4321-a6bf-8eabc82419df-sg-core-conf-yaml\") pod \"35c37b78-2ced-4321-a6bf-8eabc82419df\" (UID: \"35c37b78-2ced-4321-a6bf-8eabc82419df\") " Nov 26 07:08:28 crc kubenswrapper[4492]: I1126 
07:08:28.060382 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/35c37b78-2ced-4321-a6bf-8eabc82419df-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "35c37b78-2ced-4321-a6bf-8eabc82419df" (UID: "35c37b78-2ced-4321-a6bf-8eabc82419df"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:08:28 crc kubenswrapper[4492]: I1126 07:08:28.060517 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/35c37b78-2ced-4321-a6bf-8eabc82419df-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "35c37b78-2ced-4321-a6bf-8eabc82419df" (UID: "35c37b78-2ced-4321-a6bf-8eabc82419df"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:08:28 crc kubenswrapper[4492]: I1126 07:08:28.074298 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/35c37b78-2ced-4321-a6bf-8eabc82419df-scripts" (OuterVolumeSpecName: "scripts") pod "35c37b78-2ced-4321-a6bf-8eabc82419df" (UID: "35c37b78-2ced-4321-a6bf-8eabc82419df"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:08:28 crc kubenswrapper[4492]: I1126 07:08:28.076318 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/35c37b78-2ced-4321-a6bf-8eabc82419df-kube-api-access-srx2j" (OuterVolumeSpecName: "kube-api-access-srx2j") pod "35c37b78-2ced-4321-a6bf-8eabc82419df" (UID: "35c37b78-2ced-4321-a6bf-8eabc82419df"). InnerVolumeSpecName "kube-api-access-srx2j". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:08:28 crc kubenswrapper[4492]: I1126 07:08:28.083855 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/35c37b78-2ced-4321-a6bf-8eabc82419df-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "35c37b78-2ced-4321-a6bf-8eabc82419df" (UID: "35c37b78-2ced-4321-a6bf-8eabc82419df"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:08:28 crc kubenswrapper[4492]: I1126 07:08:28.141600 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/35c37b78-2ced-4321-a6bf-8eabc82419df-config-data" (OuterVolumeSpecName: "config-data") pod "35c37b78-2ced-4321-a6bf-8eabc82419df" (UID: "35c37b78-2ced-4321-a6bf-8eabc82419df"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:08:28 crc kubenswrapper[4492]: I1126 07:08:28.142697 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/35c37b78-2ced-4321-a6bf-8eabc82419df-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "35c37b78-2ced-4321-a6bf-8eabc82419df" (UID: "35c37b78-2ced-4321-a6bf-8eabc82419df"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:08:28 crc kubenswrapper[4492]: I1126 07:08:28.162976 4492 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/35c37b78-2ced-4321-a6bf-8eabc82419df-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 07:08:28 crc kubenswrapper[4492]: I1126 07:08:28.163005 4492 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/35c37b78-2ced-4321-a6bf-8eabc82419df-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:08:28 crc kubenswrapper[4492]: I1126 07:08:28.163022 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-srx2j\" (UniqueName: \"kubernetes.io/projected/35c37b78-2ced-4321-a6bf-8eabc82419df-kube-api-access-srx2j\") on node \"crc\" DevicePath \"\"" Nov 26 07:08:28 crc kubenswrapper[4492]: I1126 07:08:28.163035 4492 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/35c37b78-2ced-4321-a6bf-8eabc82419df-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 26 07:08:28 crc kubenswrapper[4492]: I1126 07:08:28.163045 4492 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/35c37b78-2ced-4321-a6bf-8eabc82419df-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 26 07:08:28 crc kubenswrapper[4492]: I1126 07:08:28.163055 4492 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/35c37b78-2ced-4321-a6bf-8eabc82419df-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 07:08:28 crc kubenswrapper[4492]: I1126 07:08:28.163080 4492 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/35c37b78-2ced-4321-a6bf-8eabc82419df-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 26 07:08:28 crc kubenswrapper[4492]: I1126 07:08:28.450503 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a7a8743c-1051-4955-86f7-9ddf8f356459" path="/var/lib/kubelet/pods/a7a8743c-1051-4955-86f7-9ddf8f356459/volumes" Nov 26 07:08:28 crc kubenswrapper[4492]: I1126 07:08:28.576588 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"fdc4ca0e-c4ee-4d95-9823-badf81e0a49a","Type":"ContainerStarted","Data":"28ad139c507b39f8a893c8eaf6e1cde5060955c8fec5fb283ce048d3810601f6"} Nov 26 07:08:28 crc kubenswrapper[4492]: I1126 07:08:28.576957 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"fdc4ca0e-c4ee-4d95-9823-badf81e0a49a","Type":"ContainerStarted","Data":"b36b632fe0b4ed26c5b77164d98393e3a9f618ee7f7d60777aac9c889f9101d0"} Nov 26 07:08:28 crc kubenswrapper[4492]: I1126 07:08:28.580629 4492 generic.go:334] "Generic (PLEG): container finished" podID="35c37b78-2ced-4321-a6bf-8eabc82419df" containerID="c58b01de2f49ba772ad3b14425f8759863c0e5d4f8c61961592cbd89d97b5153" exitCode=0 Nov 26 07:08:28 crc kubenswrapper[4492]: I1126 07:08:28.580729 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"35c37b78-2ced-4321-a6bf-8eabc82419df","Type":"ContainerDied","Data":"c58b01de2f49ba772ad3b14425f8759863c0e5d4f8c61961592cbd89d97b5153"} Nov 26 07:08:28 crc kubenswrapper[4492]: I1126 07:08:28.580798 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"35c37b78-2ced-4321-a6bf-8eabc82419df","Type":"ContainerDied","Data":"f33e411c3fa8d57575946470bf7bf6f62ad25dc3d7085e325a7c9b23eaac277b"} Nov 26 07:08:28 crc kubenswrapper[4492]: I1126 07:08:28.580808 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 26 07:08:28 crc kubenswrapper[4492]: I1126 07:08:28.580829 4492 scope.go:117] "RemoveContainer" containerID="1ad9adcbe7c8d67e57a89174b634954539fb2bab3376af76a9cd0f32de5e500c" Nov 26 07:08:28 crc kubenswrapper[4492]: I1126 07:08:28.581210 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 26 07:08:28 crc kubenswrapper[4492]: I1126 07:08:28.589539 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 26 07:08:28 crc kubenswrapper[4492]: I1126 07:08:28.608734 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.608719133 podStartE2EDuration="2.608719133s" podCreationTimestamp="2025-11-26 07:08:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:08:28.600143549 +0000 UTC m=+1204.484031848" watchObservedRunningTime="2025-11-26 07:08:28.608719133 +0000 UTC m=+1204.492607420" Nov 26 07:08:28 crc kubenswrapper[4492]: I1126 07:08:28.619151 4492 scope.go:117] "RemoveContainer" containerID="e00afe7e2b6f9d1a75a317963c1a520104f0ff141e036d0910c2cce290b32e89" Nov 26 07:08:28 crc kubenswrapper[4492]: I1126 07:08:28.641452 4492 scope.go:117] "RemoveContainer" containerID="c58b01de2f49ba772ad3b14425f8759863c0e5d4f8c61961592cbd89d97b5153" Nov 26 07:08:28 crc kubenswrapper[4492]: I1126 07:08:28.645047 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 26 07:08:28 crc kubenswrapper[4492]: I1126 07:08:28.663826 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 26 07:08:28 crc kubenswrapper[4492]: I1126 07:08:28.669022 4492 scope.go:117] "RemoveContainer" containerID="4b1e6d21df7c142f00e46814f5efcf0417852d9643f40d338ae4f1861da90647" Nov 26 07:08:28 crc kubenswrapper[4492]: I1126 07:08:28.680363 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 26 07:08:28 crc kubenswrapper[4492]: E1126 07:08:28.680822 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="35c37b78-2ced-4321-a6bf-8eabc82419df" containerName="ceilometer-central-agent" Nov 26 07:08:28 crc kubenswrapper[4492]: I1126 07:08:28.680842 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="35c37b78-2ced-4321-a6bf-8eabc82419df" containerName="ceilometer-central-agent" Nov 26 07:08:28 crc kubenswrapper[4492]: E1126 07:08:28.680881 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="35c37b78-2ced-4321-a6bf-8eabc82419df" containerName="proxy-httpd" Nov 26 07:08:28 crc kubenswrapper[4492]: I1126 07:08:28.680887 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="35c37b78-2ced-4321-a6bf-8eabc82419df" containerName="proxy-httpd" Nov 26 07:08:28 crc kubenswrapper[4492]: E1126 07:08:28.680900 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="35c37b78-2ced-4321-a6bf-8eabc82419df" containerName="sg-core" Nov 26 07:08:28 crc kubenswrapper[4492]: I1126 07:08:28.680906 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="35c37b78-2ced-4321-a6bf-8eabc82419df" containerName="sg-core" 
Nov 26 07:08:28 crc kubenswrapper[4492]: E1126 07:08:28.680919 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="35c37b78-2ced-4321-a6bf-8eabc82419df" containerName="ceilometer-notification-agent" Nov 26 07:08:28 crc kubenswrapper[4492]: I1126 07:08:28.680925 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="35c37b78-2ced-4321-a6bf-8eabc82419df" containerName="ceilometer-notification-agent" Nov 26 07:08:28 crc kubenswrapper[4492]: I1126 07:08:28.681128 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="35c37b78-2ced-4321-a6bf-8eabc82419df" containerName="proxy-httpd" Nov 26 07:08:28 crc kubenswrapper[4492]: I1126 07:08:28.681143 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="35c37b78-2ced-4321-a6bf-8eabc82419df" containerName="ceilometer-central-agent" Nov 26 07:08:28 crc kubenswrapper[4492]: I1126 07:08:28.681165 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="35c37b78-2ced-4321-a6bf-8eabc82419df" containerName="sg-core" Nov 26 07:08:28 crc kubenswrapper[4492]: I1126 07:08:28.681197 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="35c37b78-2ced-4321-a6bf-8eabc82419df" containerName="ceilometer-notification-agent" Nov 26 07:08:28 crc kubenswrapper[4492]: I1126 07:08:28.688773 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 26 07:08:28 crc kubenswrapper[4492]: I1126 07:08:28.691847 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 26 07:08:28 crc kubenswrapper[4492]: I1126 07:08:28.705976 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 26 07:08:28 crc kubenswrapper[4492]: I1126 07:08:28.706188 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 26 07:08:28 crc kubenswrapper[4492]: I1126 07:08:28.706317 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Nov 26 07:08:28 crc kubenswrapper[4492]: I1126 07:08:28.742556 4492 scope.go:117] "RemoveContainer" containerID="1ad9adcbe7c8d67e57a89174b634954539fb2bab3376af76a9cd0f32de5e500c" Nov 26 07:08:28 crc kubenswrapper[4492]: E1126 07:08:28.771455 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1ad9adcbe7c8d67e57a89174b634954539fb2bab3376af76a9cd0f32de5e500c\": container with ID starting with 1ad9adcbe7c8d67e57a89174b634954539fb2bab3376af76a9cd0f32de5e500c not found: ID does not exist" containerID="1ad9adcbe7c8d67e57a89174b634954539fb2bab3376af76a9cd0f32de5e500c" Nov 26 07:08:28 crc kubenswrapper[4492]: I1126 07:08:28.771497 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1ad9adcbe7c8d67e57a89174b634954539fb2bab3376af76a9cd0f32de5e500c"} err="failed to get container status \"1ad9adcbe7c8d67e57a89174b634954539fb2bab3376af76a9cd0f32de5e500c\": rpc error: code = NotFound desc = could not find container \"1ad9adcbe7c8d67e57a89174b634954539fb2bab3376af76a9cd0f32de5e500c\": container with ID starting with 1ad9adcbe7c8d67e57a89174b634954539fb2bab3376af76a9cd0f32de5e500c not found: ID does not exist" Nov 26 07:08:28 crc kubenswrapper[4492]: I1126 07:08:28.771521 4492 scope.go:117] "RemoveContainer" containerID="e00afe7e2b6f9d1a75a317963c1a520104f0ff141e036d0910c2cce290b32e89" Nov 26 07:08:28 crc kubenswrapper[4492]: I1126 07:08:28.778220 4492 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/55692980-c31f-4107-b9d0-7d29d5e72859-config-data\") pod \"ceilometer-0\" (UID: \"55692980-c31f-4107-b9d0-7d29d5e72859\") " pod="openstack/ceilometer-0" Nov 26 07:08:28 crc kubenswrapper[4492]: I1126 07:08:28.778263 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/55692980-c31f-4107-b9d0-7d29d5e72859-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"55692980-c31f-4107-b9d0-7d29d5e72859\") " pod="openstack/ceilometer-0" Nov 26 07:08:28 crc kubenswrapper[4492]: I1126 07:08:28.778349 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/55692980-c31f-4107-b9d0-7d29d5e72859-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"55692980-c31f-4107-b9d0-7d29d5e72859\") " pod="openstack/ceilometer-0" Nov 26 07:08:28 crc kubenswrapper[4492]: I1126 07:08:28.778385 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/55692980-c31f-4107-b9d0-7d29d5e72859-log-httpd\") pod \"ceilometer-0\" (UID: \"55692980-c31f-4107-b9d0-7d29d5e72859\") " pod="openstack/ceilometer-0" Nov 26 07:08:28 crc kubenswrapper[4492]: I1126 07:08:28.778437 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/55692980-c31f-4107-b9d0-7d29d5e72859-scripts\") pod \"ceilometer-0\" (UID: \"55692980-c31f-4107-b9d0-7d29d5e72859\") " pod="openstack/ceilometer-0" Nov 26 07:08:28 crc kubenswrapper[4492]: I1126 07:08:28.778539 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/55692980-c31f-4107-b9d0-7d29d5e72859-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"55692980-c31f-4107-b9d0-7d29d5e72859\") " pod="openstack/ceilometer-0" Nov 26 07:08:28 crc kubenswrapper[4492]: I1126 07:08:28.778631 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jmdj6\" (UniqueName: \"kubernetes.io/projected/55692980-c31f-4107-b9d0-7d29d5e72859-kube-api-access-jmdj6\") pod \"ceilometer-0\" (UID: \"55692980-c31f-4107-b9d0-7d29d5e72859\") " pod="openstack/ceilometer-0" Nov 26 07:08:28 crc kubenswrapper[4492]: I1126 07:08:28.778659 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/55692980-c31f-4107-b9d0-7d29d5e72859-run-httpd\") pod \"ceilometer-0\" (UID: \"55692980-c31f-4107-b9d0-7d29d5e72859\") " pod="openstack/ceilometer-0" Nov 26 07:08:28 crc kubenswrapper[4492]: E1126 07:08:28.783188 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e00afe7e2b6f9d1a75a317963c1a520104f0ff141e036d0910c2cce290b32e89\": container with ID starting with e00afe7e2b6f9d1a75a317963c1a520104f0ff141e036d0910c2cce290b32e89 not found: ID does not exist" containerID="e00afe7e2b6f9d1a75a317963c1a520104f0ff141e036d0910c2cce290b32e89" Nov 26 07:08:28 crc kubenswrapper[4492]: I1126 07:08:28.783239 4492 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"e00afe7e2b6f9d1a75a317963c1a520104f0ff141e036d0910c2cce290b32e89"} err="failed to get container status \"e00afe7e2b6f9d1a75a317963c1a520104f0ff141e036d0910c2cce290b32e89\": rpc error: code = NotFound desc = could not find container \"e00afe7e2b6f9d1a75a317963c1a520104f0ff141e036d0910c2cce290b32e89\": container with ID starting with e00afe7e2b6f9d1a75a317963c1a520104f0ff141e036d0910c2cce290b32e89 not found: ID does not exist" Nov 26 07:08:28 crc kubenswrapper[4492]: I1126 07:08:28.783267 4492 scope.go:117] "RemoveContainer" containerID="c58b01de2f49ba772ad3b14425f8759863c0e5d4f8c61961592cbd89d97b5153" Nov 26 07:08:28 crc kubenswrapper[4492]: E1126 07:08:28.793387 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c58b01de2f49ba772ad3b14425f8759863c0e5d4f8c61961592cbd89d97b5153\": container with ID starting with c58b01de2f49ba772ad3b14425f8759863c0e5d4f8c61961592cbd89d97b5153 not found: ID does not exist" containerID="c58b01de2f49ba772ad3b14425f8759863c0e5d4f8c61961592cbd89d97b5153" Nov 26 07:08:28 crc kubenswrapper[4492]: I1126 07:08:28.793426 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c58b01de2f49ba772ad3b14425f8759863c0e5d4f8c61961592cbd89d97b5153"} err="failed to get container status \"c58b01de2f49ba772ad3b14425f8759863c0e5d4f8c61961592cbd89d97b5153\": rpc error: code = NotFound desc = could not find container \"c58b01de2f49ba772ad3b14425f8759863c0e5d4f8c61961592cbd89d97b5153\": container with ID starting with c58b01de2f49ba772ad3b14425f8759863c0e5d4f8c61961592cbd89d97b5153 not found: ID does not exist" Nov 26 07:08:28 crc kubenswrapper[4492]: I1126 07:08:28.793452 4492 scope.go:117] "RemoveContainer" containerID="4b1e6d21df7c142f00e46814f5efcf0417852d9643f40d338ae4f1861da90647" Nov 26 07:08:28 crc kubenswrapper[4492]: E1126 07:08:28.795325 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4b1e6d21df7c142f00e46814f5efcf0417852d9643f40d338ae4f1861da90647\": container with ID starting with 4b1e6d21df7c142f00e46814f5efcf0417852d9643f40d338ae4f1861da90647 not found: ID does not exist" containerID="4b1e6d21df7c142f00e46814f5efcf0417852d9643f40d338ae4f1861da90647" Nov 26 07:08:28 crc kubenswrapper[4492]: I1126 07:08:28.795379 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4b1e6d21df7c142f00e46814f5efcf0417852d9643f40d338ae4f1861da90647"} err="failed to get container status \"4b1e6d21df7c142f00e46814f5efcf0417852d9643f40d338ae4f1861da90647\": rpc error: code = NotFound desc = could not find container \"4b1e6d21df7c142f00e46814f5efcf0417852d9643f40d338ae4f1861da90647\": container with ID starting with 4b1e6d21df7c142f00e46814f5efcf0417852d9643f40d338ae4f1861da90647 not found: ID does not exist" Nov 26 07:08:28 crc kubenswrapper[4492]: I1126 07:08:28.810513 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7457c658cc-888nk"] Nov 26 07:08:28 crc kubenswrapper[4492]: I1126 07:08:28.812254 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7457c658cc-888nk" Nov 26 07:08:28 crc kubenswrapper[4492]: I1126 07:08:28.822377 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7457c658cc-888nk"] Nov 26 07:08:28 crc kubenswrapper[4492]: I1126 07:08:28.883685 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/55692980-c31f-4107-b9d0-7d29d5e72859-scripts\") pod \"ceilometer-0\" (UID: \"55692980-c31f-4107-b9d0-7d29d5e72859\") " pod="openstack/ceilometer-0" Nov 26 07:08:28 crc kubenswrapper[4492]: I1126 07:08:28.883735 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/49076130-f96d-4cda-8aff-7fade5d53117-config\") pod \"dnsmasq-dns-7457c658cc-888nk\" (UID: \"49076130-f96d-4cda-8aff-7fade5d53117\") " pod="openstack/dnsmasq-dns-7457c658cc-888nk" Nov 26 07:08:28 crc kubenswrapper[4492]: I1126 07:08:28.883825 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/49076130-f96d-4cda-8aff-7fade5d53117-ovsdbserver-nb\") pod \"dnsmasq-dns-7457c658cc-888nk\" (UID: \"49076130-f96d-4cda-8aff-7fade5d53117\") " pod="openstack/dnsmasq-dns-7457c658cc-888nk" Nov 26 07:08:28 crc kubenswrapper[4492]: I1126 07:08:28.883853 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/55692980-c31f-4107-b9d0-7d29d5e72859-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"55692980-c31f-4107-b9d0-7d29d5e72859\") " pod="openstack/ceilometer-0" Nov 26 07:08:28 crc kubenswrapper[4492]: I1126 07:08:28.883922 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jmdj6\" (UniqueName: \"kubernetes.io/projected/55692980-c31f-4107-b9d0-7d29d5e72859-kube-api-access-jmdj6\") pod \"ceilometer-0\" (UID: \"55692980-c31f-4107-b9d0-7d29d5e72859\") " pod="openstack/ceilometer-0" Nov 26 07:08:28 crc kubenswrapper[4492]: I1126 07:08:28.883957 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/55692980-c31f-4107-b9d0-7d29d5e72859-run-httpd\") pod \"ceilometer-0\" (UID: \"55692980-c31f-4107-b9d0-7d29d5e72859\") " pod="openstack/ceilometer-0" Nov 26 07:08:28 crc kubenswrapper[4492]: I1126 07:08:28.884002 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rhthb\" (UniqueName: \"kubernetes.io/projected/49076130-f96d-4cda-8aff-7fade5d53117-kube-api-access-rhthb\") pod \"dnsmasq-dns-7457c658cc-888nk\" (UID: \"49076130-f96d-4cda-8aff-7fade5d53117\") " pod="openstack/dnsmasq-dns-7457c658cc-888nk" Nov 26 07:08:28 crc kubenswrapper[4492]: I1126 07:08:28.884041 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/49076130-f96d-4cda-8aff-7fade5d53117-ovsdbserver-sb\") pod \"dnsmasq-dns-7457c658cc-888nk\" (UID: \"49076130-f96d-4cda-8aff-7fade5d53117\") " pod="openstack/dnsmasq-dns-7457c658cc-888nk" Nov 26 07:08:28 crc kubenswrapper[4492]: I1126 07:08:28.884063 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/55692980-c31f-4107-b9d0-7d29d5e72859-config-data\") pod \"ceilometer-0\" 
(UID: \"55692980-c31f-4107-b9d0-7d29d5e72859\") " pod="openstack/ceilometer-0" Nov 26 07:08:28 crc kubenswrapper[4492]: I1126 07:08:28.884080 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/55692980-c31f-4107-b9d0-7d29d5e72859-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"55692980-c31f-4107-b9d0-7d29d5e72859\") " pod="openstack/ceilometer-0" Nov 26 07:08:28 crc kubenswrapper[4492]: I1126 07:08:28.884107 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/49076130-f96d-4cda-8aff-7fade5d53117-dns-swift-storage-0\") pod \"dnsmasq-dns-7457c658cc-888nk\" (UID: \"49076130-f96d-4cda-8aff-7fade5d53117\") " pod="openstack/dnsmasq-dns-7457c658cc-888nk" Nov 26 07:08:28 crc kubenswrapper[4492]: I1126 07:08:28.884138 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/49076130-f96d-4cda-8aff-7fade5d53117-dns-svc\") pod \"dnsmasq-dns-7457c658cc-888nk\" (UID: \"49076130-f96d-4cda-8aff-7fade5d53117\") " pod="openstack/dnsmasq-dns-7457c658cc-888nk" Nov 26 07:08:28 crc kubenswrapper[4492]: I1126 07:08:28.884154 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/55692980-c31f-4107-b9d0-7d29d5e72859-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"55692980-c31f-4107-b9d0-7d29d5e72859\") " pod="openstack/ceilometer-0" Nov 26 07:08:28 crc kubenswrapper[4492]: I1126 07:08:28.884193 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/55692980-c31f-4107-b9d0-7d29d5e72859-log-httpd\") pod \"ceilometer-0\" (UID: \"55692980-c31f-4107-b9d0-7d29d5e72859\") " pod="openstack/ceilometer-0" Nov 26 07:08:28 crc kubenswrapper[4492]: I1126 07:08:28.884917 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/55692980-c31f-4107-b9d0-7d29d5e72859-log-httpd\") pod \"ceilometer-0\" (UID: \"55692980-c31f-4107-b9d0-7d29d5e72859\") " pod="openstack/ceilometer-0" Nov 26 07:08:28 crc kubenswrapper[4492]: I1126 07:08:28.887821 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/55692980-c31f-4107-b9d0-7d29d5e72859-run-httpd\") pod \"ceilometer-0\" (UID: \"55692980-c31f-4107-b9d0-7d29d5e72859\") " pod="openstack/ceilometer-0" Nov 26 07:08:28 crc kubenswrapper[4492]: I1126 07:08:28.896799 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/55692980-c31f-4107-b9d0-7d29d5e72859-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"55692980-c31f-4107-b9d0-7d29d5e72859\") " pod="openstack/ceilometer-0" Nov 26 07:08:28 crc kubenswrapper[4492]: I1126 07:08:28.899139 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/55692980-c31f-4107-b9d0-7d29d5e72859-scripts\") pod \"ceilometer-0\" (UID: \"55692980-c31f-4107-b9d0-7d29d5e72859\") " pod="openstack/ceilometer-0" Nov 26 07:08:28 crc kubenswrapper[4492]: I1126 07:08:28.917198 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/55692980-c31f-4107-b9d0-7d29d5e72859-config-data\") pod \"ceilometer-0\" (UID: \"55692980-c31f-4107-b9d0-7d29d5e72859\") " pod="openstack/ceilometer-0" Nov 26 07:08:28 crc kubenswrapper[4492]: I1126 07:08:28.917757 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/55692980-c31f-4107-b9d0-7d29d5e72859-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"55692980-c31f-4107-b9d0-7d29d5e72859\") " pod="openstack/ceilometer-0" Nov 26 07:08:28 crc kubenswrapper[4492]: I1126 07:08:28.918139 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/55692980-c31f-4107-b9d0-7d29d5e72859-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"55692980-c31f-4107-b9d0-7d29d5e72859\") " pod="openstack/ceilometer-0" Nov 26 07:08:28 crc kubenswrapper[4492]: I1126 07:08:28.919230 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jmdj6\" (UniqueName: \"kubernetes.io/projected/55692980-c31f-4107-b9d0-7d29d5e72859-kube-api-access-jmdj6\") pod \"ceilometer-0\" (UID: \"55692980-c31f-4107-b9d0-7d29d5e72859\") " pod="openstack/ceilometer-0" Nov 26 07:08:28 crc kubenswrapper[4492]: I1126 07:08:28.985842 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/49076130-f96d-4cda-8aff-7fade5d53117-config\") pod \"dnsmasq-dns-7457c658cc-888nk\" (UID: \"49076130-f96d-4cda-8aff-7fade5d53117\") " pod="openstack/dnsmasq-dns-7457c658cc-888nk" Nov 26 07:08:28 crc kubenswrapper[4492]: I1126 07:08:28.986869 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/49076130-f96d-4cda-8aff-7fade5d53117-ovsdbserver-nb\") pod \"dnsmasq-dns-7457c658cc-888nk\" (UID: \"49076130-f96d-4cda-8aff-7fade5d53117\") " pod="openstack/dnsmasq-dns-7457c658cc-888nk" Nov 26 07:08:28 crc kubenswrapper[4492]: I1126 07:08:28.987634 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rhthb\" (UniqueName: \"kubernetes.io/projected/49076130-f96d-4cda-8aff-7fade5d53117-kube-api-access-rhthb\") pod \"dnsmasq-dns-7457c658cc-888nk\" (UID: \"49076130-f96d-4cda-8aff-7fade5d53117\") " pod="openstack/dnsmasq-dns-7457c658cc-888nk" Nov 26 07:08:28 crc kubenswrapper[4492]: I1126 07:08:28.988051 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/49076130-f96d-4cda-8aff-7fade5d53117-ovsdbserver-sb\") pod \"dnsmasq-dns-7457c658cc-888nk\" (UID: \"49076130-f96d-4cda-8aff-7fade5d53117\") " pod="openstack/dnsmasq-dns-7457c658cc-888nk" Nov 26 07:08:28 crc kubenswrapper[4492]: I1126 07:08:28.988682 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/49076130-f96d-4cda-8aff-7fade5d53117-dns-swift-storage-0\") pod \"dnsmasq-dns-7457c658cc-888nk\" (UID: \"49076130-f96d-4cda-8aff-7fade5d53117\") " pod="openstack/dnsmasq-dns-7457c658cc-888nk" Nov 26 07:08:28 crc kubenswrapper[4492]: I1126 07:08:28.989337 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/49076130-f96d-4cda-8aff-7fade5d53117-dns-svc\") pod \"dnsmasq-dns-7457c658cc-888nk\" (UID: \"49076130-f96d-4cda-8aff-7fade5d53117\") " 
pod="openstack/dnsmasq-dns-7457c658cc-888nk" Nov 26 07:08:28 crc kubenswrapper[4492]: I1126 07:08:28.988615 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/49076130-f96d-4cda-8aff-7fade5d53117-ovsdbserver-sb\") pod \"dnsmasq-dns-7457c658cc-888nk\" (UID: \"49076130-f96d-4cda-8aff-7fade5d53117\") " pod="openstack/dnsmasq-dns-7457c658cc-888nk" Nov 26 07:08:28 crc kubenswrapper[4492]: I1126 07:08:28.986771 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/49076130-f96d-4cda-8aff-7fade5d53117-config\") pod \"dnsmasq-dns-7457c658cc-888nk\" (UID: \"49076130-f96d-4cda-8aff-7fade5d53117\") " pod="openstack/dnsmasq-dns-7457c658cc-888nk" Nov 26 07:08:28 crc kubenswrapper[4492]: I1126 07:08:28.989286 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/49076130-f96d-4cda-8aff-7fade5d53117-dns-swift-storage-0\") pod \"dnsmasq-dns-7457c658cc-888nk\" (UID: \"49076130-f96d-4cda-8aff-7fade5d53117\") " pod="openstack/dnsmasq-dns-7457c658cc-888nk" Nov 26 07:08:28 crc kubenswrapper[4492]: I1126 07:08:28.987483 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/49076130-f96d-4cda-8aff-7fade5d53117-ovsdbserver-nb\") pod \"dnsmasq-dns-7457c658cc-888nk\" (UID: \"49076130-f96d-4cda-8aff-7fade5d53117\") " pod="openstack/dnsmasq-dns-7457c658cc-888nk" Nov 26 07:08:28 crc kubenswrapper[4492]: I1126 07:08:28.990244 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/49076130-f96d-4cda-8aff-7fade5d53117-dns-svc\") pod \"dnsmasq-dns-7457c658cc-888nk\" (UID: \"49076130-f96d-4cda-8aff-7fade5d53117\") " pod="openstack/dnsmasq-dns-7457c658cc-888nk" Nov 26 07:08:29 crc kubenswrapper[4492]: I1126 07:08:29.006318 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 26 07:08:29 crc kubenswrapper[4492]: I1126 07:08:29.007594 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rhthb\" (UniqueName: \"kubernetes.io/projected/49076130-f96d-4cda-8aff-7fade5d53117-kube-api-access-rhthb\") pod \"dnsmasq-dns-7457c658cc-888nk\" (UID: \"49076130-f96d-4cda-8aff-7fade5d53117\") " pod="openstack/dnsmasq-dns-7457c658cc-888nk" Nov 26 07:08:29 crc kubenswrapper[4492]: I1126 07:08:29.131537 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7457c658cc-888nk" Nov 26 07:08:29 crc kubenswrapper[4492]: I1126 07:08:29.378518 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 26 07:08:29 crc kubenswrapper[4492]: I1126 07:08:29.591844 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"55692980-c31f-4107-b9d0-7d29d5e72859","Type":"ContainerStarted","Data":"5d6b3cd3df7c197809901b6e42804a2c158a82dc78e5f8ae3a39e458cab87f1b"} Nov 26 07:08:29 crc kubenswrapper[4492]: W1126 07:08:29.873629 4492 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod49076130_f96d_4cda_8aff_7fade5d53117.slice/crio-cd0f33471390c4fabb4fc44fbe60a029b86153cd2e48d972e7039b9f3be60565 WatchSource:0}: Error finding container cd0f33471390c4fabb4fc44fbe60a029b86153cd2e48d972e7039b9f3be60565: Status 404 returned error can't find the container with id cd0f33471390c4fabb4fc44fbe60a029b86153cd2e48d972e7039b9f3be60565 Nov 26 07:08:29 crc kubenswrapper[4492]: I1126 07:08:29.883634 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7457c658cc-888nk"] Nov 26 07:08:30 crc kubenswrapper[4492]: I1126 07:08:30.461003 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="35c37b78-2ced-4321-a6bf-8eabc82419df" path="/var/lib/kubelet/pods/35c37b78-2ced-4321-a6bf-8eabc82419df/volumes" Nov 26 07:08:30 crc kubenswrapper[4492]: I1126 07:08:30.603291 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"55692980-c31f-4107-b9d0-7d29d5e72859","Type":"ContainerStarted","Data":"fc22e6ba27e3e4d3b5e8ad4e0b8ce864bd48c9a8af48a02e85eb1ded93d9ed4e"} Nov 26 07:08:30 crc kubenswrapper[4492]: I1126 07:08:30.605000 4492 generic.go:334] "Generic (PLEG): container finished" podID="49076130-f96d-4cda-8aff-7fade5d53117" containerID="091453ceffbcdba28c1faf3ceb72d57d44aa2842d16a36389a992d3e00062d50" exitCode=0 Nov 26 07:08:30 crc kubenswrapper[4492]: I1126 07:08:30.605101 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7457c658cc-888nk" event={"ID":"49076130-f96d-4cda-8aff-7fade5d53117","Type":"ContainerDied","Data":"091453ceffbcdba28c1faf3ceb72d57d44aa2842d16a36389a992d3e00062d50"} Nov 26 07:08:30 crc kubenswrapper[4492]: I1126 07:08:30.605381 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7457c658cc-888nk" event={"ID":"49076130-f96d-4cda-8aff-7fade5d53117","Type":"ContainerStarted","Data":"cd0f33471390c4fabb4fc44fbe60a029b86153cd2e48d972e7039b9f3be60565"} Nov 26 07:08:30 crc kubenswrapper[4492]: I1126 07:08:30.984209 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 26 07:08:31 crc kubenswrapper[4492]: I1126 07:08:31.474073 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 26 07:08:31 crc kubenswrapper[4492]: I1126 07:08:31.622665 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"55692980-c31f-4107-b9d0-7d29d5e72859","Type":"ContainerStarted","Data":"1c891c52b8129685578a8aab53909a9be88971905a5fe37324712d514e962df3"} Nov 26 07:08:31 crc kubenswrapper[4492]: I1126 07:08:31.626594 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7457c658cc-888nk" 
event={"ID":"49076130-f96d-4cda-8aff-7fade5d53117","Type":"ContainerStarted","Data":"aaa24f6d80c56bcfd364731903fc5f4cdd90ff4bbee89a586dd6053ee695e885"} Nov 26 07:08:31 crc kubenswrapper[4492]: I1126 07:08:31.626837 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="b1344418-ee16-445c-9a21-0cdd9f4e480d" containerName="nova-api-api" containerID="cri-o://d85ac88c9a96cd8ac1fc0f6278eae523ac5756acacfa81f1d6c6ac34fcffb3bd" gracePeriod=30 Nov 26 07:08:31 crc kubenswrapper[4492]: I1126 07:08:31.626782 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="b1344418-ee16-445c-9a21-0cdd9f4e480d" containerName="nova-api-log" containerID="cri-o://849cf020df0f00d7e36abdbb1c0d4309bb8344fa24557443e09adc9202715e8a" gracePeriod=30 Nov 26 07:08:31 crc kubenswrapper[4492]: I1126 07:08:31.627334 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-7457c658cc-888nk" Nov 26 07:08:32 crc kubenswrapper[4492]: I1126 07:08:32.187563 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Nov 26 07:08:32 crc kubenswrapper[4492]: I1126 07:08:32.207252 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-7457c658cc-888nk" podStartSLOduration=4.20723205 podStartE2EDuration="4.20723205s" podCreationTimestamp="2025-11-26 07:08:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:08:31.674926966 +0000 UTC m=+1207.558815264" watchObservedRunningTime="2025-11-26 07:08:32.20723205 +0000 UTC m=+1208.091120348" Nov 26 07:08:32 crc kubenswrapper[4492]: I1126 07:08:32.284369 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Nov 26 07:08:32 crc kubenswrapper[4492]: I1126 07:08:32.636813 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"55692980-c31f-4107-b9d0-7d29d5e72859","Type":"ContainerStarted","Data":"d881e68ba4f26ef0de94effc6dec27816cf0e5a1e6d0422b72f208d74100fb74"} Nov 26 07:08:32 crc kubenswrapper[4492]: I1126 07:08:32.638363 4492 generic.go:334] "Generic (PLEG): container finished" podID="b1344418-ee16-445c-9a21-0cdd9f4e480d" containerID="849cf020df0f00d7e36abdbb1c0d4309bb8344fa24557443e09adc9202715e8a" exitCode=143 Nov 26 07:08:32 crc kubenswrapper[4492]: I1126 07:08:32.639164 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"b1344418-ee16-445c-9a21-0cdd9f4e480d","Type":"ContainerDied","Data":"849cf020df0f00d7e36abdbb1c0d4309bb8344fa24557443e09adc9202715e8a"} Nov 26 07:08:33 crc kubenswrapper[4492]: I1126 07:08:33.651585 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"55692980-c31f-4107-b9d0-7d29d5e72859","Type":"ContainerStarted","Data":"91bee3b43548a1372d42c21ff5d5ba9309fcd93cf477e1a9e571925e06239f06"} Nov 26 07:08:33 crc kubenswrapper[4492]: I1126 07:08:33.651999 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 26 07:08:33 crc kubenswrapper[4492]: I1126 07:08:33.651759 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="55692980-c31f-4107-b9d0-7d29d5e72859" containerName="proxy-httpd" 
containerID="cri-o://91bee3b43548a1372d42c21ff5d5ba9309fcd93cf477e1a9e571925e06239f06" gracePeriod=30 Nov 26 07:08:33 crc kubenswrapper[4492]: I1126 07:08:33.651810 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="55692980-c31f-4107-b9d0-7d29d5e72859" containerName="ceilometer-notification-agent" containerID="cri-o://1c891c52b8129685578a8aab53909a9be88971905a5fe37324712d514e962df3" gracePeriod=30 Nov 26 07:08:33 crc kubenswrapper[4492]: I1126 07:08:33.651759 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="55692980-c31f-4107-b9d0-7d29d5e72859" containerName="ceilometer-central-agent" containerID="cri-o://fc22e6ba27e3e4d3b5e8ad4e0b8ce864bd48c9a8af48a02e85eb1ded93d9ed4e" gracePeriod=30 Nov 26 07:08:33 crc kubenswrapper[4492]: I1126 07:08:33.651828 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="55692980-c31f-4107-b9d0-7d29d5e72859" containerName="sg-core" containerID="cri-o://d881e68ba4f26ef0de94effc6dec27816cf0e5a1e6d0422b72f208d74100fb74" gracePeriod=30 Nov 26 07:08:34 crc kubenswrapper[4492]: I1126 07:08:34.676862 4492 generic.go:334] "Generic (PLEG): container finished" podID="55692980-c31f-4107-b9d0-7d29d5e72859" containerID="91bee3b43548a1372d42c21ff5d5ba9309fcd93cf477e1a9e571925e06239f06" exitCode=0 Nov 26 07:08:34 crc kubenswrapper[4492]: I1126 07:08:34.677157 4492 generic.go:334] "Generic (PLEG): container finished" podID="55692980-c31f-4107-b9d0-7d29d5e72859" containerID="d881e68ba4f26ef0de94effc6dec27816cf0e5a1e6d0422b72f208d74100fb74" exitCode=2 Nov 26 07:08:34 crc kubenswrapper[4492]: I1126 07:08:34.677167 4492 generic.go:334] "Generic (PLEG): container finished" podID="55692980-c31f-4107-b9d0-7d29d5e72859" containerID="1c891c52b8129685578a8aab53909a9be88971905a5fe37324712d514e962df3" exitCode=0 Nov 26 07:08:34 crc kubenswrapper[4492]: I1126 07:08:34.677223 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"55692980-c31f-4107-b9d0-7d29d5e72859","Type":"ContainerDied","Data":"91bee3b43548a1372d42c21ff5d5ba9309fcd93cf477e1a9e571925e06239f06"} Nov 26 07:08:34 crc kubenswrapper[4492]: I1126 07:08:34.677253 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"55692980-c31f-4107-b9d0-7d29d5e72859","Type":"ContainerDied","Data":"d881e68ba4f26ef0de94effc6dec27816cf0e5a1e6d0422b72f208d74100fb74"} Nov 26 07:08:34 crc kubenswrapper[4492]: I1126 07:08:34.677263 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"55692980-c31f-4107-b9d0-7d29d5e72859","Type":"ContainerDied","Data":"1c891c52b8129685578a8aab53909a9be88971905a5fe37324712d514e962df3"} Nov 26 07:08:36 crc kubenswrapper[4492]: I1126 07:08:36.508989 4492 util.go:48] "No ready sandbox for pod can be found. 
Nov 26 07:08:36 crc kubenswrapper[4492]: I1126 07:08:36.508989 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Nov 26 07:08:36 crc kubenswrapper[4492]: I1126 07:08:36.528954 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=4.962204195 podStartE2EDuration="8.528914101s" podCreationTimestamp="2025-11-26 07:08:28 +0000 UTC" firstStartedPulling="2025-11-26 07:08:29.435425564 +0000 UTC m=+1205.319313862" lastFinishedPulling="2025-11-26 07:08:33.002135469 +0000 UTC m=+1208.886023768" observedRunningTime="2025-11-26 07:08:33.68503409 +0000 UTC m=+1209.568922389" watchObservedRunningTime="2025-11-26 07:08:36.528914101 +0000 UTC m=+1212.412802399"
Nov 26 07:08:36 crc kubenswrapper[4492]: I1126 07:08:36.648355 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b1344418-ee16-445c-9a21-0cdd9f4e480d-logs\") pod \"b1344418-ee16-445c-9a21-0cdd9f4e480d\" (UID: \"b1344418-ee16-445c-9a21-0cdd9f4e480d\") "
Nov 26 07:08:36 crc kubenswrapper[4492]: I1126 07:08:36.648497 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b1344418-ee16-445c-9a21-0cdd9f4e480d-config-data\") pod \"b1344418-ee16-445c-9a21-0cdd9f4e480d\" (UID: \"b1344418-ee16-445c-9a21-0cdd9f4e480d\") "
Nov 26 07:08:36 crc kubenswrapper[4492]: I1126 07:08:36.648773 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v4vjq\" (UniqueName: \"kubernetes.io/projected/b1344418-ee16-445c-9a21-0cdd9f4e480d-kube-api-access-v4vjq\") pod \"b1344418-ee16-445c-9a21-0cdd9f4e480d\" (UID: \"b1344418-ee16-445c-9a21-0cdd9f4e480d\") "
Nov 26 07:08:36 crc kubenswrapper[4492]: I1126 07:08:36.648841 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b1344418-ee16-445c-9a21-0cdd9f4e480d-logs" (OuterVolumeSpecName: "logs") pod "b1344418-ee16-445c-9a21-0cdd9f4e480d" (UID: "b1344418-ee16-445c-9a21-0cdd9f4e480d"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 07:08:36 crc kubenswrapper[4492]: I1126 07:08:36.648918 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b1344418-ee16-445c-9a21-0cdd9f4e480d-combined-ca-bundle\") pod \"b1344418-ee16-445c-9a21-0cdd9f4e480d\" (UID: \"b1344418-ee16-445c-9a21-0cdd9f4e480d\") "
Nov 26 07:08:36 crc kubenswrapper[4492]: I1126 07:08:36.649585 4492 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b1344418-ee16-445c-9a21-0cdd9f4e480d-logs\") on node \"crc\" DevicePath \"\""
Nov 26 07:08:36 crc kubenswrapper[4492]: I1126 07:08:36.667825 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b1344418-ee16-445c-9a21-0cdd9f4e480d-kube-api-access-v4vjq" (OuterVolumeSpecName: "kube-api-access-v4vjq") pod "b1344418-ee16-445c-9a21-0cdd9f4e480d" (UID: "b1344418-ee16-445c-9a21-0cdd9f4e480d"). InnerVolumeSpecName "kube-api-access-v4vjq". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 07:08:36 crc kubenswrapper[4492]: I1126 07:08:36.680400 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b1344418-ee16-445c-9a21-0cdd9f4e480d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b1344418-ee16-445c-9a21-0cdd9f4e480d" (UID: "b1344418-ee16-445c-9a21-0cdd9f4e480d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 07:08:36 crc kubenswrapper[4492]: I1126 07:08:36.715674 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b1344418-ee16-445c-9a21-0cdd9f4e480d-config-data" (OuterVolumeSpecName: "config-data") pod "b1344418-ee16-445c-9a21-0cdd9f4e480d" (UID: "b1344418-ee16-445c-9a21-0cdd9f4e480d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 07:08:36 crc kubenswrapper[4492]: I1126 07:08:36.719933 4492 generic.go:334] "Generic (PLEG): container finished" podID="b1344418-ee16-445c-9a21-0cdd9f4e480d" containerID="d85ac88c9a96cd8ac1fc0f6278eae523ac5756acacfa81f1d6c6ac34fcffb3bd" exitCode=0
Nov 26 07:08:36 crc kubenswrapper[4492]: I1126 07:08:36.720000 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"b1344418-ee16-445c-9a21-0cdd9f4e480d","Type":"ContainerDied","Data":"d85ac88c9a96cd8ac1fc0f6278eae523ac5756acacfa81f1d6c6ac34fcffb3bd"}
Nov 26 07:08:36 crc kubenswrapper[4492]: I1126 07:08:36.720037 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"b1344418-ee16-445c-9a21-0cdd9f4e480d","Type":"ContainerDied","Data":"7b58b21823e9b214b0962298647844bd0938929acc14cb39e613faf96fb1d1be"}
Nov 26 07:08:36 crc kubenswrapper[4492]: I1126 07:08:36.720062 4492 scope.go:117] "RemoveContainer" containerID="d85ac88c9a96cd8ac1fc0f6278eae523ac5756acacfa81f1d6c6ac34fcffb3bd"
Nov 26 07:08:36 crc kubenswrapper[4492]: I1126 07:08:36.720281 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Nov 26 07:08:36 crc kubenswrapper[4492]: I1126 07:08:36.751706 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v4vjq\" (UniqueName: \"kubernetes.io/projected/b1344418-ee16-445c-9a21-0cdd9f4e480d-kube-api-access-v4vjq\") on node \"crc\" DevicePath \"\""
Nov 26 07:08:36 crc kubenswrapper[4492]: I1126 07:08:36.751812 4492 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b1344418-ee16-445c-9a21-0cdd9f4e480d-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 26 07:08:36 crc kubenswrapper[4492]: I1126 07:08:36.751872 4492 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b1344418-ee16-445c-9a21-0cdd9f4e480d-config-data\") on node \"crc\" DevicePath \"\""
Nov 26 07:08:36 crc kubenswrapper[4492]: I1126 07:08:36.783598 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"]
Nov 26 07:08:36 crc kubenswrapper[4492]: I1126 07:08:36.790315 4492 scope.go:117] "RemoveContainer" containerID="849cf020df0f00d7e36abdbb1c0d4309bb8344fa24557443e09adc9202715e8a"
Nov 26 07:08:36 crc kubenswrapper[4492]: I1126 07:08:36.797329 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"]
Nov 26 07:08:36 crc kubenswrapper[4492]: I1126 07:08:36.808439 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"]
Nov 26 07:08:36 crc kubenswrapper[4492]: E1126 07:08:36.808898 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b1344418-ee16-445c-9a21-0cdd9f4e480d" containerName="nova-api-log"
Nov 26 07:08:36 crc kubenswrapper[4492]: I1126 07:08:36.808918 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="b1344418-ee16-445c-9a21-0cdd9f4e480d" containerName="nova-api-log"
Nov 26 07:08:36 crc kubenswrapper[4492]: E1126 07:08:36.808953 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b1344418-ee16-445c-9a21-0cdd9f4e480d" containerName="nova-api-api"
Nov 26 07:08:36 crc kubenswrapper[4492]: I1126 07:08:36.808963 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="b1344418-ee16-445c-9a21-0cdd9f4e480d" containerName="nova-api-api"
Nov 26 07:08:36 crc kubenswrapper[4492]: I1126 07:08:36.809136 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="b1344418-ee16-445c-9a21-0cdd9f4e480d" containerName="nova-api-api"
Nov 26 07:08:36 crc kubenswrapper[4492]: I1126 07:08:36.809161 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="b1344418-ee16-445c-9a21-0cdd9f4e480d" containerName="nova-api-log"
Nov 26 07:08:36 crc kubenswrapper[4492]: I1126 07:08:36.810129 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Nov 26 07:08:36 crc kubenswrapper[4492]: I1126 07:08:36.814194 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc"
Nov 26 07:08:36 crc kubenswrapper[4492]: I1126 07:08:36.814672 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc"
Nov 26 07:08:36 crc kubenswrapper[4492]: I1126 07:08:36.815285 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data"
Nov 26 07:08:36 crc kubenswrapper[4492]: I1126 07:08:36.820067 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"]
Nov 26 07:08:36 crc kubenswrapper[4492]: I1126 07:08:36.834347 4492 scope.go:117] "RemoveContainer" containerID="d85ac88c9a96cd8ac1fc0f6278eae523ac5756acacfa81f1d6c6ac34fcffb3bd"
Nov 26 07:08:36 crc kubenswrapper[4492]: E1126 07:08:36.841477 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d85ac88c9a96cd8ac1fc0f6278eae523ac5756acacfa81f1d6c6ac34fcffb3bd\": container with ID starting with d85ac88c9a96cd8ac1fc0f6278eae523ac5756acacfa81f1d6c6ac34fcffb3bd not found: ID does not exist" containerID="d85ac88c9a96cd8ac1fc0f6278eae523ac5756acacfa81f1d6c6ac34fcffb3bd"
Nov 26 07:08:36 crc kubenswrapper[4492]: I1126 07:08:36.841508 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d85ac88c9a96cd8ac1fc0f6278eae523ac5756acacfa81f1d6c6ac34fcffb3bd"} err="failed to get container status \"d85ac88c9a96cd8ac1fc0f6278eae523ac5756acacfa81f1d6c6ac34fcffb3bd\": rpc error: code = NotFound desc = could not find container \"d85ac88c9a96cd8ac1fc0f6278eae523ac5756acacfa81f1d6c6ac34fcffb3bd\": container with ID starting with d85ac88c9a96cd8ac1fc0f6278eae523ac5756acacfa81f1d6c6ac34fcffb3bd not found: ID does not exist"
Nov 26 07:08:36 crc kubenswrapper[4492]: I1126 07:08:36.841531 4492 scope.go:117] "RemoveContainer" containerID="849cf020df0f00d7e36abdbb1c0d4309bb8344fa24557443e09adc9202715e8a"
Nov 26 07:08:36 crc kubenswrapper[4492]: E1126 07:08:36.846059 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"849cf020df0f00d7e36abdbb1c0d4309bb8344fa24557443e09adc9202715e8a\": container with ID starting with 849cf020df0f00d7e36abdbb1c0d4309bb8344fa24557443e09adc9202715e8a not found: ID does not exist" containerID="849cf020df0f00d7e36abdbb1c0d4309bb8344fa24557443e09adc9202715e8a"
Nov 26 07:08:36 crc kubenswrapper[4492]: I1126 07:08:36.846118 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"849cf020df0f00d7e36abdbb1c0d4309bb8344fa24557443e09adc9202715e8a"} err="failed to get container status \"849cf020df0f00d7e36abdbb1c0d4309bb8344fa24557443e09adc9202715e8a\": rpc error: code = NotFound desc = could not find container \"849cf020df0f00d7e36abdbb1c0d4309bb8344fa24557443e09adc9202715e8a\": container with ID starting with 849cf020df0f00d7e36abdbb1c0d4309bb8344fa24557443e09adc9202715e8a not found: ID does not exist"
Nov 26 07:08:36 crc kubenswrapper[4492]: I1126 07:08:36.955901 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e2c3d448-6c92-456b-a3db-49b9774ebb58-public-tls-certs\") pod \"nova-api-0\" (UID: \"e2c3d448-6c92-456b-a3db-49b9774ebb58\") " pod="openstack/nova-api-0"
Nov 26 07:08:36 crc kubenswrapper[4492]: I1126 07:08:36.956053 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-frctl\" (UniqueName: \"kubernetes.io/projected/e2c3d448-6c92-456b-a3db-49b9774ebb58-kube-api-access-frctl\") pod \"nova-api-0\" (UID: \"e2c3d448-6c92-456b-a3db-49b9774ebb58\") " pod="openstack/nova-api-0"
Nov 26 07:08:36 crc kubenswrapper[4492]: I1126 07:08:36.956260 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e2c3d448-6c92-456b-a3db-49b9774ebb58-logs\") pod \"nova-api-0\" (UID: \"e2c3d448-6c92-456b-a3db-49b9774ebb58\") " pod="openstack/nova-api-0"
Nov 26 07:08:36 crc kubenswrapper[4492]: I1126 07:08:36.956402 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e2c3d448-6c92-456b-a3db-49b9774ebb58-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"e2c3d448-6c92-456b-a3db-49b9774ebb58\") " pod="openstack/nova-api-0"
Nov 26 07:08:36 crc kubenswrapper[4492]: I1126 07:08:36.956599 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e2c3d448-6c92-456b-a3db-49b9774ebb58-config-data\") pod \"nova-api-0\" (UID: \"e2c3d448-6c92-456b-a3db-49b9774ebb58\") " pod="openstack/nova-api-0"
Nov 26 07:08:36 crc kubenswrapper[4492]: I1126 07:08:36.956720 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e2c3d448-6c92-456b-a3db-49b9774ebb58-internal-tls-certs\") pod \"nova-api-0\" (UID: \"e2c3d448-6c92-456b-a3db-49b9774ebb58\") " pod="openstack/nova-api-0"
Nov 26 07:08:37 crc kubenswrapper[4492]: I1126 07:08:37.059517 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e2c3d448-6c92-456b-a3db-49b9774ebb58-public-tls-certs\") pod \"nova-api-0\" (UID: \"e2c3d448-6c92-456b-a3db-49b9774ebb58\") " pod="openstack/nova-api-0"
Nov 26 07:08:37 crc kubenswrapper[4492]: I1126 07:08:37.059621 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-frctl\" (UniqueName: \"kubernetes.io/projected/e2c3d448-6c92-456b-a3db-49b9774ebb58-kube-api-access-frctl\") pod \"nova-api-0\" (UID: \"e2c3d448-6c92-456b-a3db-49b9774ebb58\") " pod="openstack/nova-api-0"
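[Editor's note] The RemoveContainer / "ContainerStatus from runtime service failed ... NotFound" pairs above are benign: by the time the kubelet re-queries container status during cleanup, CRI-O has already removed the containers, and the gRPC NotFound is logged and then treated as "already deleted". A sketch of that idempotent-delete convention (remove is a hypothetical stand-in for the runtime call):

package main

import (
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// removeContainer treats a gRPC NotFound from the runtime as success:
// the container is already gone, so there is nothing left to delete.
func removeContainer(remove func(id string) error, id string) error {
	err := remove(id)
	if status.Code(err) == codes.NotFound {
		return nil // already removed by the runtime; nothing to do
	}
	return err
}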
Nov 26 07:08:37 crc kubenswrapper[4492]: I1126 07:08:37.059688 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e2c3d448-6c92-456b-a3db-49b9774ebb58-logs\") pod \"nova-api-0\" (UID: \"e2c3d448-6c92-456b-a3db-49b9774ebb58\") " pod="openstack/nova-api-0"
Nov 26 07:08:37 crc kubenswrapper[4492]: I1126 07:08:37.059724 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e2c3d448-6c92-456b-a3db-49b9774ebb58-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"e2c3d448-6c92-456b-a3db-49b9774ebb58\") " pod="openstack/nova-api-0"
Nov 26 07:08:37 crc kubenswrapper[4492]: I1126 07:08:37.059773 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e2c3d448-6c92-456b-a3db-49b9774ebb58-config-data\") pod \"nova-api-0\" (UID: \"e2c3d448-6c92-456b-a3db-49b9774ebb58\") " pod="openstack/nova-api-0"
Nov 26 07:08:37 crc kubenswrapper[4492]: I1126 07:08:37.059811 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e2c3d448-6c92-456b-a3db-49b9774ebb58-internal-tls-certs\") pod \"nova-api-0\" (UID: \"e2c3d448-6c92-456b-a3db-49b9774ebb58\") " pod="openstack/nova-api-0"
Nov 26 07:08:37 crc kubenswrapper[4492]: I1126 07:08:37.060966 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e2c3d448-6c92-456b-a3db-49b9774ebb58-logs\") pod \"nova-api-0\" (UID: \"e2c3d448-6c92-456b-a3db-49b9774ebb58\") " pod="openstack/nova-api-0"
Nov 26 07:08:37 crc kubenswrapper[4492]: I1126 07:08:37.064912 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e2c3d448-6c92-456b-a3db-49b9774ebb58-internal-tls-certs\") pod \"nova-api-0\" (UID: \"e2c3d448-6c92-456b-a3db-49b9774ebb58\") " pod="openstack/nova-api-0"
Nov 26 07:08:37 crc kubenswrapper[4492]: I1126 07:08:37.065037 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e2c3d448-6c92-456b-a3db-49b9774ebb58-public-tls-certs\") pod \"nova-api-0\" (UID: \"e2c3d448-6c92-456b-a3db-49b9774ebb58\") " pod="openstack/nova-api-0"
Nov 26 07:08:37 crc kubenswrapper[4492]: I1126 07:08:37.067819 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e2c3d448-6c92-456b-a3db-49b9774ebb58-config-data\") pod \"nova-api-0\" (UID: \"e2c3d448-6c92-456b-a3db-49b9774ebb58\") " pod="openstack/nova-api-0"
Nov 26 07:08:37 crc kubenswrapper[4492]: I1126 07:08:37.068167 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e2c3d448-6c92-456b-a3db-49b9774ebb58-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"e2c3d448-6c92-456b-a3db-49b9774ebb58\") " pod="openstack/nova-api-0"
Nov 26 07:08:37 crc kubenswrapper[4492]: I1126 07:08:37.078211 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-frctl\" (UniqueName: \"kubernetes.io/projected/e2c3d448-6c92-456b-a3db-49b9774ebb58-kube-api-access-frctl\") pod \"nova-api-0\" (UID: \"e2c3d448-6c92-456b-a3db-49b9774ebb58\") " pod="openstack/nova-api-0"
Nov 26 07:08:37 crc kubenswrapper[4492]: I1126 07:08:37.131655 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Nov 26 07:08:37 crc kubenswrapper[4492]: I1126 07:08:37.284193 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-cell1-novncproxy-0"
Nov 26 07:08:37 crc kubenswrapper[4492]: I1126 07:08:37.349875 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-cell1-novncproxy-0"
Nov 26 07:08:37 crc kubenswrapper[4492]: I1126 07:08:37.446248 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 26 07:08:37 crc kubenswrapper[4492]: I1126 07:08:37.575562 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/55692980-c31f-4107-b9d0-7d29d5e72859-combined-ca-bundle\") pod \"55692980-c31f-4107-b9d0-7d29d5e72859\" (UID: \"55692980-c31f-4107-b9d0-7d29d5e72859\") "
Nov 26 07:08:37 crc kubenswrapper[4492]: I1126 07:08:37.575762 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/55692980-c31f-4107-b9d0-7d29d5e72859-ceilometer-tls-certs\") pod \"55692980-c31f-4107-b9d0-7d29d5e72859\" (UID: \"55692980-c31f-4107-b9d0-7d29d5e72859\") "
Nov 26 07:08:37 crc kubenswrapper[4492]: I1126 07:08:37.575792 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/55692980-c31f-4107-b9d0-7d29d5e72859-log-httpd\") pod \"55692980-c31f-4107-b9d0-7d29d5e72859\" (UID: \"55692980-c31f-4107-b9d0-7d29d5e72859\") "
Nov 26 07:08:37 crc kubenswrapper[4492]: I1126 07:08:37.575823 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jmdj6\" (UniqueName: \"kubernetes.io/projected/55692980-c31f-4107-b9d0-7d29d5e72859-kube-api-access-jmdj6\") pod \"55692980-c31f-4107-b9d0-7d29d5e72859\" (UID: \"55692980-c31f-4107-b9d0-7d29d5e72859\") "
Nov 26 07:08:37 crc kubenswrapper[4492]: I1126 07:08:37.575857 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/55692980-c31f-4107-b9d0-7d29d5e72859-run-httpd\") pod \"55692980-c31f-4107-b9d0-7d29d5e72859\" (UID: \"55692980-c31f-4107-b9d0-7d29d5e72859\") "
Nov 26 07:08:37 crc kubenswrapper[4492]: I1126 07:08:37.575915 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/55692980-c31f-4107-b9d0-7d29d5e72859-config-data\") pod \"55692980-c31f-4107-b9d0-7d29d5e72859\" (UID: \"55692980-c31f-4107-b9d0-7d29d5e72859\") "
Nov 26 07:08:37 crc kubenswrapper[4492]: I1126 07:08:37.575932 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/55692980-c31f-4107-b9d0-7d29d5e72859-sg-core-conf-yaml\") pod \"55692980-c31f-4107-b9d0-7d29d5e72859\" (UID: \"55692980-c31f-4107-b9d0-7d29d5e72859\") "
Nov 26 07:08:37 crc kubenswrapper[4492]: I1126 07:08:37.575992 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/55692980-c31f-4107-b9d0-7d29d5e72859-scripts\") pod \"55692980-c31f-4107-b9d0-7d29d5e72859\" (UID: \"55692980-c31f-4107-b9d0-7d29d5e72859\") "
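[Editor's note] nova-cell1-novncproxy-0's startup probe above flips from "unhealthy" to "started" within about 65 ms (07:08:37.284 to 07:08:37.349), and only after that does its readiness probe report "ready" (07:08:37.779, further below): in Kubernetes, readiness and liveness results are not acted on until the startup probe has succeeded. A hypothetical minimal model of that gating, not the kubelet's actual prober:

package main

type probeState struct {
	started bool // set once the startup probe first succeeds
	ready   bool
}

// observe applies one probe result, ignoring readiness results until the
// startup probe has passed, matching the ordering in the log above.
func (s *probeState) observe(probe string, success bool) {
	switch probe {
	case "startup":
		if success {
			s.started = true // "SyncLoop (probe)" status="started"
		}
	case "readiness":
		if s.started {
			s.ready = success // status="ready"
		}
	}
}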
"kubernetes.io/empty-dir/55692980-c31f-4107-b9d0-7d29d5e72859-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "55692980-c31f-4107-b9d0-7d29d5e72859" (UID: "55692980-c31f-4107-b9d0-7d29d5e72859"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:08:37 crc kubenswrapper[4492]: I1126 07:08:37.580474 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/55692980-c31f-4107-b9d0-7d29d5e72859-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "55692980-c31f-4107-b9d0-7d29d5e72859" (UID: "55692980-c31f-4107-b9d0-7d29d5e72859"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:08:37 crc kubenswrapper[4492]: I1126 07:08:37.600701 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/55692980-c31f-4107-b9d0-7d29d5e72859-scripts" (OuterVolumeSpecName: "scripts") pod "55692980-c31f-4107-b9d0-7d29d5e72859" (UID: "55692980-c31f-4107-b9d0-7d29d5e72859"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:08:37 crc kubenswrapper[4492]: I1126 07:08:37.600876 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/55692980-c31f-4107-b9d0-7d29d5e72859-kube-api-access-jmdj6" (OuterVolumeSpecName: "kube-api-access-jmdj6") pod "55692980-c31f-4107-b9d0-7d29d5e72859" (UID: "55692980-c31f-4107-b9d0-7d29d5e72859"). InnerVolumeSpecName "kube-api-access-jmdj6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:08:37 crc kubenswrapper[4492]: I1126 07:08:37.609535 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/55692980-c31f-4107-b9d0-7d29d5e72859-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "55692980-c31f-4107-b9d0-7d29d5e72859" (UID: "55692980-c31f-4107-b9d0-7d29d5e72859"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:08:37 crc kubenswrapper[4492]: I1126 07:08:37.632508 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/55692980-c31f-4107-b9d0-7d29d5e72859-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "55692980-c31f-4107-b9d0-7d29d5e72859" (UID: "55692980-c31f-4107-b9d0-7d29d5e72859"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:08:37 crc kubenswrapper[4492]: I1126 07:08:37.658049 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 26 07:08:37 crc kubenswrapper[4492]: I1126 07:08:37.658194 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/55692980-c31f-4107-b9d0-7d29d5e72859-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "55692980-c31f-4107-b9d0-7d29d5e72859" (UID: "55692980-c31f-4107-b9d0-7d29d5e72859"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:08:37 crc kubenswrapper[4492]: W1126 07:08:37.660008 4492 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode2c3d448_6c92_456b_a3db_49b9774ebb58.slice/crio-17380fc7bbbb16bcf281f3fa4fad5980fc267cec8e82c12a8e82277ebcb1fab2 WatchSource:0}: Error finding container 17380fc7bbbb16bcf281f3fa4fad5980fc267cec8e82c12a8e82277ebcb1fab2: Status 404 returned error can't find the container with id 17380fc7bbbb16bcf281f3fa4fad5980fc267cec8e82c12a8e82277ebcb1fab2 Nov 26 07:08:37 crc kubenswrapper[4492]: I1126 07:08:37.679034 4492 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/55692980-c31f-4107-b9d0-7d29d5e72859-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 07:08:37 crc kubenswrapper[4492]: I1126 07:08:37.679060 4492 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/55692980-c31f-4107-b9d0-7d29d5e72859-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 26 07:08:37 crc kubenswrapper[4492]: I1126 07:08:37.679071 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jmdj6\" (UniqueName: \"kubernetes.io/projected/55692980-c31f-4107-b9d0-7d29d5e72859-kube-api-access-jmdj6\") on node \"crc\" DevicePath \"\"" Nov 26 07:08:37 crc kubenswrapper[4492]: I1126 07:08:37.679081 4492 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/55692980-c31f-4107-b9d0-7d29d5e72859-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 26 07:08:37 crc kubenswrapper[4492]: I1126 07:08:37.679091 4492 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/55692980-c31f-4107-b9d0-7d29d5e72859-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 26 07:08:37 crc kubenswrapper[4492]: I1126 07:08:37.679099 4492 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/55692980-c31f-4107-b9d0-7d29d5e72859-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 07:08:37 crc kubenswrapper[4492]: I1126 07:08:37.679107 4492 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/55692980-c31f-4107-b9d0-7d29d5e72859-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:08:37 crc kubenswrapper[4492]: I1126 07:08:37.685845 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/55692980-c31f-4107-b9d0-7d29d5e72859-config-data" (OuterVolumeSpecName: "config-data") pod "55692980-c31f-4107-b9d0-7d29d5e72859" (UID: "55692980-c31f-4107-b9d0-7d29d5e72859"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:08:37 crc kubenswrapper[4492]: I1126 07:08:37.735413 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"e2c3d448-6c92-456b-a3db-49b9774ebb58","Type":"ContainerStarted","Data":"17380fc7bbbb16bcf281f3fa4fad5980fc267cec8e82c12a8e82277ebcb1fab2"} Nov 26 07:08:37 crc kubenswrapper[4492]: I1126 07:08:37.737505 4492 generic.go:334] "Generic (PLEG): container finished" podID="55692980-c31f-4107-b9d0-7d29d5e72859" containerID="fc22e6ba27e3e4d3b5e8ad4e0b8ce864bd48c9a8af48a02e85eb1ded93d9ed4e" exitCode=0 Nov 26 07:08:37 crc kubenswrapper[4492]: I1126 07:08:37.737579 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 26 07:08:37 crc kubenswrapper[4492]: I1126 07:08:37.737597 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"55692980-c31f-4107-b9d0-7d29d5e72859","Type":"ContainerDied","Data":"fc22e6ba27e3e4d3b5e8ad4e0b8ce864bd48c9a8af48a02e85eb1ded93d9ed4e"} Nov 26 07:08:37 crc kubenswrapper[4492]: I1126 07:08:37.737642 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"55692980-c31f-4107-b9d0-7d29d5e72859","Type":"ContainerDied","Data":"5d6b3cd3df7c197809901b6e42804a2c158a82dc78e5f8ae3a39e458cab87f1b"} Nov 26 07:08:37 crc kubenswrapper[4492]: I1126 07:08:37.737665 4492 scope.go:117] "RemoveContainer" containerID="91bee3b43548a1372d42c21ff5d5ba9309fcd93cf477e1a9e571925e06239f06" Nov 26 07:08:37 crc kubenswrapper[4492]: I1126 07:08:37.779698 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-novncproxy-0" Nov 26 07:08:37 crc kubenswrapper[4492]: I1126 07:08:37.781479 4492 scope.go:117] "RemoveContainer" containerID="d881e68ba4f26ef0de94effc6dec27816cf0e5a1e6d0422b72f208d74100fb74" Nov 26 07:08:37 crc kubenswrapper[4492]: I1126 07:08:37.782009 4492 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/55692980-c31f-4107-b9d0-7d29d5e72859-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 07:08:37 crc kubenswrapper[4492]: I1126 07:08:37.785553 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 26 07:08:37 crc kubenswrapper[4492]: I1126 07:08:37.798636 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 26 07:08:37 crc kubenswrapper[4492]: I1126 07:08:37.803890 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 26 07:08:37 crc kubenswrapper[4492]: E1126 07:08:37.804380 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="55692980-c31f-4107-b9d0-7d29d5e72859" containerName="ceilometer-central-agent" Nov 26 07:08:37 crc kubenswrapper[4492]: I1126 07:08:37.804398 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="55692980-c31f-4107-b9d0-7d29d5e72859" containerName="ceilometer-central-agent" Nov 26 07:08:37 crc kubenswrapper[4492]: E1126 07:08:37.804438 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="55692980-c31f-4107-b9d0-7d29d5e72859" containerName="sg-core" Nov 26 07:08:37 crc kubenswrapper[4492]: I1126 07:08:37.804446 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="55692980-c31f-4107-b9d0-7d29d5e72859" containerName="sg-core" Nov 26 07:08:37 crc kubenswrapper[4492]: E1126 07:08:37.804457 4492 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="55692980-c31f-4107-b9d0-7d29d5e72859" containerName="ceilometer-notification-agent" Nov 26 07:08:37 crc kubenswrapper[4492]: I1126 07:08:37.804464 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="55692980-c31f-4107-b9d0-7d29d5e72859" containerName="ceilometer-notification-agent" Nov 26 07:08:37 crc kubenswrapper[4492]: E1126 07:08:37.804481 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="55692980-c31f-4107-b9d0-7d29d5e72859" containerName="proxy-httpd" Nov 26 07:08:37 crc kubenswrapper[4492]: I1126 07:08:37.804486 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="55692980-c31f-4107-b9d0-7d29d5e72859" containerName="proxy-httpd" Nov 26 07:08:37 crc kubenswrapper[4492]: I1126 07:08:37.804678 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="55692980-c31f-4107-b9d0-7d29d5e72859" containerName="ceilometer-central-agent" Nov 26 07:08:37 crc kubenswrapper[4492]: I1126 07:08:37.804704 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="55692980-c31f-4107-b9d0-7d29d5e72859" containerName="proxy-httpd" Nov 26 07:08:37 crc kubenswrapper[4492]: I1126 07:08:37.804716 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="55692980-c31f-4107-b9d0-7d29d5e72859" containerName="ceilometer-notification-agent" Nov 26 07:08:37 crc kubenswrapper[4492]: I1126 07:08:37.804725 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="55692980-c31f-4107-b9d0-7d29d5e72859" containerName="sg-core" Nov 26 07:08:37 crc kubenswrapper[4492]: I1126 07:08:37.806417 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 26 07:08:37 crc kubenswrapper[4492]: I1126 07:08:37.809561 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 26 07:08:37 crc kubenswrapper[4492]: I1126 07:08:37.809753 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 26 07:08:37 crc kubenswrapper[4492]: I1126 07:08:37.809770 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Nov 26 07:08:37 crc kubenswrapper[4492]: I1126 07:08:37.838657 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 26 07:08:37 crc kubenswrapper[4492]: I1126 07:08:37.845734 4492 scope.go:117] "RemoveContainer" containerID="1c891c52b8129685578a8aab53909a9be88971905a5fe37324712d514e962df3" Nov 26 07:08:37 crc kubenswrapper[4492]: I1126 07:08:37.874075 4492 scope.go:117] "RemoveContainer" containerID="fc22e6ba27e3e4d3b5e8ad4e0b8ce864bd48c9a8af48a02e85eb1ded93d9ed4e" Nov 26 07:08:37 crc kubenswrapper[4492]: I1126 07:08:37.894357 4492 scope.go:117] "RemoveContainer" containerID="91bee3b43548a1372d42c21ff5d5ba9309fcd93cf477e1a9e571925e06239f06" Nov 26 07:08:37 crc kubenswrapper[4492]: I1126 07:08:37.894820 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9c1e2afd-80af-48dd-87e3-8254332d2635-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"9c1e2afd-80af-48dd-87e3-8254332d2635\") " pod="openstack/ceilometer-0" Nov 26 07:08:37 crc kubenswrapper[4492]: E1126 07:08:37.895323 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"91bee3b43548a1372d42c21ff5d5ba9309fcd93cf477e1a9e571925e06239f06\": container with ID starting with 
Nov 26 07:08:37 crc kubenswrapper[4492]: E1126 07:08:37.895323 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"91bee3b43548a1372d42c21ff5d5ba9309fcd93cf477e1a9e571925e06239f06\": container with ID starting with 91bee3b43548a1372d42c21ff5d5ba9309fcd93cf477e1a9e571925e06239f06 not found: ID does not exist" containerID="91bee3b43548a1372d42c21ff5d5ba9309fcd93cf477e1a9e571925e06239f06"
Nov 26 07:08:37 crc kubenswrapper[4492]: I1126 07:08:37.895597 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"91bee3b43548a1372d42c21ff5d5ba9309fcd93cf477e1a9e571925e06239f06"} err="failed to get container status \"91bee3b43548a1372d42c21ff5d5ba9309fcd93cf477e1a9e571925e06239f06\": rpc error: code = NotFound desc = could not find container \"91bee3b43548a1372d42c21ff5d5ba9309fcd93cf477e1a9e571925e06239f06\": container with ID starting with 91bee3b43548a1372d42c21ff5d5ba9309fcd93cf477e1a9e571925e06239f06 not found: ID does not exist"
Nov 26 07:08:37 crc kubenswrapper[4492]: I1126 07:08:37.895692 4492 scope.go:117] "RemoveContainer" containerID="d881e68ba4f26ef0de94effc6dec27816cf0e5a1e6d0422b72f208d74100fb74"
Nov 26 07:08:37 crc kubenswrapper[4492]: I1126 07:08:37.895418 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9c1e2afd-80af-48dd-87e3-8254332d2635-run-httpd\") pod \"ceilometer-0\" (UID: \"9c1e2afd-80af-48dd-87e3-8254332d2635\") " pod="openstack/ceilometer-0"
Nov 26 07:08:37 crc kubenswrapper[4492]: I1126 07:08:37.896546 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n54fj\" (UniqueName: \"kubernetes.io/projected/9c1e2afd-80af-48dd-87e3-8254332d2635-kube-api-access-n54fj\") pod \"ceilometer-0\" (UID: \"9c1e2afd-80af-48dd-87e3-8254332d2635\") " pod="openstack/ceilometer-0"
Nov 26 07:08:37 crc kubenswrapper[4492]: E1126 07:08:37.897030 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d881e68ba4f26ef0de94effc6dec27816cf0e5a1e6d0422b72f208d74100fb74\": container with ID starting with d881e68ba4f26ef0de94effc6dec27816cf0e5a1e6d0422b72f208d74100fb74 not found: ID does not exist" containerID="d881e68ba4f26ef0de94effc6dec27816cf0e5a1e6d0422b72f208d74100fb74"
Nov 26 07:08:37 crc kubenswrapper[4492]: I1126 07:08:37.899504 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d881e68ba4f26ef0de94effc6dec27816cf0e5a1e6d0422b72f208d74100fb74"} err="failed to get container status \"d881e68ba4f26ef0de94effc6dec27816cf0e5a1e6d0422b72f208d74100fb74\": rpc error: code = NotFound desc = could not find container \"d881e68ba4f26ef0de94effc6dec27816cf0e5a1e6d0422b72f208d74100fb74\": container with ID starting with d881e68ba4f26ef0de94effc6dec27816cf0e5a1e6d0422b72f208d74100fb74 not found: ID does not exist"
Nov 26 07:08:37 crc kubenswrapper[4492]: I1126 07:08:37.899551 4492 scope.go:117] "RemoveContainer" containerID="1c891c52b8129685578a8aab53909a9be88971905a5fe37324712d514e962df3"
Nov 26 07:08:37 crc kubenswrapper[4492]: I1126 07:08:37.897845 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9c1e2afd-80af-48dd-87e3-8254332d2635-config-data\") pod \"ceilometer-0\" (UID: \"9c1e2afd-80af-48dd-87e3-8254332d2635\") " pod="openstack/ceilometer-0"
Nov 26 07:08:37 crc kubenswrapper[4492]: I1126 07:08:37.899716 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9c1e2afd-80af-48dd-87e3-8254332d2635-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"9c1e2afd-80af-48dd-87e3-8254332d2635\") " pod="openstack/ceilometer-0"
Nov 26 07:08:37 crc kubenswrapper[4492]: I1126 07:08:37.899756 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9c1e2afd-80af-48dd-87e3-8254332d2635-scripts\") pod \"ceilometer-0\" (UID: \"9c1e2afd-80af-48dd-87e3-8254332d2635\") " pod="openstack/ceilometer-0"
Nov 26 07:08:37 crc kubenswrapper[4492]: E1126 07:08:37.900078 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1c891c52b8129685578a8aab53909a9be88971905a5fe37324712d514e962df3\": container with ID starting with 1c891c52b8129685578a8aab53909a9be88971905a5fe37324712d514e962df3 not found: ID does not exist" containerID="1c891c52b8129685578a8aab53909a9be88971905a5fe37324712d514e962df3"
Nov 26 07:08:37 crc kubenswrapper[4492]: I1126 07:08:37.900110 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1c891c52b8129685578a8aab53909a9be88971905a5fe37324712d514e962df3"} err="failed to get container status \"1c891c52b8129685578a8aab53909a9be88971905a5fe37324712d514e962df3\": rpc error: code = NotFound desc = could not find container \"1c891c52b8129685578a8aab53909a9be88971905a5fe37324712d514e962df3\": container with ID starting with 1c891c52b8129685578a8aab53909a9be88971905a5fe37324712d514e962df3 not found: ID does not exist"
Nov 26 07:08:37 crc kubenswrapper[4492]: I1126 07:08:37.900131 4492 scope.go:117] "RemoveContainer" containerID="fc22e6ba27e3e4d3b5e8ad4e0b8ce864bd48c9a8af48a02e85eb1ded93d9ed4e"
Nov 26 07:08:37 crc kubenswrapper[4492]: I1126 07:08:37.900648 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9c1e2afd-80af-48dd-87e3-8254332d2635-log-httpd\") pod \"ceilometer-0\" (UID: \"9c1e2afd-80af-48dd-87e3-8254332d2635\") " pod="openstack/ceilometer-0"
Nov 26 07:08:37 crc kubenswrapper[4492]: E1126 07:08:37.900663 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fc22e6ba27e3e4d3b5e8ad4e0b8ce864bd48c9a8af48a02e85eb1ded93d9ed4e\": container with ID starting with fc22e6ba27e3e4d3b5e8ad4e0b8ce864bd48c9a8af48a02e85eb1ded93d9ed4e not found: ID does not exist" containerID="fc22e6ba27e3e4d3b5e8ad4e0b8ce864bd48c9a8af48a02e85eb1ded93d9ed4e"
Nov 26 07:08:37 crc kubenswrapper[4492]: I1126 07:08:37.900706 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fc22e6ba27e3e4d3b5e8ad4e0b8ce864bd48c9a8af48a02e85eb1ded93d9ed4e"} err="failed to get container status \"fc22e6ba27e3e4d3b5e8ad4e0b8ce864bd48c9a8af48a02e85eb1ded93d9ed4e\": rpc error: code = NotFound desc = could not find container \"fc22e6ba27e3e4d3b5e8ad4e0b8ce864bd48c9a8af48a02e85eb1ded93d9ed4e\": container with ID starting with fc22e6ba27e3e4d3b5e8ad4e0b8ce864bd48c9a8af48a02e85eb1ded93d9ed4e not found: ID does not exist"
Nov 26 07:08:37 crc kubenswrapper[4492]: I1126 07:08:37.900761 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/9c1e2afd-80af-48dd-87e3-8254332d2635-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"9c1e2afd-80af-48dd-87e3-8254332d2635\") " pod="openstack/ceilometer-0"
Nov 26 07:08:37 crc kubenswrapper[4492]: I1126 07:08:37.951030 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-cell-mapping-dhdz4"]
Nov 26 07:08:37 crc kubenswrapper[4492]: I1126 07:08:37.952955 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-dhdz4"
Nov 26 07:08:37 crc kubenswrapper[4492]: I1126 07:08:37.956390 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-scripts"
Nov 26 07:08:37 crc kubenswrapper[4492]: I1126 07:08:37.957339 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-config-data"
Nov 26 07:08:37 crc kubenswrapper[4492]: I1126 07:08:37.965558 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-dhdz4"]
Nov 26 07:08:38 crc kubenswrapper[4492]: I1126 07:08:38.004430 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/9c1e2afd-80af-48dd-87e3-8254332d2635-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"9c1e2afd-80af-48dd-87e3-8254332d2635\") " pod="openstack/ceilometer-0"
Nov 26 07:08:38 crc kubenswrapper[4492]: I1126 07:08:38.004754 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9c1e2afd-80af-48dd-87e3-8254332d2635-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"9c1e2afd-80af-48dd-87e3-8254332d2635\") " pod="openstack/ceilometer-0"
Nov 26 07:08:38 crc kubenswrapper[4492]: I1126 07:08:38.004883 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9c1e2afd-80af-48dd-87e3-8254332d2635-run-httpd\") pod \"ceilometer-0\" (UID: \"9c1e2afd-80af-48dd-87e3-8254332d2635\") " pod="openstack/ceilometer-0"
Nov 26 07:08:38 crc kubenswrapper[4492]: I1126 07:08:38.004958 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n54fj\" (UniqueName: \"kubernetes.io/projected/9c1e2afd-80af-48dd-87e3-8254332d2635-kube-api-access-n54fj\") pod \"ceilometer-0\" (UID: \"9c1e2afd-80af-48dd-87e3-8254332d2635\") " pod="openstack/ceilometer-0"
Nov 26 07:08:38 crc kubenswrapper[4492]: I1126 07:08:38.005003 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9c1e2afd-80af-48dd-87e3-8254332d2635-config-data\") pod \"ceilometer-0\" (UID: \"9c1e2afd-80af-48dd-87e3-8254332d2635\") " pod="openstack/ceilometer-0"
Nov 26 07:08:38 crc kubenswrapper[4492]: I1126 07:08:38.005037 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9c1e2afd-80af-48dd-87e3-8254332d2635-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"9c1e2afd-80af-48dd-87e3-8254332d2635\") " pod="openstack/ceilometer-0"
Nov 26 07:08:38 crc kubenswrapper[4492]: I1126 07:08:38.005070 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9c1e2afd-80af-48dd-87e3-8254332d2635-scripts\") pod \"ceilometer-0\" (UID: \"9c1e2afd-80af-48dd-87e3-8254332d2635\") " pod="openstack/ceilometer-0"
Nov 26 07:08:38 crc kubenswrapper[4492]: I1126 07:08:38.005158 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9c1e2afd-80af-48dd-87e3-8254332d2635-log-httpd\") pod \"ceilometer-0\" (UID: \"9c1e2afd-80af-48dd-87e3-8254332d2635\") " pod="openstack/ceilometer-0"
Nov 26 07:08:38 crc kubenswrapper[4492]: I1126 07:08:38.005549 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9c1e2afd-80af-48dd-87e3-8254332d2635-run-httpd\") pod \"ceilometer-0\" (UID: \"9c1e2afd-80af-48dd-87e3-8254332d2635\") " pod="openstack/ceilometer-0"
Nov 26 07:08:38 crc kubenswrapper[4492]: I1126 07:08:38.005600 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9c1e2afd-80af-48dd-87e3-8254332d2635-log-httpd\") pod \"ceilometer-0\" (UID: \"9c1e2afd-80af-48dd-87e3-8254332d2635\") " pod="openstack/ceilometer-0"
Nov 26 07:08:38 crc kubenswrapper[4492]: I1126 07:08:38.015335 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9c1e2afd-80af-48dd-87e3-8254332d2635-scripts\") pod \"ceilometer-0\" (UID: \"9c1e2afd-80af-48dd-87e3-8254332d2635\") " pod="openstack/ceilometer-0"
Nov 26 07:08:38 crc kubenswrapper[4492]: I1126 07:08:38.015988 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/9c1e2afd-80af-48dd-87e3-8254332d2635-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"9c1e2afd-80af-48dd-87e3-8254332d2635\") " pod="openstack/ceilometer-0"
Nov 26 07:08:38 crc kubenswrapper[4492]: I1126 07:08:38.016290 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9c1e2afd-80af-48dd-87e3-8254332d2635-config-data\") pod \"ceilometer-0\" (UID: \"9c1e2afd-80af-48dd-87e3-8254332d2635\") " pod="openstack/ceilometer-0"
Nov 26 07:08:38 crc kubenswrapper[4492]: I1126 07:08:38.017564 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9c1e2afd-80af-48dd-87e3-8254332d2635-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"9c1e2afd-80af-48dd-87e3-8254332d2635\") " pod="openstack/ceilometer-0"
Nov 26 07:08:38 crc kubenswrapper[4492]: I1126 07:08:38.018075 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9c1e2afd-80af-48dd-87e3-8254332d2635-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"9c1e2afd-80af-48dd-87e3-8254332d2635\") " pod="openstack/ceilometer-0"
Nov 26 07:08:38 crc kubenswrapper[4492]: I1126 07:08:38.022990 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n54fj\" (UniqueName: \"kubernetes.io/projected/9c1e2afd-80af-48dd-87e3-8254332d2635-kube-api-access-n54fj\") pod \"ceilometer-0\" (UID: \"9c1e2afd-80af-48dd-87e3-8254332d2635\") " pod="openstack/ceilometer-0"
Nov 26 07:08:38 crc kubenswrapper[4492]: I1126 07:08:38.107379 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/74c85bf8-cdcf-4e64-83cf-5f62d3c90b09-config-data\") pod \"nova-cell1-cell-mapping-dhdz4\" (UID: \"74c85bf8-cdcf-4e64-83cf-5f62d3c90b09\") " pod="openstack/nova-cell1-cell-mapping-dhdz4"
Nov 26 07:08:38 crc kubenswrapper[4492]: I1126 07:08:38.107629 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/74c85bf8-cdcf-4e64-83cf-5f62d3c90b09-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-dhdz4\" (UID: \"74c85bf8-cdcf-4e64-83cf-5f62d3c90b09\") " pod="openstack/nova-cell1-cell-mapping-dhdz4"
Nov 26 07:08:38 crc kubenswrapper[4492]: I1126 07:08:38.107869 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s9845\" (UniqueName: \"kubernetes.io/projected/74c85bf8-cdcf-4e64-83cf-5f62d3c90b09-kube-api-access-s9845\") pod \"nova-cell1-cell-mapping-dhdz4\" (UID: \"74c85bf8-cdcf-4e64-83cf-5f62d3c90b09\") " pod="openstack/nova-cell1-cell-mapping-dhdz4"
Nov 26 07:08:38 crc kubenswrapper[4492]: I1126 07:08:38.108095 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/74c85bf8-cdcf-4e64-83cf-5f62d3c90b09-scripts\") pod \"nova-cell1-cell-mapping-dhdz4\" (UID: \"74c85bf8-cdcf-4e64-83cf-5f62d3c90b09\") " pod="openstack/nova-cell1-cell-mapping-dhdz4"
Nov 26 07:08:38 crc kubenswrapper[4492]: I1126 07:08:38.140267 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 26 07:08:38 crc kubenswrapper[4492]: I1126 07:08:38.210508 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/74c85bf8-cdcf-4e64-83cf-5f62d3c90b09-config-data\") pod \"nova-cell1-cell-mapping-dhdz4\" (UID: \"74c85bf8-cdcf-4e64-83cf-5f62d3c90b09\") " pod="openstack/nova-cell1-cell-mapping-dhdz4"
Nov 26 07:08:38 crc kubenswrapper[4492]: I1126 07:08:38.210566 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/74c85bf8-cdcf-4e64-83cf-5f62d3c90b09-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-dhdz4\" (UID: \"74c85bf8-cdcf-4e64-83cf-5f62d3c90b09\") " pod="openstack/nova-cell1-cell-mapping-dhdz4"
Nov 26 07:08:38 crc kubenswrapper[4492]: I1126 07:08:38.210636 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s9845\" (UniqueName: \"kubernetes.io/projected/74c85bf8-cdcf-4e64-83cf-5f62d3c90b09-kube-api-access-s9845\") pod \"nova-cell1-cell-mapping-dhdz4\" (UID: \"74c85bf8-cdcf-4e64-83cf-5f62d3c90b09\") " pod="openstack/nova-cell1-cell-mapping-dhdz4"
Nov 26 07:08:38 crc kubenswrapper[4492]: I1126 07:08:38.210696 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/74c85bf8-cdcf-4e64-83cf-5f62d3c90b09-scripts\") pod \"nova-cell1-cell-mapping-dhdz4\" (UID: \"74c85bf8-cdcf-4e64-83cf-5f62d3c90b09\") " pod="openstack/nova-cell1-cell-mapping-dhdz4"
Nov 26 07:08:38 crc kubenswrapper[4492]: I1126 07:08:38.218352 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/74c85bf8-cdcf-4e64-83cf-5f62d3c90b09-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-dhdz4\" (UID: \"74c85bf8-cdcf-4e64-83cf-5f62d3c90b09\") " pod="openstack/nova-cell1-cell-mapping-dhdz4"
Nov 26 07:08:38 crc kubenswrapper[4492]: I1126 07:08:38.221645 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/74c85bf8-cdcf-4e64-83cf-5f62d3c90b09-scripts\") pod \"nova-cell1-cell-mapping-dhdz4\" (UID: \"74c85bf8-cdcf-4e64-83cf-5f62d3c90b09\") " pod="openstack/nova-cell1-cell-mapping-dhdz4"
Nov 26 07:08:38 crc kubenswrapper[4492]: I1126 07:08:38.222268 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/74c85bf8-cdcf-4e64-83cf-5f62d3c90b09-config-data\") pod \"nova-cell1-cell-mapping-dhdz4\" (UID: \"74c85bf8-cdcf-4e64-83cf-5f62d3c90b09\") " pod="openstack/nova-cell1-cell-mapping-dhdz4"
Nov 26 07:08:38 crc kubenswrapper[4492]: I1126 07:08:38.228628 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s9845\" (UniqueName: \"kubernetes.io/projected/74c85bf8-cdcf-4e64-83cf-5f62d3c90b09-kube-api-access-s9845\") pod \"nova-cell1-cell-mapping-dhdz4\" (UID: \"74c85bf8-cdcf-4e64-83cf-5f62d3c90b09\") " pod="openstack/nova-cell1-cell-mapping-dhdz4"
Nov 26 07:08:38 crc kubenswrapper[4492]: I1126 07:08:38.270541 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-dhdz4"
Nov 26 07:08:38 crc kubenswrapper[4492]: I1126 07:08:38.451446 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="55692980-c31f-4107-b9d0-7d29d5e72859" path="/var/lib/kubelet/pods/55692980-c31f-4107-b9d0-7d29d5e72859/volumes"
Nov 26 07:08:38 crc kubenswrapper[4492]: I1126 07:08:38.452489 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b1344418-ee16-445c-9a21-0cdd9f4e480d" path="/var/lib/kubelet/pods/b1344418-ee16-445c-9a21-0cdd9f4e480d/volumes"
Nov 26 07:08:38 crc kubenswrapper[4492]: I1126 07:08:38.682865 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-dhdz4"]
Nov 26 07:08:38 crc kubenswrapper[4492]: I1126 07:08:38.768341 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Nov 26 07:08:38 crc kubenswrapper[4492]: I1126 07:08:38.782034 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"e2c3d448-6c92-456b-a3db-49b9774ebb58","Type":"ContainerStarted","Data":"4bdf3252f1d81a4329cc700df11d7f4faa66966b2c0e4cdba0fd9fe3b4bac855"}
Nov 26 07:08:38 crc kubenswrapper[4492]: I1126 07:08:38.782080 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"e2c3d448-6c92-456b-a3db-49b9774ebb58","Type":"ContainerStarted","Data":"349b12ca93d59606bdc1c3b48fc62fda5787030b41588a0956c96148b679f761"}
Nov 26 07:08:38 crc kubenswrapper[4492]: I1126 07:08:38.789111 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-dhdz4" event={"ID":"74c85bf8-cdcf-4e64-83cf-5f62d3c90b09","Type":"ContainerStarted","Data":"cbd48cc6e20d44323e51fe33f40edf022157d6b9fd9cddcdd08f6d66a9e46aff"}
Nov 26 07:08:38 crc kubenswrapper[4492]: I1126 07:08:38.819466 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.819451727 podStartE2EDuration="2.819451727s" podCreationTimestamp="2025-11-26 07:08:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:08:38.814795151 +0000 UTC m=+1214.698683449" watchObservedRunningTime="2025-11-26 07:08:38.819451727 +0000 UTC m=+1214.703340025"
Nov 26 07:08:39 crc kubenswrapper[4492]: I1126 07:08:39.133336 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-7457c658cc-888nk"
Nov 26 07:08:39 crc kubenswrapper[4492]: I1126 07:08:39.195919 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-94fc6bf75-vk787"]
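[Editor's note] The pod_startup_latency_tracker entries above report two figures: podStartE2EDuration (observedRunningTime minus podCreationTimestamp) and podStartSLOduration, which additionally excludes time spent pulling images. For ceilometer-0 earlier: 8.5289s end-to-end minus the 3.5667s pull window (07:08:29.4354 to 07:08:33.0021) gives the logged 4.9622s SLO value; for nova-api-0 the pull timestamps are the zero time, so both figures are 2.8195s. A sketch of that arithmetic, assuming these semantics:

package main

import (
	"fmt"
	"time"
)

// startDurations reproduces the tracker's two numbers: e2e is running-time
// minus creation-time, and slo is e2e with the image-pull window removed
// (skipped when the pull timestamps are the zero time, as for nova-api-0).
func startDurations(created, firstPull, lastPull, observedRunning time.Time) (slo, e2e time.Duration) {
	e2e = observedRunning.Sub(created)
	slo = e2e
	if !firstPull.IsZero() && !lastPull.IsZero() {
		slo -= lastPull.Sub(firstPull) // exclude image pull time
	}
	return slo, e2e
}

func main() {
	// ceilometer-0's timestamps from the log, as offsets from 07:08:28.
	base := time.Date(2025, 11, 26, 7, 8, 28, 0, time.UTC)
	slo, e2e := startDurations(
		base,
		base.Add(1435425564*time.Nanosecond), // 07:08:29.435425564
		base.Add(5002135469*time.Nanosecond), // 07:08:33.002135469
		base.Add(8528914101*time.Nanosecond), // 07:08:36.528914101
	)
	fmt.Println(slo, e2e) // ≈ 4.962204196s 8.528914101s
}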
Nov 26 07:08:39 crc kubenswrapper[4492]: I1126 07:08:39.196198 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-94fc6bf75-vk787" podUID="0ac796a9-7e9c-44f1-9fe4-0b7457c53334" containerName="dnsmasq-dns" containerID="cri-o://5c14b2b57b88803924fbc9e792a07ca17cb71fd423df6a0ba4792ff589a485f9" gracePeriod=10
Nov 26 07:08:39 crc kubenswrapper[4492]: I1126 07:08:39.653129 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-94fc6bf75-vk787"
Nov 26 07:08:39 crc kubenswrapper[4492]: I1126 07:08:39.766649 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0ac796a9-7e9c-44f1-9fe4-0b7457c53334-config\") pod \"0ac796a9-7e9c-44f1-9fe4-0b7457c53334\" (UID: \"0ac796a9-7e9c-44f1-9fe4-0b7457c53334\") "
Nov 26 07:08:39 crc kubenswrapper[4492]: I1126 07:08:39.766739 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rzjrf\" (UniqueName: \"kubernetes.io/projected/0ac796a9-7e9c-44f1-9fe4-0b7457c53334-kube-api-access-rzjrf\") pod \"0ac796a9-7e9c-44f1-9fe4-0b7457c53334\" (UID: \"0ac796a9-7e9c-44f1-9fe4-0b7457c53334\") "
Nov 26 07:08:39 crc kubenswrapper[4492]: I1126 07:08:39.766920 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0ac796a9-7e9c-44f1-9fe4-0b7457c53334-ovsdbserver-nb\") pod \"0ac796a9-7e9c-44f1-9fe4-0b7457c53334\" (UID: \"0ac796a9-7e9c-44f1-9fe4-0b7457c53334\") "
Nov 26 07:08:39 crc kubenswrapper[4492]: I1126 07:08:39.766969 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0ac796a9-7e9c-44f1-9fe4-0b7457c53334-dns-svc\") pod \"0ac796a9-7e9c-44f1-9fe4-0b7457c53334\" (UID: \"0ac796a9-7e9c-44f1-9fe4-0b7457c53334\") "
Nov 26 07:08:39 crc kubenswrapper[4492]: I1126 07:08:39.767035 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/0ac796a9-7e9c-44f1-9fe4-0b7457c53334-dns-swift-storage-0\") pod \"0ac796a9-7e9c-44f1-9fe4-0b7457c53334\" (UID: \"0ac796a9-7e9c-44f1-9fe4-0b7457c53334\") "
Nov 26 07:08:39 crc kubenswrapper[4492]: I1126 07:08:39.767214 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0ac796a9-7e9c-44f1-9fe4-0b7457c53334-ovsdbserver-sb\") pod \"0ac796a9-7e9c-44f1-9fe4-0b7457c53334\" (UID: \"0ac796a9-7e9c-44f1-9fe4-0b7457c53334\") "
Nov 26 07:08:39 crc kubenswrapper[4492]: I1126 07:08:39.784428 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0ac796a9-7e9c-44f1-9fe4-0b7457c53334-kube-api-access-rzjrf" (OuterVolumeSpecName: "kube-api-access-rzjrf") pod "0ac796a9-7e9c-44f1-9fe4-0b7457c53334" (UID: "0ac796a9-7e9c-44f1-9fe4-0b7457c53334"). InnerVolumeSpecName "kube-api-access-rzjrf". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 07:08:39 crc kubenswrapper[4492]: I1126 07:08:39.812338 4492 generic.go:334] "Generic (PLEG): container finished" podID="0ac796a9-7e9c-44f1-9fe4-0b7457c53334" containerID="5c14b2b57b88803924fbc9e792a07ca17cb71fd423df6a0ba4792ff589a485f9" exitCode=0
Nov 26 07:08:39 crc kubenswrapper[4492]: I1126 07:08:39.812418 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-94fc6bf75-vk787" event={"ID":"0ac796a9-7e9c-44f1-9fe4-0b7457c53334","Type":"ContainerDied","Data":"5c14b2b57b88803924fbc9e792a07ca17cb71fd423df6a0ba4792ff589a485f9"}
Nov 26 07:08:39 crc kubenswrapper[4492]: I1126 07:08:39.812470 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-94fc6bf75-vk787" event={"ID":"0ac796a9-7e9c-44f1-9fe4-0b7457c53334","Type":"ContainerDied","Data":"8c5ee183c02b662c29e381a1534ded3934ed7d5b6689e270f9638134e5c87141"}
Nov 26 07:08:39 crc kubenswrapper[4492]: I1126 07:08:39.812490 4492 scope.go:117] "RemoveContainer" containerID="5c14b2b57b88803924fbc9e792a07ca17cb71fd423df6a0ba4792ff589a485f9"
Nov 26 07:08:39 crc kubenswrapper[4492]: I1126 07:08:39.812671 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-94fc6bf75-vk787"
Nov 26 07:08:39 crc kubenswrapper[4492]: I1126 07:08:39.822532 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-dhdz4" event={"ID":"74c85bf8-cdcf-4e64-83cf-5f62d3c90b09","Type":"ContainerStarted","Data":"967bab7794e098a626be43b5e37a02c3e091858de521fd6111b078bfa5a66c07"}
Nov 26 07:08:39 crc kubenswrapper[4492]: I1126 07:08:39.824302 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0ac796a9-7e9c-44f1-9fe4-0b7457c53334-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "0ac796a9-7e9c-44f1-9fe4-0b7457c53334" (UID: "0ac796a9-7e9c-44f1-9fe4-0b7457c53334"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 07:08:39 crc kubenswrapper[4492]: I1126 07:08:39.825268 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9c1e2afd-80af-48dd-87e3-8254332d2635","Type":"ContainerStarted","Data":"4c068ccfefc1cd9bf042e5a81ab5c322c1d106565c4ca2d591610d8496d2ec47"}
Nov 26 07:08:39 crc kubenswrapper[4492]: I1126 07:08:39.825291 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9c1e2afd-80af-48dd-87e3-8254332d2635","Type":"ContainerStarted","Data":"bd771177f3e5bf204a949998edc3f9eda6c49d80e17047777af2bd97a69c8ed1"}
Nov 26 07:08:39 crc kubenswrapper[4492]: I1126 07:08:39.835832 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0ac796a9-7e9c-44f1-9fe4-0b7457c53334-config" (OuterVolumeSpecName: "config") pod "0ac796a9-7e9c-44f1-9fe4-0b7457c53334" (UID: "0ac796a9-7e9c-44f1-9fe4-0b7457c53334"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 07:08:39 crc kubenswrapper[4492]: I1126 07:08:39.839361 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-cell-mapping-dhdz4" podStartSLOduration=2.839351776 podStartE2EDuration="2.839351776s" podCreationTimestamp="2025-11-26 07:08:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:08:39.835147401 +0000 UTC m=+1215.719035699" watchObservedRunningTime="2025-11-26 07:08:39.839351776 +0000 UTC m=+1215.723240074"
Nov 26 07:08:39 crc kubenswrapper[4492]: I1126 07:08:39.846035 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0ac796a9-7e9c-44f1-9fe4-0b7457c53334-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "0ac796a9-7e9c-44f1-9fe4-0b7457c53334" (UID: "0ac796a9-7e9c-44f1-9fe4-0b7457c53334"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 07:08:39 crc kubenswrapper[4492]: I1126 07:08:39.850414 4492 scope.go:117] "RemoveContainer" containerID="d5b721849adc77dfb7bfc630454eba4fdc40d719030ce4c33932fc67097dc95c"
Nov 26 07:08:39 crc kubenswrapper[4492]: I1126 07:08:39.859599 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0ac796a9-7e9c-44f1-9fe4-0b7457c53334-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "0ac796a9-7e9c-44f1-9fe4-0b7457c53334" (UID: "0ac796a9-7e9c-44f1-9fe4-0b7457c53334"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 07:08:39 crc kubenswrapper[4492]: I1126 07:08:39.862096 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0ac796a9-7e9c-44f1-9fe4-0b7457c53334-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "0ac796a9-7e9c-44f1-9fe4-0b7457c53334" (UID: "0ac796a9-7e9c-44f1-9fe4-0b7457c53334"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 07:08:39 crc kubenswrapper[4492]: I1126 07:08:39.875625 4492 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0ac796a9-7e9c-44f1-9fe4-0b7457c53334-config\") on node \"crc\" DevicePath \"\""
Nov 26 07:08:39 crc kubenswrapper[4492]: I1126 07:08:39.875662 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rzjrf\" (UniqueName: \"kubernetes.io/projected/0ac796a9-7e9c-44f1-9fe4-0b7457c53334-kube-api-access-rzjrf\") on node \"crc\" DevicePath \"\""
Nov 26 07:08:39 crc kubenswrapper[4492]: I1126 07:08:39.875678 4492 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0ac796a9-7e9c-44f1-9fe4-0b7457c53334-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Nov 26 07:08:39 crc kubenswrapper[4492]: I1126 07:08:39.875687 4492 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0ac796a9-7e9c-44f1-9fe4-0b7457c53334-dns-svc\") on node \"crc\" DevicePath \"\""
Nov 26 07:08:39 crc kubenswrapper[4492]: I1126 07:08:39.875696 4492 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/0ac796a9-7e9c-44f1-9fe4-0b7457c53334-dns-swift-storage-0\") on node \"crc\" DevicePath \"\""
Nov 26 07:08:39 crc kubenswrapper[4492]: I1126 07:08:39.875705 4492 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0ac796a9-7e9c-44f1-9fe4-0b7457c53334-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Nov 26 07:08:39 crc kubenswrapper[4492]: I1126 07:08:39.878338 4492 scope.go:117] "RemoveContainer" containerID="5c14b2b57b88803924fbc9e792a07ca17cb71fd423df6a0ba4792ff589a485f9"
Nov 26 07:08:39 crc kubenswrapper[4492]: E1126 07:08:39.878781 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5c14b2b57b88803924fbc9e792a07ca17cb71fd423df6a0ba4792ff589a485f9\": container with ID starting with 5c14b2b57b88803924fbc9e792a07ca17cb71fd423df6a0ba4792ff589a485f9 not found: ID does not exist" containerID="5c14b2b57b88803924fbc9e792a07ca17cb71fd423df6a0ba4792ff589a485f9"
Nov 26 07:08:39 crc kubenswrapper[4492]: I1126 07:08:39.878820 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5c14b2b57b88803924fbc9e792a07ca17cb71fd423df6a0ba4792ff589a485f9"} err="failed to get container status \"5c14b2b57b88803924fbc9e792a07ca17cb71fd423df6a0ba4792ff589a485f9\": rpc error: code = NotFound desc = could not find container \"5c14b2b57b88803924fbc9e792a07ca17cb71fd423df6a0ba4792ff589a485f9\": container with ID starting with 5c14b2b57b88803924fbc9e792a07ca17cb71fd423df6a0ba4792ff589a485f9 not found: ID does not exist"
Nov 26 07:08:39 crc kubenswrapper[4492]: I1126 07:08:39.878845 4492 scope.go:117] "RemoveContainer" containerID="d5b721849adc77dfb7bfc630454eba4fdc40d719030ce4c33932fc67097dc95c"
Nov 26 07:08:39 crc kubenswrapper[4492]: E1126 07:08:39.879344 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d5b721849adc77dfb7bfc630454eba4fdc40d719030ce4c33932fc67097dc95c\": container with ID starting with d5b721849adc77dfb7bfc630454eba4fdc40d719030ce4c33932fc67097dc95c not found: ID does not exist" containerID="d5b721849adc77dfb7bfc630454eba4fdc40d719030ce4c33932fc67097dc95c"
Nov 26 07:08:39
crc kubenswrapper[4492]: I1126 07:08:39.879372 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d5b721849adc77dfb7bfc630454eba4fdc40d719030ce4c33932fc67097dc95c"} err="failed to get container status \"d5b721849adc77dfb7bfc630454eba4fdc40d719030ce4c33932fc67097dc95c\": rpc error: code = NotFound desc = could not find container \"d5b721849adc77dfb7bfc630454eba4fdc40d719030ce4c33932fc67097dc95c\": container with ID starting with d5b721849adc77dfb7bfc630454eba4fdc40d719030ce4c33932fc67097dc95c not found: ID does not exist" Nov 26 07:08:40 crc kubenswrapper[4492]: I1126 07:08:40.152058 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-94fc6bf75-vk787"] Nov 26 07:08:40 crc kubenswrapper[4492]: I1126 07:08:40.159010 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-94fc6bf75-vk787"] Nov 26 07:08:40 crc kubenswrapper[4492]: I1126 07:08:40.447410 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0ac796a9-7e9c-44f1-9fe4-0b7457c53334" path="/var/lib/kubelet/pods/0ac796a9-7e9c-44f1-9fe4-0b7457c53334/volumes" Nov 26 07:08:40 crc kubenswrapper[4492]: I1126 07:08:40.841767 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9c1e2afd-80af-48dd-87e3-8254332d2635","Type":"ContainerStarted","Data":"bfbb81b0e157480f33b83019197d7e11138faf9856c6f44f1ec20349e40a58d3"} Nov 26 07:08:41 crc kubenswrapper[4492]: I1126 07:08:41.854651 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9c1e2afd-80af-48dd-87e3-8254332d2635","Type":"ContainerStarted","Data":"68da588f65c2d7ae839c8239240a8acc1026dea471fbb39250b6dedc30ea74aa"} Nov 26 07:08:42 crc kubenswrapper[4492]: I1126 07:08:42.868216 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9c1e2afd-80af-48dd-87e3-8254332d2635","Type":"ContainerStarted","Data":"da7f3ed7640a7452ee453751e37e86ed90a640560a7a214d18a2bd4fb8e65e54"} Nov 26 07:08:42 crc kubenswrapper[4492]: I1126 07:08:42.869034 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 26 07:08:42 crc kubenswrapper[4492]: I1126 07:08:42.889667 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.104915579 podStartE2EDuration="5.889653275s" podCreationTimestamp="2025-11-26 07:08:37 +0000 UTC" firstStartedPulling="2025-11-26 07:08:38.849306285 +0000 UTC m=+1214.733194582" lastFinishedPulling="2025-11-26 07:08:42.63404398 +0000 UTC m=+1218.517932278" observedRunningTime="2025-11-26 07:08:42.882365192 +0000 UTC m=+1218.766253490" watchObservedRunningTime="2025-11-26 07:08:42.889653275 +0000 UTC m=+1218.773541573" Nov 26 07:08:44 crc kubenswrapper[4492]: I1126 07:08:44.894237 4492 generic.go:334] "Generic (PLEG): container finished" podID="74c85bf8-cdcf-4e64-83cf-5f62d3c90b09" containerID="967bab7794e098a626be43b5e37a02c3e091858de521fd6111b078bfa5a66c07" exitCode=0 Nov 26 07:08:44 crc kubenswrapper[4492]: I1126 07:08:44.895897 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-dhdz4" event={"ID":"74c85bf8-cdcf-4e64-83cf-5f62d3c90b09","Type":"ContainerDied","Data":"967bab7794e098a626be43b5e37a02c3e091858de521fd6111b078bfa5a66c07"} Nov 26 07:08:46 crc kubenswrapper[4492]: I1126 07:08:46.215730 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-dhdz4" Nov 26 07:08:46 crc kubenswrapper[4492]: I1126 07:08:46.349386 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/74c85bf8-cdcf-4e64-83cf-5f62d3c90b09-scripts\") pod \"74c85bf8-cdcf-4e64-83cf-5f62d3c90b09\" (UID: \"74c85bf8-cdcf-4e64-83cf-5f62d3c90b09\") " Nov 26 07:08:46 crc kubenswrapper[4492]: I1126 07:08:46.349451 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/74c85bf8-cdcf-4e64-83cf-5f62d3c90b09-config-data\") pod \"74c85bf8-cdcf-4e64-83cf-5f62d3c90b09\" (UID: \"74c85bf8-cdcf-4e64-83cf-5f62d3c90b09\") " Nov 26 07:08:46 crc kubenswrapper[4492]: I1126 07:08:46.349777 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/74c85bf8-cdcf-4e64-83cf-5f62d3c90b09-combined-ca-bundle\") pod \"74c85bf8-cdcf-4e64-83cf-5f62d3c90b09\" (UID: \"74c85bf8-cdcf-4e64-83cf-5f62d3c90b09\") " Nov 26 07:08:46 crc kubenswrapper[4492]: I1126 07:08:46.349910 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s9845\" (UniqueName: \"kubernetes.io/projected/74c85bf8-cdcf-4e64-83cf-5f62d3c90b09-kube-api-access-s9845\") pod \"74c85bf8-cdcf-4e64-83cf-5f62d3c90b09\" (UID: \"74c85bf8-cdcf-4e64-83cf-5f62d3c90b09\") " Nov 26 07:08:46 crc kubenswrapper[4492]: I1126 07:08:46.362739 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/74c85bf8-cdcf-4e64-83cf-5f62d3c90b09-scripts" (OuterVolumeSpecName: "scripts") pod "74c85bf8-cdcf-4e64-83cf-5f62d3c90b09" (UID: "74c85bf8-cdcf-4e64-83cf-5f62d3c90b09"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:08:46 crc kubenswrapper[4492]: I1126 07:08:46.372567 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/74c85bf8-cdcf-4e64-83cf-5f62d3c90b09-kube-api-access-s9845" (OuterVolumeSpecName: "kube-api-access-s9845") pod "74c85bf8-cdcf-4e64-83cf-5f62d3c90b09" (UID: "74c85bf8-cdcf-4e64-83cf-5f62d3c90b09"). InnerVolumeSpecName "kube-api-access-s9845". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:08:46 crc kubenswrapper[4492]: I1126 07:08:46.384299 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/74c85bf8-cdcf-4e64-83cf-5f62d3c90b09-config-data" (OuterVolumeSpecName: "config-data") pod "74c85bf8-cdcf-4e64-83cf-5f62d3c90b09" (UID: "74c85bf8-cdcf-4e64-83cf-5f62d3c90b09"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:08:46 crc kubenswrapper[4492]: I1126 07:08:46.399268 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/74c85bf8-cdcf-4e64-83cf-5f62d3c90b09-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "74c85bf8-cdcf-4e64-83cf-5f62d3c90b09" (UID: "74c85bf8-cdcf-4e64-83cf-5f62d3c90b09"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:08:46 crc kubenswrapper[4492]: I1126 07:08:46.457706 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s9845\" (UniqueName: \"kubernetes.io/projected/74c85bf8-cdcf-4e64-83cf-5f62d3c90b09-kube-api-access-s9845\") on node \"crc\" DevicePath \"\"" Nov 26 07:08:46 crc kubenswrapper[4492]: I1126 07:08:46.457737 4492 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/74c85bf8-cdcf-4e64-83cf-5f62d3c90b09-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 07:08:46 crc kubenswrapper[4492]: I1126 07:08:46.457752 4492 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/74c85bf8-cdcf-4e64-83cf-5f62d3c90b09-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 07:08:46 crc kubenswrapper[4492]: I1126 07:08:46.457763 4492 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/74c85bf8-cdcf-4e64-83cf-5f62d3c90b09-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:08:46 crc kubenswrapper[4492]: I1126 07:08:46.918767 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-dhdz4" event={"ID":"74c85bf8-cdcf-4e64-83cf-5f62d3c90b09","Type":"ContainerDied","Data":"cbd48cc6e20d44323e51fe33f40edf022157d6b9fd9cddcdd08f6d66a9e46aff"} Nov 26 07:08:46 crc kubenswrapper[4492]: I1126 07:08:46.919214 4492 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="cbd48cc6e20d44323e51fe33f40edf022157d6b9fd9cddcdd08f6d66a9e46aff" Nov 26 07:08:46 crc kubenswrapper[4492]: I1126 07:08:46.918843 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-dhdz4" Nov 26 07:08:47 crc kubenswrapper[4492]: I1126 07:08:47.104576 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 26 07:08:47 crc kubenswrapper[4492]: I1126 07:08:47.104924 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="e2c3d448-6c92-456b-a3db-49b9774ebb58" containerName="nova-api-log" containerID="cri-o://349b12ca93d59606bdc1c3b48fc62fda5787030b41588a0956c96148b679f761" gracePeriod=30 Nov 26 07:08:47 crc kubenswrapper[4492]: I1126 07:08:47.105122 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="e2c3d448-6c92-456b-a3db-49b9774ebb58" containerName="nova-api-api" containerID="cri-o://4bdf3252f1d81a4329cc700df11d7f4faa66966b2c0e4cdba0fd9fe3b4bac855" gracePeriod=30 Nov 26 07:08:47 crc kubenswrapper[4492]: I1126 07:08:47.122765 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 07:08:47 crc kubenswrapper[4492]: I1126 07:08:47.123073 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="53275682-2daa-41b3-a4b8-daaf0156e239" containerName="nova-scheduler-scheduler" containerID="cri-o://9450d16206557faeaf972aa2373cfef2520cbfd4b9b87cdf6cdee7a4b2dcef58" gracePeriod=30 Nov 26 07:08:47 crc kubenswrapper[4492]: I1126 07:08:47.159560 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 07:08:47 crc kubenswrapper[4492]: I1126 07:08:47.159768 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="ace53bbf-c004-4557-ad30-eac2ac4f64ea" 
containerName="nova-metadata-log" containerID="cri-o://66b07abe56febf6635ffeae57fc486a01b07c25ca17c7bd948b10f27cae68455" gracePeriod=30 Nov 26 07:08:47 crc kubenswrapper[4492]: I1126 07:08:47.159854 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="ace53bbf-c004-4557-ad30-eac2ac4f64ea" containerName="nova-metadata-metadata" containerID="cri-o://0138605cdc97dbe5fb3f81b092a81eb2f47abe4a1cc8364fbf79f3a20b46e247" gracePeriod=30 Nov 26 07:08:47 crc kubenswrapper[4492]: I1126 07:08:47.662139 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 26 07:08:47 crc kubenswrapper[4492]: I1126 07:08:47.682411 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e2c3d448-6c92-456b-a3db-49b9774ebb58-internal-tls-certs\") pod \"e2c3d448-6c92-456b-a3db-49b9774ebb58\" (UID: \"e2c3d448-6c92-456b-a3db-49b9774ebb58\") " Nov 26 07:08:47 crc kubenswrapper[4492]: I1126 07:08:47.682499 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-frctl\" (UniqueName: \"kubernetes.io/projected/e2c3d448-6c92-456b-a3db-49b9774ebb58-kube-api-access-frctl\") pod \"e2c3d448-6c92-456b-a3db-49b9774ebb58\" (UID: \"e2c3d448-6c92-456b-a3db-49b9774ebb58\") " Nov 26 07:08:47 crc kubenswrapper[4492]: I1126 07:08:47.682545 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e2c3d448-6c92-456b-a3db-49b9774ebb58-combined-ca-bundle\") pod \"e2c3d448-6c92-456b-a3db-49b9774ebb58\" (UID: \"e2c3d448-6c92-456b-a3db-49b9774ebb58\") " Nov 26 07:08:47 crc kubenswrapper[4492]: I1126 07:08:47.682620 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e2c3d448-6c92-456b-a3db-49b9774ebb58-config-data\") pod \"e2c3d448-6c92-456b-a3db-49b9774ebb58\" (UID: \"e2c3d448-6c92-456b-a3db-49b9774ebb58\") " Nov 26 07:08:47 crc kubenswrapper[4492]: I1126 07:08:47.682688 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e2c3d448-6c92-456b-a3db-49b9774ebb58-logs\") pod \"e2c3d448-6c92-456b-a3db-49b9774ebb58\" (UID: \"e2c3d448-6c92-456b-a3db-49b9774ebb58\") " Nov 26 07:08:47 crc kubenswrapper[4492]: I1126 07:08:47.682730 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e2c3d448-6c92-456b-a3db-49b9774ebb58-public-tls-certs\") pod \"e2c3d448-6c92-456b-a3db-49b9774ebb58\" (UID: \"e2c3d448-6c92-456b-a3db-49b9774ebb58\") " Nov 26 07:08:47 crc kubenswrapper[4492]: I1126 07:08:47.684742 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e2c3d448-6c92-456b-a3db-49b9774ebb58-logs" (OuterVolumeSpecName: "logs") pod "e2c3d448-6c92-456b-a3db-49b9774ebb58" (UID: "e2c3d448-6c92-456b-a3db-49b9774ebb58"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:08:47 crc kubenswrapper[4492]: I1126 07:08:47.693052 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e2c3d448-6c92-456b-a3db-49b9774ebb58-kube-api-access-frctl" (OuterVolumeSpecName: "kube-api-access-frctl") pod "e2c3d448-6c92-456b-a3db-49b9774ebb58" (UID: "e2c3d448-6c92-456b-a3db-49b9774ebb58"). InnerVolumeSpecName "kube-api-access-frctl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:08:47 crc kubenswrapper[4492]: I1126 07:08:47.724337 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e2c3d448-6c92-456b-a3db-49b9774ebb58-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e2c3d448-6c92-456b-a3db-49b9774ebb58" (UID: "e2c3d448-6c92-456b-a3db-49b9774ebb58"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:08:47 crc kubenswrapper[4492]: I1126 07:08:47.727320 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e2c3d448-6c92-456b-a3db-49b9774ebb58-config-data" (OuterVolumeSpecName: "config-data") pod "e2c3d448-6c92-456b-a3db-49b9774ebb58" (UID: "e2c3d448-6c92-456b-a3db-49b9774ebb58"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:08:47 crc kubenswrapper[4492]: I1126 07:08:47.737646 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e2c3d448-6c92-456b-a3db-49b9774ebb58-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "e2c3d448-6c92-456b-a3db-49b9774ebb58" (UID: "e2c3d448-6c92-456b-a3db-49b9774ebb58"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:08:47 crc kubenswrapper[4492]: I1126 07:08:47.754348 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e2c3d448-6c92-456b-a3db-49b9774ebb58-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "e2c3d448-6c92-456b-a3db-49b9774ebb58" (UID: "e2c3d448-6c92-456b-a3db-49b9774ebb58"). InnerVolumeSpecName "public-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:08:47 crc kubenswrapper[4492]: I1126 07:08:47.785630 4492 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e2c3d448-6c92-456b-a3db-49b9774ebb58-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 07:08:47 crc kubenswrapper[4492]: I1126 07:08:47.785674 4492 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e2c3d448-6c92-456b-a3db-49b9774ebb58-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 07:08:47 crc kubenswrapper[4492]: I1126 07:08:47.785685 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-frctl\" (UniqueName: \"kubernetes.io/projected/e2c3d448-6c92-456b-a3db-49b9774ebb58-kube-api-access-frctl\") on node \"crc\" DevicePath \"\"" Nov 26 07:08:47 crc kubenswrapper[4492]: I1126 07:08:47.785700 4492 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e2c3d448-6c92-456b-a3db-49b9774ebb58-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:08:47 crc kubenswrapper[4492]: I1126 07:08:47.785712 4492 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e2c3d448-6c92-456b-a3db-49b9774ebb58-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 07:08:47 crc kubenswrapper[4492]: I1126 07:08:47.785721 4492 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e2c3d448-6c92-456b-a3db-49b9774ebb58-logs\") on node \"crc\" DevicePath \"\"" Nov 26 07:08:47 crc kubenswrapper[4492]: I1126 07:08:47.930812 4492 generic.go:334] "Generic (PLEG): container finished" podID="ace53bbf-c004-4557-ad30-eac2ac4f64ea" containerID="66b07abe56febf6635ffeae57fc486a01b07c25ca17c7bd948b10f27cae68455" exitCode=143 Nov 26 07:08:47 crc kubenswrapper[4492]: I1126 07:08:47.930973 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"ace53bbf-c004-4557-ad30-eac2ac4f64ea","Type":"ContainerDied","Data":"66b07abe56febf6635ffeae57fc486a01b07c25ca17c7bd948b10f27cae68455"} Nov 26 07:08:47 crc kubenswrapper[4492]: I1126 07:08:47.935164 4492 generic.go:334] "Generic (PLEG): container finished" podID="e2c3d448-6c92-456b-a3db-49b9774ebb58" containerID="4bdf3252f1d81a4329cc700df11d7f4faa66966b2c0e4cdba0fd9fe3b4bac855" exitCode=0 Nov 26 07:08:47 crc kubenswrapper[4492]: I1126 07:08:47.935265 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 26 07:08:47 crc kubenswrapper[4492]: I1126 07:08:47.935281 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"e2c3d448-6c92-456b-a3db-49b9774ebb58","Type":"ContainerDied","Data":"4bdf3252f1d81a4329cc700df11d7f4faa66966b2c0e4cdba0fd9fe3b4bac855"} Nov 26 07:08:47 crc kubenswrapper[4492]: I1126 07:08:47.935347 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"e2c3d448-6c92-456b-a3db-49b9774ebb58","Type":"ContainerDied","Data":"349b12ca93d59606bdc1c3b48fc62fda5787030b41588a0956c96148b679f761"} Nov 26 07:08:47 crc kubenswrapper[4492]: I1126 07:08:47.935381 4492 scope.go:117] "RemoveContainer" containerID="4bdf3252f1d81a4329cc700df11d7f4faa66966b2c0e4cdba0fd9fe3b4bac855" Nov 26 07:08:47 crc kubenswrapper[4492]: I1126 07:08:47.935274 4492 generic.go:334] "Generic (PLEG): container finished" podID="e2c3d448-6c92-456b-a3db-49b9774ebb58" containerID="349b12ca93d59606bdc1c3b48fc62fda5787030b41588a0956c96148b679f761" exitCode=143 Nov 26 07:08:47 crc kubenswrapper[4492]: I1126 07:08:47.935559 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"e2c3d448-6c92-456b-a3db-49b9774ebb58","Type":"ContainerDied","Data":"17380fc7bbbb16bcf281f3fa4fad5980fc267cec8e82c12a8e82277ebcb1fab2"} Nov 26 07:08:47 crc kubenswrapper[4492]: I1126 07:08:47.967524 4492 scope.go:117] "RemoveContainer" containerID="349b12ca93d59606bdc1c3b48fc62fda5787030b41588a0956c96148b679f761" Nov 26 07:08:47 crc kubenswrapper[4492]: I1126 07:08:47.969030 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 26 07:08:47 crc kubenswrapper[4492]: I1126 07:08:47.987462 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 26 07:08:48 crc kubenswrapper[4492]: I1126 07:08:48.006385 4492 scope.go:117] "RemoveContainer" containerID="4bdf3252f1d81a4329cc700df11d7f4faa66966b2c0e4cdba0fd9fe3b4bac855" Nov 26 07:08:48 crc kubenswrapper[4492]: E1126 07:08:48.007165 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4bdf3252f1d81a4329cc700df11d7f4faa66966b2c0e4cdba0fd9fe3b4bac855\": container with ID starting with 4bdf3252f1d81a4329cc700df11d7f4faa66966b2c0e4cdba0fd9fe3b4bac855 not found: ID does not exist" containerID="4bdf3252f1d81a4329cc700df11d7f4faa66966b2c0e4cdba0fd9fe3b4bac855" Nov 26 07:08:48 crc kubenswrapper[4492]: I1126 07:08:48.007232 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4bdf3252f1d81a4329cc700df11d7f4faa66966b2c0e4cdba0fd9fe3b4bac855"} err="failed to get container status \"4bdf3252f1d81a4329cc700df11d7f4faa66966b2c0e4cdba0fd9fe3b4bac855\": rpc error: code = NotFound desc = could not find container \"4bdf3252f1d81a4329cc700df11d7f4faa66966b2c0e4cdba0fd9fe3b4bac855\": container with ID starting with 4bdf3252f1d81a4329cc700df11d7f4faa66966b2c0e4cdba0fd9fe3b4bac855 not found: ID does not exist" Nov 26 07:08:48 crc kubenswrapper[4492]: I1126 07:08:48.007261 4492 scope.go:117] "RemoveContainer" containerID="349b12ca93d59606bdc1c3b48fc62fda5787030b41588a0956c96148b679f761" Nov 26 07:08:48 crc kubenswrapper[4492]: E1126 07:08:48.007482 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"349b12ca93d59606bdc1c3b48fc62fda5787030b41588a0956c96148b679f761\": container with ID starting with 
349b12ca93d59606bdc1c3b48fc62fda5787030b41588a0956c96148b679f761 not found: ID does not exist" containerID="349b12ca93d59606bdc1c3b48fc62fda5787030b41588a0956c96148b679f761" Nov 26 07:08:48 crc kubenswrapper[4492]: I1126 07:08:48.007507 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"349b12ca93d59606bdc1c3b48fc62fda5787030b41588a0956c96148b679f761"} err="failed to get container status \"349b12ca93d59606bdc1c3b48fc62fda5787030b41588a0956c96148b679f761\": rpc error: code = NotFound desc = could not find container \"349b12ca93d59606bdc1c3b48fc62fda5787030b41588a0956c96148b679f761\": container with ID starting with 349b12ca93d59606bdc1c3b48fc62fda5787030b41588a0956c96148b679f761 not found: ID does not exist" Nov 26 07:08:48 crc kubenswrapper[4492]: I1126 07:08:48.007531 4492 scope.go:117] "RemoveContainer" containerID="4bdf3252f1d81a4329cc700df11d7f4faa66966b2c0e4cdba0fd9fe3b4bac855" Nov 26 07:08:48 crc kubenswrapper[4492]: I1126 07:08:48.007760 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4bdf3252f1d81a4329cc700df11d7f4faa66966b2c0e4cdba0fd9fe3b4bac855"} err="failed to get container status \"4bdf3252f1d81a4329cc700df11d7f4faa66966b2c0e4cdba0fd9fe3b4bac855\": rpc error: code = NotFound desc = could not find container \"4bdf3252f1d81a4329cc700df11d7f4faa66966b2c0e4cdba0fd9fe3b4bac855\": container with ID starting with 4bdf3252f1d81a4329cc700df11d7f4faa66966b2c0e4cdba0fd9fe3b4bac855 not found: ID does not exist" Nov 26 07:08:48 crc kubenswrapper[4492]: I1126 07:08:48.007844 4492 scope.go:117] "RemoveContainer" containerID="349b12ca93d59606bdc1c3b48fc62fda5787030b41588a0956c96148b679f761" Nov 26 07:08:48 crc kubenswrapper[4492]: I1126 07:08:48.008142 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"349b12ca93d59606bdc1c3b48fc62fda5787030b41588a0956c96148b679f761"} err="failed to get container status \"349b12ca93d59606bdc1c3b48fc62fda5787030b41588a0956c96148b679f761\": rpc error: code = NotFound desc = could not find container \"349b12ca93d59606bdc1c3b48fc62fda5787030b41588a0956c96148b679f761\": container with ID starting with 349b12ca93d59606bdc1c3b48fc62fda5787030b41588a0956c96148b679f761 not found: ID does not exist" Nov 26 07:08:48 crc kubenswrapper[4492]: I1126 07:08:48.028417 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 26 07:08:48 crc kubenswrapper[4492]: E1126 07:08:48.029644 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e2c3d448-6c92-456b-a3db-49b9774ebb58" containerName="nova-api-api" Nov 26 07:08:48 crc kubenswrapper[4492]: I1126 07:08:48.029722 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="e2c3d448-6c92-456b-a3db-49b9774ebb58" containerName="nova-api-api" Nov 26 07:08:48 crc kubenswrapper[4492]: E1126 07:08:48.029798 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0ac796a9-7e9c-44f1-9fe4-0b7457c53334" containerName="dnsmasq-dns" Nov 26 07:08:48 crc kubenswrapper[4492]: I1126 07:08:48.029840 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="0ac796a9-7e9c-44f1-9fe4-0b7457c53334" containerName="dnsmasq-dns" Nov 26 07:08:48 crc kubenswrapper[4492]: E1126 07:08:48.029892 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="74c85bf8-cdcf-4e64-83cf-5f62d3c90b09" containerName="nova-manage" Nov 26 07:08:48 crc kubenswrapper[4492]: I1126 07:08:48.029930 4492 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="74c85bf8-cdcf-4e64-83cf-5f62d3c90b09" containerName="nova-manage" Nov 26 07:08:48 crc kubenswrapper[4492]: E1126 07:08:48.030003 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0ac796a9-7e9c-44f1-9fe4-0b7457c53334" containerName="init" Nov 26 07:08:48 crc kubenswrapper[4492]: I1126 07:08:48.030054 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="0ac796a9-7e9c-44f1-9fe4-0b7457c53334" containerName="init" Nov 26 07:08:48 crc kubenswrapper[4492]: E1126 07:08:48.030108 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e2c3d448-6c92-456b-a3db-49b9774ebb58" containerName="nova-api-log" Nov 26 07:08:48 crc kubenswrapper[4492]: I1126 07:08:48.030146 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="e2c3d448-6c92-456b-a3db-49b9774ebb58" containerName="nova-api-log" Nov 26 07:08:48 crc kubenswrapper[4492]: I1126 07:08:48.030695 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="0ac796a9-7e9c-44f1-9fe4-0b7457c53334" containerName="dnsmasq-dns" Nov 26 07:08:48 crc kubenswrapper[4492]: I1126 07:08:48.030779 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="74c85bf8-cdcf-4e64-83cf-5f62d3c90b09" containerName="nova-manage" Nov 26 07:08:48 crc kubenswrapper[4492]: I1126 07:08:48.030828 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="e2c3d448-6c92-456b-a3db-49b9774ebb58" containerName="nova-api-log" Nov 26 07:08:48 crc kubenswrapper[4492]: I1126 07:08:48.030892 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="e2c3d448-6c92-456b-a3db-49b9774ebb58" containerName="nova-api-api" Nov 26 07:08:48 crc kubenswrapper[4492]: I1126 07:08:48.035018 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 26 07:08:48 crc kubenswrapper[4492]: I1126 07:08:48.038039 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Nov 26 07:08:48 crc kubenswrapper[4492]: I1126 07:08:48.038324 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Nov 26 07:08:48 crc kubenswrapper[4492]: I1126 07:08:48.038718 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 26 07:08:48 crc kubenswrapper[4492]: I1126 07:08:48.046491 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 26 07:08:48 crc kubenswrapper[4492]: I1126 07:08:48.098974 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zzmzj\" (UniqueName: \"kubernetes.io/projected/67081b5d-2788-422e-8e4c-69aa8b1b1321-kube-api-access-zzmzj\") pod \"nova-api-0\" (UID: \"67081b5d-2788-422e-8e4c-69aa8b1b1321\") " pod="openstack/nova-api-0" Nov 26 07:08:48 crc kubenswrapper[4492]: I1126 07:08:48.099054 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/67081b5d-2788-422e-8e4c-69aa8b1b1321-internal-tls-certs\") pod \"nova-api-0\" (UID: \"67081b5d-2788-422e-8e4c-69aa8b1b1321\") " pod="openstack/nova-api-0" Nov 26 07:08:48 crc kubenswrapper[4492]: I1126 07:08:48.099088 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/67081b5d-2788-422e-8e4c-69aa8b1b1321-public-tls-certs\") pod \"nova-api-0\" (UID: \"67081b5d-2788-422e-8e4c-69aa8b1b1321\") 
" pod="openstack/nova-api-0" Nov 26 07:08:48 crc kubenswrapper[4492]: I1126 07:08:48.099113 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/67081b5d-2788-422e-8e4c-69aa8b1b1321-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"67081b5d-2788-422e-8e4c-69aa8b1b1321\") " pod="openstack/nova-api-0" Nov 26 07:08:48 crc kubenswrapper[4492]: I1126 07:08:48.099160 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/67081b5d-2788-422e-8e4c-69aa8b1b1321-config-data\") pod \"nova-api-0\" (UID: \"67081b5d-2788-422e-8e4c-69aa8b1b1321\") " pod="openstack/nova-api-0" Nov 26 07:08:48 crc kubenswrapper[4492]: I1126 07:08:48.099217 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/67081b5d-2788-422e-8e4c-69aa8b1b1321-logs\") pod \"nova-api-0\" (UID: \"67081b5d-2788-422e-8e4c-69aa8b1b1321\") " pod="openstack/nova-api-0" Nov 26 07:08:48 crc kubenswrapper[4492]: I1126 07:08:48.200904 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zzmzj\" (UniqueName: \"kubernetes.io/projected/67081b5d-2788-422e-8e4c-69aa8b1b1321-kube-api-access-zzmzj\") pod \"nova-api-0\" (UID: \"67081b5d-2788-422e-8e4c-69aa8b1b1321\") " pod="openstack/nova-api-0" Nov 26 07:08:48 crc kubenswrapper[4492]: I1126 07:08:48.200980 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/67081b5d-2788-422e-8e4c-69aa8b1b1321-internal-tls-certs\") pod \"nova-api-0\" (UID: \"67081b5d-2788-422e-8e4c-69aa8b1b1321\") " pod="openstack/nova-api-0" Nov 26 07:08:48 crc kubenswrapper[4492]: I1126 07:08:48.201017 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/67081b5d-2788-422e-8e4c-69aa8b1b1321-public-tls-certs\") pod \"nova-api-0\" (UID: \"67081b5d-2788-422e-8e4c-69aa8b1b1321\") " pod="openstack/nova-api-0" Nov 26 07:08:48 crc kubenswrapper[4492]: I1126 07:08:48.201050 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/67081b5d-2788-422e-8e4c-69aa8b1b1321-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"67081b5d-2788-422e-8e4c-69aa8b1b1321\") " pod="openstack/nova-api-0" Nov 26 07:08:48 crc kubenswrapper[4492]: I1126 07:08:48.201097 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/67081b5d-2788-422e-8e4c-69aa8b1b1321-config-data\") pod \"nova-api-0\" (UID: \"67081b5d-2788-422e-8e4c-69aa8b1b1321\") " pod="openstack/nova-api-0" Nov 26 07:08:48 crc kubenswrapper[4492]: I1126 07:08:48.201135 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/67081b5d-2788-422e-8e4c-69aa8b1b1321-logs\") pod \"nova-api-0\" (UID: \"67081b5d-2788-422e-8e4c-69aa8b1b1321\") " pod="openstack/nova-api-0" Nov 26 07:08:48 crc kubenswrapper[4492]: I1126 07:08:48.201691 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/67081b5d-2788-422e-8e4c-69aa8b1b1321-logs\") pod \"nova-api-0\" (UID: \"67081b5d-2788-422e-8e4c-69aa8b1b1321\") " 
pod="openstack/nova-api-0" Nov 26 07:08:48 crc kubenswrapper[4492]: I1126 07:08:48.207673 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/67081b5d-2788-422e-8e4c-69aa8b1b1321-config-data\") pod \"nova-api-0\" (UID: \"67081b5d-2788-422e-8e4c-69aa8b1b1321\") " pod="openstack/nova-api-0" Nov 26 07:08:48 crc kubenswrapper[4492]: I1126 07:08:48.208291 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/67081b5d-2788-422e-8e4c-69aa8b1b1321-public-tls-certs\") pod \"nova-api-0\" (UID: \"67081b5d-2788-422e-8e4c-69aa8b1b1321\") " pod="openstack/nova-api-0" Nov 26 07:08:48 crc kubenswrapper[4492]: I1126 07:08:48.210604 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/67081b5d-2788-422e-8e4c-69aa8b1b1321-internal-tls-certs\") pod \"nova-api-0\" (UID: \"67081b5d-2788-422e-8e4c-69aa8b1b1321\") " pod="openstack/nova-api-0" Nov 26 07:08:48 crc kubenswrapper[4492]: I1126 07:08:48.210736 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/67081b5d-2788-422e-8e4c-69aa8b1b1321-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"67081b5d-2788-422e-8e4c-69aa8b1b1321\") " pod="openstack/nova-api-0" Nov 26 07:08:48 crc kubenswrapper[4492]: I1126 07:08:48.218632 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zzmzj\" (UniqueName: \"kubernetes.io/projected/67081b5d-2788-422e-8e4c-69aa8b1b1321-kube-api-access-zzmzj\") pod \"nova-api-0\" (UID: \"67081b5d-2788-422e-8e4c-69aa8b1b1321\") " pod="openstack/nova-api-0" Nov 26 07:08:48 crc kubenswrapper[4492]: I1126 07:08:48.358999 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 26 07:08:48 crc kubenswrapper[4492]: I1126 07:08:48.460638 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e2c3d448-6c92-456b-a3db-49b9774ebb58" path="/var/lib/kubelet/pods/e2c3d448-6c92-456b-a3db-49b9774ebb58/volumes" Nov 26 07:08:48 crc kubenswrapper[4492]: E1126 07:08:48.742048 4492 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="9450d16206557faeaf972aa2373cfef2520cbfd4b9b87cdf6cdee7a4b2dcef58" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 26 07:08:48 crc kubenswrapper[4492]: E1126 07:08:48.748239 4492 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="9450d16206557faeaf972aa2373cfef2520cbfd4b9b87cdf6cdee7a4b2dcef58" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 26 07:08:48 crc kubenswrapper[4492]: E1126 07:08:48.750627 4492 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="9450d16206557faeaf972aa2373cfef2520cbfd4b9b87cdf6cdee7a4b2dcef58" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 26 07:08:48 crc kubenswrapper[4492]: E1126 07:08:48.750683 4492 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="53275682-2daa-41b3-a4b8-daaf0156e239" containerName="nova-scheduler-scheduler" Nov 26 07:08:48 crc kubenswrapper[4492]: I1126 07:08:48.793918 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 26 07:08:48 crc kubenswrapper[4492]: W1126 07:08:48.794762 4492 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod67081b5d_2788_422e_8e4c_69aa8b1b1321.slice/crio-a1aea1ad7aa299783ab41e6bbfda86139921f015112cb29417dd94cc2ede2c88 WatchSource:0}: Error finding container a1aea1ad7aa299783ab41e6bbfda86139921f015112cb29417dd94cc2ede2c88: Status 404 returned error can't find the container with id a1aea1ad7aa299783ab41e6bbfda86139921f015112cb29417dd94cc2ede2c88 Nov 26 07:08:48 crc kubenswrapper[4492]: I1126 07:08:48.950221 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"67081b5d-2788-422e-8e4c-69aa8b1b1321","Type":"ContainerStarted","Data":"a1aea1ad7aa299783ab41e6bbfda86139921f015112cb29417dd94cc2ede2c88"} Nov 26 07:08:49 crc kubenswrapper[4492]: I1126 07:08:49.961035 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"67081b5d-2788-422e-8e4c-69aa8b1b1321","Type":"ContainerStarted","Data":"2fe37baf232a652429f955969ff1b3fae587a62ece65da71b9444785d6fd6ac2"} Nov 26 07:08:49 crc kubenswrapper[4492]: I1126 07:08:49.961392 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"67081b5d-2788-422e-8e4c-69aa8b1b1321","Type":"ContainerStarted","Data":"69255c51ab1f9d7a582b9e64c6355970ebfc6fa609d54d5ad3fa72f9e143c2b0"} Nov 26 07:08:49 crc kubenswrapper[4492]: I1126 07:08:49.986133 4492 pod_startup_latency_tracker.go:104] "Observed pod 
startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.9861079090000002 podStartE2EDuration="2.986107909s" podCreationTimestamp="2025-11-26 07:08:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:08:49.979205111 +0000 UTC m=+1225.863093410" watchObservedRunningTime="2025-11-26 07:08:49.986107909 +0000 UTC m=+1225.869996208" Nov 26 07:08:50 crc kubenswrapper[4492]: I1126 07:08:50.295845 4492 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="ace53bbf-c004-4557-ad30-eac2ac4f64ea" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.209:8775/\": read tcp 10.217.0.2:53932->10.217.0.209:8775: read: connection reset by peer" Nov 26 07:08:50 crc kubenswrapper[4492]: I1126 07:08:50.295906 4492 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="ace53bbf-c004-4557-ad30-eac2ac4f64ea" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.209:8775/\": read tcp 10.217.0.2:53944->10.217.0.209:8775: read: connection reset by peer" Nov 26 07:08:50 crc kubenswrapper[4492]: I1126 07:08:50.695835 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 07:08:50 crc kubenswrapper[4492]: I1126 07:08:50.757872 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g4hbj\" (UniqueName: \"kubernetes.io/projected/ace53bbf-c004-4557-ad30-eac2ac4f64ea-kube-api-access-g4hbj\") pod \"ace53bbf-c004-4557-ad30-eac2ac4f64ea\" (UID: \"ace53bbf-c004-4557-ad30-eac2ac4f64ea\") " Nov 26 07:08:50 crc kubenswrapper[4492]: I1126 07:08:50.758027 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ace53bbf-c004-4557-ad30-eac2ac4f64ea-config-data\") pod \"ace53bbf-c004-4557-ad30-eac2ac4f64ea\" (UID: \"ace53bbf-c004-4557-ad30-eac2ac4f64ea\") " Nov 26 07:08:50 crc kubenswrapper[4492]: I1126 07:08:50.758114 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ace53bbf-c004-4557-ad30-eac2ac4f64ea-logs\") pod \"ace53bbf-c004-4557-ad30-eac2ac4f64ea\" (UID: \"ace53bbf-c004-4557-ad30-eac2ac4f64ea\") " Nov 26 07:08:50 crc kubenswrapper[4492]: I1126 07:08:50.758312 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ace53bbf-c004-4557-ad30-eac2ac4f64ea-combined-ca-bundle\") pod \"ace53bbf-c004-4557-ad30-eac2ac4f64ea\" (UID: \"ace53bbf-c004-4557-ad30-eac2ac4f64ea\") " Nov 26 07:08:50 crc kubenswrapper[4492]: I1126 07:08:50.758389 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/ace53bbf-c004-4557-ad30-eac2ac4f64ea-nova-metadata-tls-certs\") pod \"ace53bbf-c004-4557-ad30-eac2ac4f64ea\" (UID: \"ace53bbf-c004-4557-ad30-eac2ac4f64ea\") " Nov 26 07:08:50 crc kubenswrapper[4492]: I1126 07:08:50.760387 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ace53bbf-c004-4557-ad30-eac2ac4f64ea-logs" (OuterVolumeSpecName: "logs") pod "ace53bbf-c004-4557-ad30-eac2ac4f64ea" (UID: "ace53bbf-c004-4557-ad30-eac2ac4f64ea"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:08:50 crc kubenswrapper[4492]: I1126 07:08:50.770312 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ace53bbf-c004-4557-ad30-eac2ac4f64ea-kube-api-access-g4hbj" (OuterVolumeSpecName: "kube-api-access-g4hbj") pod "ace53bbf-c004-4557-ad30-eac2ac4f64ea" (UID: "ace53bbf-c004-4557-ad30-eac2ac4f64ea"). InnerVolumeSpecName "kube-api-access-g4hbj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:08:50 crc kubenswrapper[4492]: I1126 07:08:50.811578 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ace53bbf-c004-4557-ad30-eac2ac4f64ea-config-data" (OuterVolumeSpecName: "config-data") pod "ace53bbf-c004-4557-ad30-eac2ac4f64ea" (UID: "ace53bbf-c004-4557-ad30-eac2ac4f64ea"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:08:50 crc kubenswrapper[4492]: I1126 07:08:50.857598 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ace53bbf-c004-4557-ad30-eac2ac4f64ea-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ace53bbf-c004-4557-ad30-eac2ac4f64ea" (UID: "ace53bbf-c004-4557-ad30-eac2ac4f64ea"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:08:50 crc kubenswrapper[4492]: I1126 07:08:50.862581 4492 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ace53bbf-c004-4557-ad30-eac2ac4f64ea-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 07:08:50 crc kubenswrapper[4492]: I1126 07:08:50.862613 4492 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ace53bbf-c004-4557-ad30-eac2ac4f64ea-logs\") on node \"crc\" DevicePath \"\"" Nov 26 07:08:50 crc kubenswrapper[4492]: I1126 07:08:50.862624 4492 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ace53bbf-c004-4557-ad30-eac2ac4f64ea-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:08:50 crc kubenswrapper[4492]: I1126 07:08:50.862641 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g4hbj\" (UniqueName: \"kubernetes.io/projected/ace53bbf-c004-4557-ad30-eac2ac4f64ea-kube-api-access-g4hbj\") on node \"crc\" DevicePath \"\"" Nov 26 07:08:50 crc kubenswrapper[4492]: I1126 07:08:50.873293 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ace53bbf-c004-4557-ad30-eac2ac4f64ea-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "ace53bbf-c004-4557-ad30-eac2ac4f64ea" (UID: "ace53bbf-c004-4557-ad30-eac2ac4f64ea"). InnerVolumeSpecName "nova-metadata-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:08:50 crc kubenswrapper[4492]: I1126 07:08:50.963675 4492 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/ace53bbf-c004-4557-ad30-eac2ac4f64ea-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 07:08:50 crc kubenswrapper[4492]: I1126 07:08:50.971808 4492 generic.go:334] "Generic (PLEG): container finished" podID="ace53bbf-c004-4557-ad30-eac2ac4f64ea" containerID="0138605cdc97dbe5fb3f81b092a81eb2f47abe4a1cc8364fbf79f3a20b46e247" exitCode=0 Nov 26 07:08:50 crc kubenswrapper[4492]: I1126 07:08:50.971915 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 07:08:50 crc kubenswrapper[4492]: I1126 07:08:50.971873 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"ace53bbf-c004-4557-ad30-eac2ac4f64ea","Type":"ContainerDied","Data":"0138605cdc97dbe5fb3f81b092a81eb2f47abe4a1cc8364fbf79f3a20b46e247"} Nov 26 07:08:50 crc kubenswrapper[4492]: I1126 07:08:50.972084 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"ace53bbf-c004-4557-ad30-eac2ac4f64ea","Type":"ContainerDied","Data":"7c88123ee23029eb1d855406828df9b392a8268ae13d6e0c36b1cf3e32c6468b"} Nov 26 07:08:50 crc kubenswrapper[4492]: I1126 07:08:50.972115 4492 scope.go:117] "RemoveContainer" containerID="0138605cdc97dbe5fb3f81b092a81eb2f47abe4a1cc8364fbf79f3a20b46e247" Nov 26 07:08:51 crc kubenswrapper[4492]: I1126 07:08:51.000193 4492 scope.go:117] "RemoveContainer" containerID="66b07abe56febf6635ffeae57fc486a01b07c25ca17c7bd948b10f27cae68455" Nov 26 07:08:51 crc kubenswrapper[4492]: I1126 07:08:51.016043 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 07:08:51 crc kubenswrapper[4492]: I1126 07:08:51.018078 4492 scope.go:117] "RemoveContainer" containerID="0138605cdc97dbe5fb3f81b092a81eb2f47abe4a1cc8364fbf79f3a20b46e247" Nov 26 07:08:51 crc kubenswrapper[4492]: E1126 07:08:51.019453 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0138605cdc97dbe5fb3f81b092a81eb2f47abe4a1cc8364fbf79f3a20b46e247\": container with ID starting with 0138605cdc97dbe5fb3f81b092a81eb2f47abe4a1cc8364fbf79f3a20b46e247 not found: ID does not exist" containerID="0138605cdc97dbe5fb3f81b092a81eb2f47abe4a1cc8364fbf79f3a20b46e247" Nov 26 07:08:51 crc kubenswrapper[4492]: I1126 07:08:51.019557 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0138605cdc97dbe5fb3f81b092a81eb2f47abe4a1cc8364fbf79f3a20b46e247"} err="failed to get container status \"0138605cdc97dbe5fb3f81b092a81eb2f47abe4a1cc8364fbf79f3a20b46e247\": rpc error: code = NotFound desc = could not find container \"0138605cdc97dbe5fb3f81b092a81eb2f47abe4a1cc8364fbf79f3a20b46e247\": container with ID starting with 0138605cdc97dbe5fb3f81b092a81eb2f47abe4a1cc8364fbf79f3a20b46e247 not found: ID does not exist" Nov 26 07:08:51 crc kubenswrapper[4492]: I1126 07:08:51.019630 4492 scope.go:117] "RemoveContainer" containerID="66b07abe56febf6635ffeae57fc486a01b07c25ca17c7bd948b10f27cae68455" Nov 26 07:08:51 crc kubenswrapper[4492]: E1126 07:08:51.019955 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"66b07abe56febf6635ffeae57fc486a01b07c25ca17c7bd948b10f27cae68455\": 
container with ID starting with 66b07abe56febf6635ffeae57fc486a01b07c25ca17c7bd948b10f27cae68455 not found: ID does not exist" containerID="66b07abe56febf6635ffeae57fc486a01b07c25ca17c7bd948b10f27cae68455" Nov 26 07:08:51 crc kubenswrapper[4492]: I1126 07:08:51.020028 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"66b07abe56febf6635ffeae57fc486a01b07c25ca17c7bd948b10f27cae68455"} err="failed to get container status \"66b07abe56febf6635ffeae57fc486a01b07c25ca17c7bd948b10f27cae68455\": rpc error: code = NotFound desc = could not find container \"66b07abe56febf6635ffeae57fc486a01b07c25ca17c7bd948b10f27cae68455\": container with ID starting with 66b07abe56febf6635ffeae57fc486a01b07c25ca17c7bd948b10f27cae68455 not found: ID does not exist" Nov 26 07:08:51 crc kubenswrapper[4492]: I1126 07:08:51.029720 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 07:08:51 crc kubenswrapper[4492]: I1126 07:08:51.037148 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 26 07:08:51 crc kubenswrapper[4492]: E1126 07:08:51.037641 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ace53bbf-c004-4557-ad30-eac2ac4f64ea" containerName="nova-metadata-log" Nov 26 07:08:51 crc kubenswrapper[4492]: I1126 07:08:51.037662 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="ace53bbf-c004-4557-ad30-eac2ac4f64ea" containerName="nova-metadata-log" Nov 26 07:08:51 crc kubenswrapper[4492]: E1126 07:08:51.037703 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ace53bbf-c004-4557-ad30-eac2ac4f64ea" containerName="nova-metadata-metadata" Nov 26 07:08:51 crc kubenswrapper[4492]: I1126 07:08:51.037710 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="ace53bbf-c004-4557-ad30-eac2ac4f64ea" containerName="nova-metadata-metadata" Nov 26 07:08:51 crc kubenswrapper[4492]: I1126 07:08:51.037900 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="ace53bbf-c004-4557-ad30-eac2ac4f64ea" containerName="nova-metadata-log" Nov 26 07:08:51 crc kubenswrapper[4492]: I1126 07:08:51.037919 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="ace53bbf-c004-4557-ad30-eac2ac4f64ea" containerName="nova-metadata-metadata" Nov 26 07:08:51 crc kubenswrapper[4492]: I1126 07:08:51.039032 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 07:08:51 crc kubenswrapper[4492]: I1126 07:08:51.042696 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 26 07:08:51 crc kubenswrapper[4492]: I1126 07:08:51.044388 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Nov 26 07:08:51 crc kubenswrapper[4492]: I1126 07:08:51.065723 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/aec6cd1c-ba45-4e6d-869d-7286a4bb4947-config-data\") pod \"nova-metadata-0\" (UID: \"aec6cd1c-ba45-4e6d-869d-7286a4bb4947\") " pod="openstack/nova-metadata-0" Nov 26 07:08:51 crc kubenswrapper[4492]: I1126 07:08:51.065823 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/aec6cd1c-ba45-4e6d-869d-7286a4bb4947-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"aec6cd1c-ba45-4e6d-869d-7286a4bb4947\") " pod="openstack/nova-metadata-0" Nov 26 07:08:51 crc kubenswrapper[4492]: I1126 07:08:51.065852 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/aec6cd1c-ba45-4e6d-869d-7286a4bb4947-logs\") pod \"nova-metadata-0\" (UID: \"aec6cd1c-ba45-4e6d-869d-7286a4bb4947\") " pod="openstack/nova-metadata-0" Nov 26 07:08:51 crc kubenswrapper[4492]: I1126 07:08:51.065895 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m6sbf\" (UniqueName: \"kubernetes.io/projected/aec6cd1c-ba45-4e6d-869d-7286a4bb4947-kube-api-access-m6sbf\") pod \"nova-metadata-0\" (UID: \"aec6cd1c-ba45-4e6d-869d-7286a4bb4947\") " pod="openstack/nova-metadata-0" Nov 26 07:08:51 crc kubenswrapper[4492]: I1126 07:08:51.065932 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aec6cd1c-ba45-4e6d-869d-7286a4bb4947-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"aec6cd1c-ba45-4e6d-869d-7286a4bb4947\") " pod="openstack/nova-metadata-0" Nov 26 07:08:51 crc kubenswrapper[4492]: I1126 07:08:51.066241 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 07:08:51 crc kubenswrapper[4492]: I1126 07:08:51.168106 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/aec6cd1c-ba45-4e6d-869d-7286a4bb4947-config-data\") pod \"nova-metadata-0\" (UID: \"aec6cd1c-ba45-4e6d-869d-7286a4bb4947\") " pod="openstack/nova-metadata-0" Nov 26 07:08:51 crc kubenswrapper[4492]: I1126 07:08:51.168251 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/aec6cd1c-ba45-4e6d-869d-7286a4bb4947-logs\") pod \"nova-metadata-0\" (UID: \"aec6cd1c-ba45-4e6d-869d-7286a4bb4947\") " pod="openstack/nova-metadata-0" Nov 26 07:08:51 crc kubenswrapper[4492]: I1126 07:08:51.168282 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/aec6cd1c-ba45-4e6d-869d-7286a4bb4947-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"aec6cd1c-ba45-4e6d-869d-7286a4bb4947\") " pod="openstack/nova-metadata-0" Nov 26 
07:08:51 crc kubenswrapper[4492]: I1126 07:08:51.168319 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m6sbf\" (UniqueName: \"kubernetes.io/projected/aec6cd1c-ba45-4e6d-869d-7286a4bb4947-kube-api-access-m6sbf\") pod \"nova-metadata-0\" (UID: \"aec6cd1c-ba45-4e6d-869d-7286a4bb4947\") " pod="openstack/nova-metadata-0" Nov 26 07:08:51 crc kubenswrapper[4492]: I1126 07:08:51.168357 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aec6cd1c-ba45-4e6d-869d-7286a4bb4947-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"aec6cd1c-ba45-4e6d-869d-7286a4bb4947\") " pod="openstack/nova-metadata-0" Nov 26 07:08:51 crc kubenswrapper[4492]: I1126 07:08:51.168910 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/aec6cd1c-ba45-4e6d-869d-7286a4bb4947-logs\") pod \"nova-metadata-0\" (UID: \"aec6cd1c-ba45-4e6d-869d-7286a4bb4947\") " pod="openstack/nova-metadata-0" Nov 26 07:08:51 crc kubenswrapper[4492]: I1126 07:08:51.173421 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/aec6cd1c-ba45-4e6d-869d-7286a4bb4947-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"aec6cd1c-ba45-4e6d-869d-7286a4bb4947\") " pod="openstack/nova-metadata-0" Nov 26 07:08:51 crc kubenswrapper[4492]: I1126 07:08:51.173426 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aec6cd1c-ba45-4e6d-869d-7286a4bb4947-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"aec6cd1c-ba45-4e6d-869d-7286a4bb4947\") " pod="openstack/nova-metadata-0" Nov 26 07:08:51 crc kubenswrapper[4492]: I1126 07:08:51.180566 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/aec6cd1c-ba45-4e6d-869d-7286a4bb4947-config-data\") pod \"nova-metadata-0\" (UID: \"aec6cd1c-ba45-4e6d-869d-7286a4bb4947\") " pod="openstack/nova-metadata-0" Nov 26 07:08:51 crc kubenswrapper[4492]: I1126 07:08:51.183696 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m6sbf\" (UniqueName: \"kubernetes.io/projected/aec6cd1c-ba45-4e6d-869d-7286a4bb4947-kube-api-access-m6sbf\") pod \"nova-metadata-0\" (UID: \"aec6cd1c-ba45-4e6d-869d-7286a4bb4947\") " pod="openstack/nova-metadata-0" Nov 26 07:08:51 crc kubenswrapper[4492]: I1126 07:08:51.356712 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 07:08:51 crc kubenswrapper[4492]: I1126 07:08:51.770004 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 07:08:51 crc kubenswrapper[4492]: I1126 07:08:51.984741 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"aec6cd1c-ba45-4e6d-869d-7286a4bb4947","Type":"ContainerStarted","Data":"42cdd1988e79067f9de6c878f4a312eec1a3a5c8ae638c79975f8d124a7b8490"} Nov 26 07:08:51 crc kubenswrapper[4492]: I1126 07:08:51.984809 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"aec6cd1c-ba45-4e6d-869d-7286a4bb4947","Type":"ContainerStarted","Data":"459929373d5649bc2b228e3f49313abe7c548db2fea434538f37dcca8b41bfb9"} Nov 26 07:08:52 crc kubenswrapper[4492]: I1126 07:08:52.279935 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 26 07:08:52 crc kubenswrapper[4492]: I1126 07:08:52.397093 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/53275682-2daa-41b3-a4b8-daaf0156e239-combined-ca-bundle\") pod \"53275682-2daa-41b3-a4b8-daaf0156e239\" (UID: \"53275682-2daa-41b3-a4b8-daaf0156e239\") " Nov 26 07:08:52 crc kubenswrapper[4492]: I1126 07:08:52.397259 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/53275682-2daa-41b3-a4b8-daaf0156e239-config-data\") pod \"53275682-2daa-41b3-a4b8-daaf0156e239\" (UID: \"53275682-2daa-41b3-a4b8-daaf0156e239\") " Nov 26 07:08:52 crc kubenswrapper[4492]: I1126 07:08:52.397724 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sqwpb\" (UniqueName: \"kubernetes.io/projected/53275682-2daa-41b3-a4b8-daaf0156e239-kube-api-access-sqwpb\") pod \"53275682-2daa-41b3-a4b8-daaf0156e239\" (UID: \"53275682-2daa-41b3-a4b8-daaf0156e239\") " Nov 26 07:08:52 crc kubenswrapper[4492]: I1126 07:08:52.402737 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/53275682-2daa-41b3-a4b8-daaf0156e239-kube-api-access-sqwpb" (OuterVolumeSpecName: "kube-api-access-sqwpb") pod "53275682-2daa-41b3-a4b8-daaf0156e239" (UID: "53275682-2daa-41b3-a4b8-daaf0156e239"). InnerVolumeSpecName "kube-api-access-sqwpb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:08:52 crc kubenswrapper[4492]: I1126 07:08:52.421332 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/53275682-2daa-41b3-a4b8-daaf0156e239-config-data" (OuterVolumeSpecName: "config-data") pod "53275682-2daa-41b3-a4b8-daaf0156e239" (UID: "53275682-2daa-41b3-a4b8-daaf0156e239"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:08:52 crc kubenswrapper[4492]: I1126 07:08:52.421459 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/53275682-2daa-41b3-a4b8-daaf0156e239-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "53275682-2daa-41b3-a4b8-daaf0156e239" (UID: "53275682-2daa-41b3-a4b8-daaf0156e239"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:08:52 crc kubenswrapper[4492]: I1126 07:08:52.448074 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ace53bbf-c004-4557-ad30-eac2ac4f64ea" path="/var/lib/kubelet/pods/ace53bbf-c004-4557-ad30-eac2ac4f64ea/volumes" Nov 26 07:08:52 crc kubenswrapper[4492]: I1126 07:08:52.500961 4492 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/53275682-2daa-41b3-a4b8-daaf0156e239-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:08:52 crc kubenswrapper[4492]: I1126 07:08:52.500994 4492 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/53275682-2daa-41b3-a4b8-daaf0156e239-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 07:08:52 crc kubenswrapper[4492]: I1126 07:08:52.501005 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sqwpb\" (UniqueName: \"kubernetes.io/projected/53275682-2daa-41b3-a4b8-daaf0156e239-kube-api-access-sqwpb\") on node \"crc\" DevicePath \"\"" Nov 26 07:08:53 crc kubenswrapper[4492]: I1126 07:08:53.002805 4492 generic.go:334] "Generic (PLEG): container finished" podID="53275682-2daa-41b3-a4b8-daaf0156e239" containerID="9450d16206557faeaf972aa2373cfef2520cbfd4b9b87cdf6cdee7a4b2dcef58" exitCode=0 Nov 26 07:08:53 crc kubenswrapper[4492]: I1126 07:08:53.002895 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 26 07:08:53 crc kubenswrapper[4492]: I1126 07:08:53.002958 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"53275682-2daa-41b3-a4b8-daaf0156e239","Type":"ContainerDied","Data":"9450d16206557faeaf972aa2373cfef2520cbfd4b9b87cdf6cdee7a4b2dcef58"} Nov 26 07:08:53 crc kubenswrapper[4492]: I1126 07:08:53.003839 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"53275682-2daa-41b3-a4b8-daaf0156e239","Type":"ContainerDied","Data":"7d7e206f4ac7559857fa00e63a15c6dfebbcd19eb483065a853268acfe0625cd"} Nov 26 07:08:53 crc kubenswrapper[4492]: I1126 07:08:53.003900 4492 scope.go:117] "RemoveContainer" containerID="9450d16206557faeaf972aa2373cfef2520cbfd4b9b87cdf6cdee7a4b2dcef58" Nov 26 07:08:53 crc kubenswrapper[4492]: I1126 07:08:53.008548 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"aec6cd1c-ba45-4e6d-869d-7286a4bb4947","Type":"ContainerStarted","Data":"3a2e598fb19d3156adc1e0e0c96c32caa396349a611f2e232d1a0a0fdfd8a6de"} Nov 26 07:08:53 crc kubenswrapper[4492]: I1126 07:08:53.026145 4492 scope.go:117] "RemoveContainer" containerID="9450d16206557faeaf972aa2373cfef2520cbfd4b9b87cdf6cdee7a4b2dcef58" Nov 26 07:08:53 crc kubenswrapper[4492]: E1126 07:08:53.026447 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9450d16206557faeaf972aa2373cfef2520cbfd4b9b87cdf6cdee7a4b2dcef58\": container with ID starting with 9450d16206557faeaf972aa2373cfef2520cbfd4b9b87cdf6cdee7a4b2dcef58 not found: ID does not exist" containerID="9450d16206557faeaf972aa2373cfef2520cbfd4b9b87cdf6cdee7a4b2dcef58" Nov 26 07:08:53 crc kubenswrapper[4492]: I1126 07:08:53.026532 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9450d16206557faeaf972aa2373cfef2520cbfd4b9b87cdf6cdee7a4b2dcef58"} err="failed to get container status 
\"9450d16206557faeaf972aa2373cfef2520cbfd4b9b87cdf6cdee7a4b2dcef58\": rpc error: code = NotFound desc = could not find container \"9450d16206557faeaf972aa2373cfef2520cbfd4b9b87cdf6cdee7a4b2dcef58\": container with ID starting with 9450d16206557faeaf972aa2373cfef2520cbfd4b9b87cdf6cdee7a4b2dcef58 not found: ID does not exist" Nov 26 07:08:53 crc kubenswrapper[4492]: I1126 07:08:53.033613 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.033595197 podStartE2EDuration="2.033595197s" podCreationTimestamp="2025-11-26 07:08:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:08:53.031695697 +0000 UTC m=+1228.915583994" watchObservedRunningTime="2025-11-26 07:08:53.033595197 +0000 UTC m=+1228.917483495" Nov 26 07:08:53 crc kubenswrapper[4492]: I1126 07:08:53.051837 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 07:08:53 crc kubenswrapper[4492]: I1126 07:08:53.061193 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 07:08:53 crc kubenswrapper[4492]: I1126 07:08:53.067426 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 07:08:53 crc kubenswrapper[4492]: E1126 07:08:53.068002 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="53275682-2daa-41b3-a4b8-daaf0156e239" containerName="nova-scheduler-scheduler" Nov 26 07:08:53 crc kubenswrapper[4492]: I1126 07:08:53.068024 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="53275682-2daa-41b3-a4b8-daaf0156e239" containerName="nova-scheduler-scheduler" Nov 26 07:08:53 crc kubenswrapper[4492]: I1126 07:08:53.068211 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="53275682-2daa-41b3-a4b8-daaf0156e239" containerName="nova-scheduler-scheduler" Nov 26 07:08:53 crc kubenswrapper[4492]: I1126 07:08:53.068905 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 26 07:08:53 crc kubenswrapper[4492]: I1126 07:08:53.071382 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 26 07:08:53 crc kubenswrapper[4492]: I1126 07:08:53.076062 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 07:08:53 crc kubenswrapper[4492]: I1126 07:08:53.111358 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7e94662c-44fb-4c7c-b834-4b89eab03adf-config-data\") pod \"nova-scheduler-0\" (UID: \"7e94662c-44fb-4c7c-b834-4b89eab03adf\") " pod="openstack/nova-scheduler-0" Nov 26 07:08:53 crc kubenswrapper[4492]: I1126 07:08:53.111435 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m456l\" (UniqueName: \"kubernetes.io/projected/7e94662c-44fb-4c7c-b834-4b89eab03adf-kube-api-access-m456l\") pod \"nova-scheduler-0\" (UID: \"7e94662c-44fb-4c7c-b834-4b89eab03adf\") " pod="openstack/nova-scheduler-0" Nov 26 07:08:53 crc kubenswrapper[4492]: I1126 07:08:53.111513 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e94662c-44fb-4c7c-b834-4b89eab03adf-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"7e94662c-44fb-4c7c-b834-4b89eab03adf\") " pod="openstack/nova-scheduler-0" Nov 26 07:08:53 crc kubenswrapper[4492]: I1126 07:08:53.212138 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m456l\" (UniqueName: \"kubernetes.io/projected/7e94662c-44fb-4c7c-b834-4b89eab03adf-kube-api-access-m456l\") pod \"nova-scheduler-0\" (UID: \"7e94662c-44fb-4c7c-b834-4b89eab03adf\") " pod="openstack/nova-scheduler-0" Nov 26 07:08:53 crc kubenswrapper[4492]: I1126 07:08:53.212237 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e94662c-44fb-4c7c-b834-4b89eab03adf-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"7e94662c-44fb-4c7c-b834-4b89eab03adf\") " pod="openstack/nova-scheduler-0" Nov 26 07:08:53 crc kubenswrapper[4492]: I1126 07:08:53.212313 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7e94662c-44fb-4c7c-b834-4b89eab03adf-config-data\") pod \"nova-scheduler-0\" (UID: \"7e94662c-44fb-4c7c-b834-4b89eab03adf\") " pod="openstack/nova-scheduler-0" Nov 26 07:08:53 crc kubenswrapper[4492]: I1126 07:08:53.218065 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7e94662c-44fb-4c7c-b834-4b89eab03adf-config-data\") pod \"nova-scheduler-0\" (UID: \"7e94662c-44fb-4c7c-b834-4b89eab03adf\") " pod="openstack/nova-scheduler-0" Nov 26 07:08:53 crc kubenswrapper[4492]: I1126 07:08:53.223865 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e94662c-44fb-4c7c-b834-4b89eab03adf-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"7e94662c-44fb-4c7c-b834-4b89eab03adf\") " pod="openstack/nova-scheduler-0" Nov 26 07:08:53 crc kubenswrapper[4492]: I1126 07:08:53.229927 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m456l\" (UniqueName: 
\"kubernetes.io/projected/7e94662c-44fb-4c7c-b834-4b89eab03adf-kube-api-access-m456l\") pod \"nova-scheduler-0\" (UID: \"7e94662c-44fb-4c7c-b834-4b89eab03adf\") " pod="openstack/nova-scheduler-0" Nov 26 07:08:53 crc kubenswrapper[4492]: I1126 07:08:53.387657 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 26 07:08:53 crc kubenswrapper[4492]: I1126 07:08:53.828440 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 07:08:54 crc kubenswrapper[4492]: I1126 07:08:54.033576 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"7e94662c-44fb-4c7c-b834-4b89eab03adf","Type":"ContainerStarted","Data":"1ccbec73e88c604ad24a8223329822978566b9928143236ad513166b917dd42c"} Nov 26 07:08:54 crc kubenswrapper[4492]: I1126 07:08:54.033987 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"7e94662c-44fb-4c7c-b834-4b89eab03adf","Type":"ContainerStarted","Data":"4040f13a4ff4257e8bd13ff110d1a4d6d9c37d9cf03d3acb259bb3890f95767a"} Nov 26 07:08:54 crc kubenswrapper[4492]: I1126 07:08:54.057787 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=1.057769443 podStartE2EDuration="1.057769443s" podCreationTimestamp="2025-11-26 07:08:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:08:54.051386602 +0000 UTC m=+1229.935274900" watchObservedRunningTime="2025-11-26 07:08:54.057769443 +0000 UTC m=+1229.941657741" Nov 26 07:08:54 crc kubenswrapper[4492]: I1126 07:08:54.451136 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="53275682-2daa-41b3-a4b8-daaf0156e239" path="/var/lib/kubelet/pods/53275682-2daa-41b3-a4b8-daaf0156e239/volumes" Nov 26 07:08:56 crc kubenswrapper[4492]: I1126 07:08:56.357853 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 26 07:08:56 crc kubenswrapper[4492]: I1126 07:08:56.358223 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 26 07:08:58 crc kubenswrapper[4492]: I1126 07:08:58.359424 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 26 07:08:58 crc kubenswrapper[4492]: I1126 07:08:58.359811 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 26 07:08:58 crc kubenswrapper[4492]: I1126 07:08:58.388793 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 26 07:08:59 crc kubenswrapper[4492]: I1126 07:08:59.373294 4492 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="67081b5d-2788-422e-8e4c-69aa8b1b1321" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.0.219:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 26 07:08:59 crc kubenswrapper[4492]: I1126 07:08:59.373584 4492 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="67081b5d-2788-422e-8e4c-69aa8b1b1321" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.0.219:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 26 07:09:01 crc kubenswrapper[4492]: I1126 07:09:01.357605 4492 
kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 26 07:09:01 crc kubenswrapper[4492]: I1126 07:09:01.357989 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 26 07:09:02 crc kubenswrapper[4492]: I1126 07:09:02.378322 4492 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="aec6cd1c-ba45-4e6d-869d-7286a4bb4947" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.220:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 26 07:09:02 crc kubenswrapper[4492]: I1126 07:09:02.378392 4492 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="aec6cd1c-ba45-4e6d-869d-7286a4bb4947" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.220:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 26 07:09:03 crc kubenswrapper[4492]: I1126 07:09:03.388401 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Nov 26 07:09:03 crc kubenswrapper[4492]: I1126 07:09:03.413222 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Nov 26 07:09:04 crc kubenswrapper[4492]: I1126 07:09:04.170185 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Nov 26 07:09:08 crc kubenswrapper[4492]: I1126 07:09:08.149267 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Nov 26 07:09:08 crc kubenswrapper[4492]: I1126 07:09:08.366531 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 26 07:09:08 crc kubenswrapper[4492]: I1126 07:09:08.367216 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 26 07:09:08 crc kubenswrapper[4492]: I1126 07:09:08.371741 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 26 07:09:08 crc kubenswrapper[4492]: I1126 07:09:08.378211 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 26 07:09:09 crc kubenswrapper[4492]: I1126 07:09:09.194312 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 26 07:09:09 crc kubenswrapper[4492]: I1126 07:09:09.204284 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 26 07:09:11 crc kubenswrapper[4492]: I1126 07:09:11.361583 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 26 07:09:11 crc kubenswrapper[4492]: I1126 07:09:11.363188 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 26 07:09:11 crc kubenswrapper[4492]: I1126 07:09:11.367315 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 26 07:09:12 crc kubenswrapper[4492]: I1126 07:09:12.231879 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 26 07:09:18 crc kubenswrapper[4492]: I1126 07:09:18.931604 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 26 07:09:19 crc kubenswrapper[4492]: I1126 
07:09:19.441103 4492 patch_prober.go:28] interesting pod/machine-config-daemon-6blv7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 07:09:19 crc kubenswrapper[4492]: I1126 07:09:19.441158 4492 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 07:09:20 crc kubenswrapper[4492]: I1126 07:09:20.081895 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 26 07:09:24 crc kubenswrapper[4492]: I1126 07:09:24.149938 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-server-0" podUID="daa58280-e6a7-477f-bfdb-accd4f56ac4d" containerName="rabbitmq" containerID="cri-o://7da8e27aa6f0aa6e523745e3a62bc9ce4f5d3a4d4db083a0fa5f0d0f8afe014c" gracePeriod=604795 Nov 26 07:09:25 crc kubenswrapper[4492]: I1126 07:09:25.488898 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-cell1-server-0" podUID="3bb75c38-10db-46c0-947c-3d91eca8f110" containerName="rabbitmq" containerID="cri-o://ef4e6edef99d0971fed92d6c5ca10b85b27a007b8446b8eaf8288b22854460da" gracePeriod=604795 Nov 26 07:09:25 crc kubenswrapper[4492]: I1126 07:09:25.656932 4492 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="3bb75c38-10db-46c0-947c-3d91eca8f110" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.95:5671: connect: connection refused" Nov 26 07:09:25 crc kubenswrapper[4492]: I1126 07:09:25.931321 4492 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="daa58280-e6a7-477f-bfdb-accd4f56ac4d" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.96:5671: connect: connection refused" Nov 26 07:09:30 crc kubenswrapper[4492]: I1126 07:09:30.449793 4492 generic.go:334] "Generic (PLEG): container finished" podID="daa58280-e6a7-477f-bfdb-accd4f56ac4d" containerID="7da8e27aa6f0aa6e523745e3a62bc9ce4f5d3a4d4db083a0fa5f0d0f8afe014c" exitCode=0 Nov 26 07:09:30 crc kubenswrapper[4492]: I1126 07:09:30.450090 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"daa58280-e6a7-477f-bfdb-accd4f56ac4d","Type":"ContainerDied","Data":"7da8e27aa6f0aa6e523745e3a62bc9ce4f5d3a4d4db083a0fa5f0d0f8afe014c"} Nov 26 07:09:30 crc kubenswrapper[4492]: I1126 07:09:30.747308 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 26 07:09:30 crc kubenswrapper[4492]: I1126 07:09:30.830352 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/daa58280-e6a7-477f-bfdb-accd4f56ac4d-plugins-conf\") pod \"daa58280-e6a7-477f-bfdb-accd4f56ac4d\" (UID: \"daa58280-e6a7-477f-bfdb-accd4f56ac4d\") " Nov 26 07:09:30 crc kubenswrapper[4492]: I1126 07:09:30.830405 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dc4k4\" (UniqueName: \"kubernetes.io/projected/daa58280-e6a7-477f-bfdb-accd4f56ac4d-kube-api-access-dc4k4\") pod \"daa58280-e6a7-477f-bfdb-accd4f56ac4d\" (UID: \"daa58280-e6a7-477f-bfdb-accd4f56ac4d\") " Nov 26 07:09:30 crc kubenswrapper[4492]: I1126 07:09:30.830532 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/daa58280-e6a7-477f-bfdb-accd4f56ac4d-erlang-cookie-secret\") pod \"daa58280-e6a7-477f-bfdb-accd4f56ac4d\" (UID: \"daa58280-e6a7-477f-bfdb-accd4f56ac4d\") " Nov 26 07:09:30 crc kubenswrapper[4492]: I1126 07:09:30.830631 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/daa58280-e6a7-477f-bfdb-accd4f56ac4d-rabbitmq-confd\") pod \"daa58280-e6a7-477f-bfdb-accd4f56ac4d\" (UID: \"daa58280-e6a7-477f-bfdb-accd4f56ac4d\") " Nov 26 07:09:30 crc kubenswrapper[4492]: I1126 07:09:30.830709 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/daa58280-e6a7-477f-bfdb-accd4f56ac4d-pod-info\") pod \"daa58280-e6a7-477f-bfdb-accd4f56ac4d\" (UID: \"daa58280-e6a7-477f-bfdb-accd4f56ac4d\") " Nov 26 07:09:30 crc kubenswrapper[4492]: I1126 07:09:30.839807 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/daa58280-e6a7-477f-bfdb-accd4f56ac4d-pod-info" (OuterVolumeSpecName: "pod-info") pod "daa58280-e6a7-477f-bfdb-accd4f56ac4d" (UID: "daa58280-e6a7-477f-bfdb-accd4f56ac4d"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Nov 26 07:09:30 crc kubenswrapper[4492]: I1126 07:09:30.850306 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/daa58280-e6a7-477f-bfdb-accd4f56ac4d-kube-api-access-dc4k4" (OuterVolumeSpecName: "kube-api-access-dc4k4") pod "daa58280-e6a7-477f-bfdb-accd4f56ac4d" (UID: "daa58280-e6a7-477f-bfdb-accd4f56ac4d"). InnerVolumeSpecName "kube-api-access-dc4k4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:09:30 crc kubenswrapper[4492]: I1126 07:09:30.851213 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/daa58280-e6a7-477f-bfdb-accd4f56ac4d-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "daa58280-e6a7-477f-bfdb-accd4f56ac4d" (UID: "daa58280-e6a7-477f-bfdb-accd4f56ac4d"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:09:30 crc kubenswrapper[4492]: I1126 07:09:30.855348 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/daa58280-e6a7-477f-bfdb-accd4f56ac4d-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "daa58280-e6a7-477f-bfdb-accd4f56ac4d" (UID: "daa58280-e6a7-477f-bfdb-accd4f56ac4d"). 
InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:09:30 crc kubenswrapper[4492]: I1126 07:09:30.932241 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/daa58280-e6a7-477f-bfdb-accd4f56ac4d-rabbitmq-erlang-cookie\") pod \"daa58280-e6a7-477f-bfdb-accd4f56ac4d\" (UID: \"daa58280-e6a7-477f-bfdb-accd4f56ac4d\") " Nov 26 07:09:30 crc kubenswrapper[4492]: I1126 07:09:30.932432 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/daa58280-e6a7-477f-bfdb-accd4f56ac4d-rabbitmq-plugins\") pod \"daa58280-e6a7-477f-bfdb-accd4f56ac4d\" (UID: \"daa58280-e6a7-477f-bfdb-accd4f56ac4d\") " Nov 26 07:09:30 crc kubenswrapper[4492]: I1126 07:09:30.932700 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/daa58280-e6a7-477f-bfdb-accd4f56ac4d-rabbitmq-tls\") pod \"daa58280-e6a7-477f-bfdb-accd4f56ac4d\" (UID: \"daa58280-e6a7-477f-bfdb-accd4f56ac4d\") " Nov 26 07:09:30 crc kubenswrapper[4492]: I1126 07:09:30.932791 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/daa58280-e6a7-477f-bfdb-accd4f56ac4d-config-data\") pod \"daa58280-e6a7-477f-bfdb-accd4f56ac4d\" (UID: \"daa58280-e6a7-477f-bfdb-accd4f56ac4d\") " Nov 26 07:09:30 crc kubenswrapper[4492]: I1126 07:09:30.932874 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/daa58280-e6a7-477f-bfdb-accd4f56ac4d-server-conf\") pod \"daa58280-e6a7-477f-bfdb-accd4f56ac4d\" (UID: \"daa58280-e6a7-477f-bfdb-accd4f56ac4d\") " Nov 26 07:09:30 crc kubenswrapper[4492]: I1126 07:09:30.932959 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"daa58280-e6a7-477f-bfdb-accd4f56ac4d\" (UID: \"daa58280-e6a7-477f-bfdb-accd4f56ac4d\") " Nov 26 07:09:30 crc kubenswrapper[4492]: I1126 07:09:30.936343 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/daa58280-e6a7-477f-bfdb-accd4f56ac4d-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "daa58280-e6a7-477f-bfdb-accd4f56ac4d" (UID: "daa58280-e6a7-477f-bfdb-accd4f56ac4d"). InnerVolumeSpecName "rabbitmq-erlang-cookie". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:09:30 crc kubenswrapper[4492]: I1126 07:09:30.938423 4492 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/daa58280-e6a7-477f-bfdb-accd4f56ac4d-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Nov 26 07:09:30 crc kubenswrapper[4492]: I1126 07:09:30.938513 4492 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/daa58280-e6a7-477f-bfdb-accd4f56ac4d-plugins-conf\") on node \"crc\" DevicePath \"\"" Nov 26 07:09:30 crc kubenswrapper[4492]: I1126 07:09:30.938585 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dc4k4\" (UniqueName: \"kubernetes.io/projected/daa58280-e6a7-477f-bfdb-accd4f56ac4d-kube-api-access-dc4k4\") on node \"crc\" DevicePath \"\"" Nov 26 07:09:30 crc kubenswrapper[4492]: I1126 07:09:30.938643 4492 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/daa58280-e6a7-477f-bfdb-accd4f56ac4d-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Nov 26 07:09:30 crc kubenswrapper[4492]: I1126 07:09:30.938690 4492 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/daa58280-e6a7-477f-bfdb-accd4f56ac4d-pod-info\") on node \"crc\" DevicePath \"\"" Nov 26 07:09:30 crc kubenswrapper[4492]: I1126 07:09:30.946049 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/daa58280-e6a7-477f-bfdb-accd4f56ac4d-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "daa58280-e6a7-477f-bfdb-accd4f56ac4d" (UID: "daa58280-e6a7-477f-bfdb-accd4f56ac4d"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:09:30 crc kubenswrapper[4492]: I1126 07:09:30.948005 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/daa58280-e6a7-477f-bfdb-accd4f56ac4d-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "daa58280-e6a7-477f-bfdb-accd4f56ac4d" (UID: "daa58280-e6a7-477f-bfdb-accd4f56ac4d"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:09:30 crc kubenswrapper[4492]: I1126 07:09:30.951332 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage03-crc" (OuterVolumeSpecName: "persistence") pod "daa58280-e6a7-477f-bfdb-accd4f56ac4d" (UID: "daa58280-e6a7-477f-bfdb-accd4f56ac4d"). InnerVolumeSpecName "local-storage03-crc". 
PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 26 07:09:30 crc kubenswrapper[4492]: I1126 07:09:30.964791 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6f5c57d7b9-64rz7"] Nov 26 07:09:30 crc kubenswrapper[4492]: E1126 07:09:30.965251 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="daa58280-e6a7-477f-bfdb-accd4f56ac4d" containerName="setup-container" Nov 26 07:09:30 crc kubenswrapper[4492]: I1126 07:09:30.965283 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="daa58280-e6a7-477f-bfdb-accd4f56ac4d" containerName="setup-container" Nov 26 07:09:30 crc kubenswrapper[4492]: E1126 07:09:30.965310 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="daa58280-e6a7-477f-bfdb-accd4f56ac4d" containerName="rabbitmq" Nov 26 07:09:30 crc kubenswrapper[4492]: I1126 07:09:30.965316 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="daa58280-e6a7-477f-bfdb-accd4f56ac4d" containerName="rabbitmq" Nov 26 07:09:30 crc kubenswrapper[4492]: I1126 07:09:30.965516 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="daa58280-e6a7-477f-bfdb-accd4f56ac4d" containerName="rabbitmq" Nov 26 07:09:30 crc kubenswrapper[4492]: I1126 07:09:30.966543 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6f5c57d7b9-64rz7" Nov 26 07:09:30 crc kubenswrapper[4492]: I1126 07:09:30.978822 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/daa58280-e6a7-477f-bfdb-accd4f56ac4d-config-data" (OuterVolumeSpecName: "config-data") pod "daa58280-e6a7-477f-bfdb-accd4f56ac4d" (UID: "daa58280-e6a7-477f-bfdb-accd4f56ac4d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:09:30 crc kubenswrapper[4492]: I1126 07:09:30.988882 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-edpm-ipam" Nov 26 07:09:31 crc kubenswrapper[4492]: I1126 07:09:30.999618 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6f5c57d7b9-64rz7"] Nov 26 07:09:31 crc kubenswrapper[4492]: I1126 07:09:31.020748 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/daa58280-e6a7-477f-bfdb-accd4f56ac4d-server-conf" (OuterVolumeSpecName: "server-conf") pod "daa58280-e6a7-477f-bfdb-accd4f56ac4d" (UID: "daa58280-e6a7-477f-bfdb-accd4f56ac4d"). InnerVolumeSpecName "server-conf". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:09:31 crc kubenswrapper[4492]: I1126 07:09:31.039493 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/41ac2992-bfcb-4138-8f46-f5521b090f19-config\") pod \"dnsmasq-dns-6f5c57d7b9-64rz7\" (UID: \"41ac2992-bfcb-4138-8f46-f5521b090f19\") " pod="openstack/dnsmasq-dns-6f5c57d7b9-64rz7" Nov 26 07:09:31 crc kubenswrapper[4492]: I1126 07:09:31.039533 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/41ac2992-bfcb-4138-8f46-f5521b090f19-openstack-edpm-ipam\") pod \"dnsmasq-dns-6f5c57d7b9-64rz7\" (UID: \"41ac2992-bfcb-4138-8f46-f5521b090f19\") " pod="openstack/dnsmasq-dns-6f5c57d7b9-64rz7" Nov 26 07:09:31 crc kubenswrapper[4492]: I1126 07:09:31.039577 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/41ac2992-bfcb-4138-8f46-f5521b090f19-dns-svc\") pod \"dnsmasq-dns-6f5c57d7b9-64rz7\" (UID: \"41ac2992-bfcb-4138-8f46-f5521b090f19\") " pod="openstack/dnsmasq-dns-6f5c57d7b9-64rz7" Nov 26 07:09:31 crc kubenswrapper[4492]: I1126 07:09:31.039595 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/41ac2992-bfcb-4138-8f46-f5521b090f19-ovsdbserver-sb\") pod \"dnsmasq-dns-6f5c57d7b9-64rz7\" (UID: \"41ac2992-bfcb-4138-8f46-f5521b090f19\") " pod="openstack/dnsmasq-dns-6f5c57d7b9-64rz7" Nov 26 07:09:31 crc kubenswrapper[4492]: I1126 07:09:31.039624 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/41ac2992-bfcb-4138-8f46-f5521b090f19-ovsdbserver-nb\") pod \"dnsmasq-dns-6f5c57d7b9-64rz7\" (UID: \"41ac2992-bfcb-4138-8f46-f5521b090f19\") " pod="openstack/dnsmasq-dns-6f5c57d7b9-64rz7" Nov 26 07:09:31 crc kubenswrapper[4492]: I1126 07:09:31.039652 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4qb24\" (UniqueName: \"kubernetes.io/projected/41ac2992-bfcb-4138-8f46-f5521b090f19-kube-api-access-4qb24\") pod \"dnsmasq-dns-6f5c57d7b9-64rz7\" (UID: \"41ac2992-bfcb-4138-8f46-f5521b090f19\") " pod="openstack/dnsmasq-dns-6f5c57d7b9-64rz7" Nov 26 07:09:31 crc kubenswrapper[4492]: I1126 07:09:31.039732 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/41ac2992-bfcb-4138-8f46-f5521b090f19-dns-swift-storage-0\") pod \"dnsmasq-dns-6f5c57d7b9-64rz7\" (UID: \"41ac2992-bfcb-4138-8f46-f5521b090f19\") " pod="openstack/dnsmasq-dns-6f5c57d7b9-64rz7" Nov 26 07:09:31 crc kubenswrapper[4492]: I1126 07:09:31.039797 4492 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/daa58280-e6a7-477f-bfdb-accd4f56ac4d-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Nov 26 07:09:31 crc kubenswrapper[4492]: I1126 07:09:31.039816 4492 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/daa58280-e6a7-477f-bfdb-accd4f56ac4d-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 07:09:31 crc kubenswrapper[4492]: I1126 07:09:31.039836 4492 reconciler_common.go:286] 
"operationExecutor.UnmountDevice started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") on node \"crc\" " Nov 26 07:09:31 crc kubenswrapper[4492]: I1126 07:09:31.039845 4492 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/daa58280-e6a7-477f-bfdb-accd4f56ac4d-server-conf\") on node \"crc\" DevicePath \"\"" Nov 26 07:09:31 crc kubenswrapper[4492]: I1126 07:09:31.039853 4492 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/daa58280-e6a7-477f-bfdb-accd4f56ac4d-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Nov 26 07:09:31 crc kubenswrapper[4492]: I1126 07:09:31.048985 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/daa58280-e6a7-477f-bfdb-accd4f56ac4d-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "daa58280-e6a7-477f-bfdb-accd4f56ac4d" (UID: "daa58280-e6a7-477f-bfdb-accd4f56ac4d"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:09:31 crc kubenswrapper[4492]: I1126 07:09:31.061872 4492 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage03-crc" (UniqueName: "kubernetes.io/local-volume/local-storage03-crc") on node "crc" Nov 26 07:09:31 crc kubenswrapper[4492]: I1126 07:09:31.142303 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/41ac2992-bfcb-4138-8f46-f5521b090f19-dns-swift-storage-0\") pod \"dnsmasq-dns-6f5c57d7b9-64rz7\" (UID: \"41ac2992-bfcb-4138-8f46-f5521b090f19\") " pod="openstack/dnsmasq-dns-6f5c57d7b9-64rz7" Nov 26 07:09:31 crc kubenswrapper[4492]: I1126 07:09:31.142735 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/41ac2992-bfcb-4138-8f46-f5521b090f19-config\") pod \"dnsmasq-dns-6f5c57d7b9-64rz7\" (UID: \"41ac2992-bfcb-4138-8f46-f5521b090f19\") " pod="openstack/dnsmasq-dns-6f5c57d7b9-64rz7" Nov 26 07:09:31 crc kubenswrapper[4492]: I1126 07:09:31.142766 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/41ac2992-bfcb-4138-8f46-f5521b090f19-openstack-edpm-ipam\") pod \"dnsmasq-dns-6f5c57d7b9-64rz7\" (UID: \"41ac2992-bfcb-4138-8f46-f5521b090f19\") " pod="openstack/dnsmasq-dns-6f5c57d7b9-64rz7" Nov 26 07:09:31 crc kubenswrapper[4492]: I1126 07:09:31.142812 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/41ac2992-bfcb-4138-8f46-f5521b090f19-dns-svc\") pod \"dnsmasq-dns-6f5c57d7b9-64rz7\" (UID: \"41ac2992-bfcb-4138-8f46-f5521b090f19\") " pod="openstack/dnsmasq-dns-6f5c57d7b9-64rz7" Nov 26 07:09:31 crc kubenswrapper[4492]: I1126 07:09:31.142839 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/41ac2992-bfcb-4138-8f46-f5521b090f19-ovsdbserver-sb\") pod \"dnsmasq-dns-6f5c57d7b9-64rz7\" (UID: \"41ac2992-bfcb-4138-8f46-f5521b090f19\") " pod="openstack/dnsmasq-dns-6f5c57d7b9-64rz7" Nov 26 07:09:31 crc kubenswrapper[4492]: I1126 07:09:31.142872 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: 
\"kubernetes.io/configmap/41ac2992-bfcb-4138-8f46-f5521b090f19-ovsdbserver-nb\") pod \"dnsmasq-dns-6f5c57d7b9-64rz7\" (UID: \"41ac2992-bfcb-4138-8f46-f5521b090f19\") " pod="openstack/dnsmasq-dns-6f5c57d7b9-64rz7" Nov 26 07:09:31 crc kubenswrapper[4492]: I1126 07:09:31.142904 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4qb24\" (UniqueName: \"kubernetes.io/projected/41ac2992-bfcb-4138-8f46-f5521b090f19-kube-api-access-4qb24\") pod \"dnsmasq-dns-6f5c57d7b9-64rz7\" (UID: \"41ac2992-bfcb-4138-8f46-f5521b090f19\") " pod="openstack/dnsmasq-dns-6f5c57d7b9-64rz7" Nov 26 07:09:31 crc kubenswrapper[4492]: I1126 07:09:31.143015 4492 reconciler_common.go:293] "Volume detached for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") on node \"crc\" DevicePath \"\"" Nov 26 07:09:31 crc kubenswrapper[4492]: I1126 07:09:31.143029 4492 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/daa58280-e6a7-477f-bfdb-accd4f56ac4d-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Nov 26 07:09:31 crc kubenswrapper[4492]: I1126 07:09:31.143825 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/41ac2992-bfcb-4138-8f46-f5521b090f19-config\") pod \"dnsmasq-dns-6f5c57d7b9-64rz7\" (UID: \"41ac2992-bfcb-4138-8f46-f5521b090f19\") " pod="openstack/dnsmasq-dns-6f5c57d7b9-64rz7" Nov 26 07:09:31 crc kubenswrapper[4492]: I1126 07:09:31.144163 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/41ac2992-bfcb-4138-8f46-f5521b090f19-dns-svc\") pod \"dnsmasq-dns-6f5c57d7b9-64rz7\" (UID: \"41ac2992-bfcb-4138-8f46-f5521b090f19\") " pod="openstack/dnsmasq-dns-6f5c57d7b9-64rz7" Nov 26 07:09:31 crc kubenswrapper[4492]: I1126 07:09:31.144201 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/41ac2992-bfcb-4138-8f46-f5521b090f19-ovsdbserver-sb\") pod \"dnsmasq-dns-6f5c57d7b9-64rz7\" (UID: \"41ac2992-bfcb-4138-8f46-f5521b090f19\") " pod="openstack/dnsmasq-dns-6f5c57d7b9-64rz7" Nov 26 07:09:31 crc kubenswrapper[4492]: I1126 07:09:31.144461 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/41ac2992-bfcb-4138-8f46-f5521b090f19-openstack-edpm-ipam\") pod \"dnsmasq-dns-6f5c57d7b9-64rz7\" (UID: \"41ac2992-bfcb-4138-8f46-f5521b090f19\") " pod="openstack/dnsmasq-dns-6f5c57d7b9-64rz7" Nov 26 07:09:31 crc kubenswrapper[4492]: I1126 07:09:31.144729 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/41ac2992-bfcb-4138-8f46-f5521b090f19-ovsdbserver-nb\") pod \"dnsmasq-dns-6f5c57d7b9-64rz7\" (UID: \"41ac2992-bfcb-4138-8f46-f5521b090f19\") " pod="openstack/dnsmasq-dns-6f5c57d7b9-64rz7" Nov 26 07:09:31 crc kubenswrapper[4492]: I1126 07:09:31.145079 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/41ac2992-bfcb-4138-8f46-f5521b090f19-dns-swift-storage-0\") pod \"dnsmasq-dns-6f5c57d7b9-64rz7\" (UID: \"41ac2992-bfcb-4138-8f46-f5521b090f19\") " pod="openstack/dnsmasq-dns-6f5c57d7b9-64rz7" Nov 26 07:09:31 crc kubenswrapper[4492]: I1126 07:09:31.158628 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-4qb24\" (UniqueName: \"kubernetes.io/projected/41ac2992-bfcb-4138-8f46-f5521b090f19-kube-api-access-4qb24\") pod \"dnsmasq-dns-6f5c57d7b9-64rz7\" (UID: \"41ac2992-bfcb-4138-8f46-f5521b090f19\") " pod="openstack/dnsmasq-dns-6f5c57d7b9-64rz7" Nov 26 07:09:31 crc kubenswrapper[4492]: I1126 07:09:31.299497 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6f5c57d7b9-64rz7" Nov 26 07:09:31 crc kubenswrapper[4492]: I1126 07:09:31.489521 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"daa58280-e6a7-477f-bfdb-accd4f56ac4d","Type":"ContainerDied","Data":"0ac47d0bf75344d8e6895e5db6fd3abd9d9ea667f31e08dae7425d8c56cf2f8f"} Nov 26 07:09:31 crc kubenswrapper[4492]: I1126 07:09:31.489604 4492 scope.go:117] "RemoveContainer" containerID="7da8e27aa6f0aa6e523745e3a62bc9ce4f5d3a4d4db083a0fa5f0d0f8afe014c" Nov 26 07:09:31 crc kubenswrapper[4492]: I1126 07:09:31.489804 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 26 07:09:31 crc kubenswrapper[4492]: I1126 07:09:31.550869 4492 scope.go:117] "RemoveContainer" containerID="0608b29441266fad95d69d5b2720f135463681abce1d524e8aa820621905da40" Nov 26 07:09:31 crc kubenswrapper[4492]: I1126 07:09:31.551268 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 26 07:09:31 crc kubenswrapper[4492]: I1126 07:09:31.614268 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 26 07:09:31 crc kubenswrapper[4492]: I1126 07:09:31.637336 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Nov 26 07:09:31 crc kubenswrapper[4492]: I1126 07:09:31.639492 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 26 07:09:31 crc kubenswrapper[4492]: I1126 07:09:31.643265 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-fw95k" Nov 26 07:09:31 crc kubenswrapper[4492]: I1126 07:09:31.643638 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Nov 26 07:09:31 crc kubenswrapper[4492]: I1126 07:09:31.643862 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data" Nov 26 07:09:31 crc kubenswrapper[4492]: I1126 07:09:31.643912 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Nov 26 07:09:31 crc kubenswrapper[4492]: I1126 07:09:31.644032 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc" Nov 26 07:09:31 crc kubenswrapper[4492]: I1126 07:09:31.644084 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Nov 26 07:09:31 crc kubenswrapper[4492]: I1126 07:09:31.644528 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Nov 26 07:09:31 crc kubenswrapper[4492]: I1126 07:09:31.654979 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 26 07:09:31 crc kubenswrapper[4492]: I1126 07:09:31.703046 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6f5c57d7b9-64rz7"] Nov 26 07:09:31 crc kubenswrapper[4492]: I1126 07:09:31.796181 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/4e248296-fb08-4818-b4e7-6db19d55a3ba-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"4e248296-fb08-4818-b4e7-6db19d55a3ba\") " pod="openstack/rabbitmq-server-0" Nov 26 07:09:31 crc kubenswrapper[4492]: I1126 07:09:31.796347 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/4e248296-fb08-4818-b4e7-6db19d55a3ba-pod-info\") pod \"rabbitmq-server-0\" (UID: \"4e248296-fb08-4818-b4e7-6db19d55a3ba\") " pod="openstack/rabbitmq-server-0" Nov 26 07:09:31 crc kubenswrapper[4492]: I1126 07:09:31.796539 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/4e248296-fb08-4818-b4e7-6db19d55a3ba-server-conf\") pod \"rabbitmq-server-0\" (UID: \"4e248296-fb08-4818-b4e7-6db19d55a3ba\") " pod="openstack/rabbitmq-server-0" Nov 26 07:09:31 crc kubenswrapper[4492]: I1126 07:09:31.796728 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/4e248296-fb08-4818-b4e7-6db19d55a3ba-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"4e248296-fb08-4818-b4e7-6db19d55a3ba\") " pod="openstack/rabbitmq-server-0" Nov 26 07:09:31 crc kubenswrapper[4492]: I1126 07:09:31.796765 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/4e248296-fb08-4818-b4e7-6db19d55a3ba-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"4e248296-fb08-4818-b4e7-6db19d55a3ba\") " pod="openstack/rabbitmq-server-0" Nov 26 07:09:31 crc kubenswrapper[4492]: I1126 07:09:31.796913 4492 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/4e248296-fb08-4818-b4e7-6db19d55a3ba-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"4e248296-fb08-4818-b4e7-6db19d55a3ba\") " pod="openstack/rabbitmq-server-0" Nov 26 07:09:31 crc kubenswrapper[4492]: I1126 07:09:31.797039 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-server-0\" (UID: \"4e248296-fb08-4818-b4e7-6db19d55a3ba\") " pod="openstack/rabbitmq-server-0" Nov 26 07:09:31 crc kubenswrapper[4492]: I1126 07:09:31.797064 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/4e248296-fb08-4818-b4e7-6db19d55a3ba-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"4e248296-fb08-4818-b4e7-6db19d55a3ba\") " pod="openstack/rabbitmq-server-0" Nov 26 07:09:31 crc kubenswrapper[4492]: I1126 07:09:31.797192 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/4e248296-fb08-4818-b4e7-6db19d55a3ba-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"4e248296-fb08-4818-b4e7-6db19d55a3ba\") " pod="openstack/rabbitmq-server-0" Nov 26 07:09:31 crc kubenswrapper[4492]: I1126 07:09:31.797725 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-66lfc\" (UniqueName: \"kubernetes.io/projected/4e248296-fb08-4818-b4e7-6db19d55a3ba-kube-api-access-66lfc\") pod \"rabbitmq-server-0\" (UID: \"4e248296-fb08-4818-b4e7-6db19d55a3ba\") " pod="openstack/rabbitmq-server-0" Nov 26 07:09:31 crc kubenswrapper[4492]: I1126 07:09:31.797850 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/4e248296-fb08-4818-b4e7-6db19d55a3ba-config-data\") pod \"rabbitmq-server-0\" (UID: \"4e248296-fb08-4818-b4e7-6db19d55a3ba\") " pod="openstack/rabbitmq-server-0" Nov 26 07:09:31 crc kubenswrapper[4492]: I1126 07:09:31.904214 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-66lfc\" (UniqueName: \"kubernetes.io/projected/4e248296-fb08-4818-b4e7-6db19d55a3ba-kube-api-access-66lfc\") pod \"rabbitmq-server-0\" (UID: \"4e248296-fb08-4818-b4e7-6db19d55a3ba\") " pod="openstack/rabbitmq-server-0" Nov 26 07:09:31 crc kubenswrapper[4492]: I1126 07:09:31.904494 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/4e248296-fb08-4818-b4e7-6db19d55a3ba-config-data\") pod \"rabbitmq-server-0\" (UID: \"4e248296-fb08-4818-b4e7-6db19d55a3ba\") " pod="openstack/rabbitmq-server-0" Nov 26 07:09:31 crc kubenswrapper[4492]: I1126 07:09:31.904555 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/4e248296-fb08-4818-b4e7-6db19d55a3ba-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"4e248296-fb08-4818-b4e7-6db19d55a3ba\") " pod="openstack/rabbitmq-server-0" Nov 26 07:09:31 crc kubenswrapper[4492]: I1126 07:09:31.904575 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: 
\"kubernetes.io/downward-api/4e248296-fb08-4818-b4e7-6db19d55a3ba-pod-info\") pod \"rabbitmq-server-0\" (UID: \"4e248296-fb08-4818-b4e7-6db19d55a3ba\") " pod="openstack/rabbitmq-server-0" Nov 26 07:09:31 crc kubenswrapper[4492]: I1126 07:09:31.904593 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/4e248296-fb08-4818-b4e7-6db19d55a3ba-server-conf\") pod \"rabbitmq-server-0\" (UID: \"4e248296-fb08-4818-b4e7-6db19d55a3ba\") " pod="openstack/rabbitmq-server-0" Nov 26 07:09:31 crc kubenswrapper[4492]: I1126 07:09:31.904622 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/4e248296-fb08-4818-b4e7-6db19d55a3ba-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"4e248296-fb08-4818-b4e7-6db19d55a3ba\") " pod="openstack/rabbitmq-server-0" Nov 26 07:09:31 crc kubenswrapper[4492]: I1126 07:09:31.904641 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/4e248296-fb08-4818-b4e7-6db19d55a3ba-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"4e248296-fb08-4818-b4e7-6db19d55a3ba\") " pod="openstack/rabbitmq-server-0" Nov 26 07:09:31 crc kubenswrapper[4492]: I1126 07:09:31.904681 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/4e248296-fb08-4818-b4e7-6db19d55a3ba-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"4e248296-fb08-4818-b4e7-6db19d55a3ba\") " pod="openstack/rabbitmq-server-0" Nov 26 07:09:31 crc kubenswrapper[4492]: I1126 07:09:31.904716 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-server-0\" (UID: \"4e248296-fb08-4818-b4e7-6db19d55a3ba\") " pod="openstack/rabbitmq-server-0" Nov 26 07:09:31 crc kubenswrapper[4492]: I1126 07:09:31.904731 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/4e248296-fb08-4818-b4e7-6db19d55a3ba-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"4e248296-fb08-4818-b4e7-6db19d55a3ba\") " pod="openstack/rabbitmq-server-0" Nov 26 07:09:31 crc kubenswrapper[4492]: I1126 07:09:31.904748 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/4e248296-fb08-4818-b4e7-6db19d55a3ba-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"4e248296-fb08-4818-b4e7-6db19d55a3ba\") " pod="openstack/rabbitmq-server-0" Nov 26 07:09:31 crc kubenswrapper[4492]: I1126 07:09:31.905918 4492 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-server-0\" (UID: \"4e248296-fb08-4818-b4e7-6db19d55a3ba\") device mount path \"/mnt/openstack/pv03\"" pod="openstack/rabbitmq-server-0" Nov 26 07:09:31 crc kubenswrapper[4492]: I1126 07:09:31.906011 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/4e248296-fb08-4818-b4e7-6db19d55a3ba-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"4e248296-fb08-4818-b4e7-6db19d55a3ba\") " pod="openstack/rabbitmq-server-0" Nov 26 07:09:31 crc kubenswrapper[4492]: 
I1126 07:09:31.906675 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/4e248296-fb08-4818-b4e7-6db19d55a3ba-server-conf\") pod \"rabbitmq-server-0\" (UID: \"4e248296-fb08-4818-b4e7-6db19d55a3ba\") " pod="openstack/rabbitmq-server-0" Nov 26 07:09:31 crc kubenswrapper[4492]: I1126 07:09:31.907090 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/4e248296-fb08-4818-b4e7-6db19d55a3ba-config-data\") pod \"rabbitmq-server-0\" (UID: \"4e248296-fb08-4818-b4e7-6db19d55a3ba\") " pod="openstack/rabbitmq-server-0" Nov 26 07:09:31 crc kubenswrapper[4492]: I1126 07:09:31.907228 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/4e248296-fb08-4818-b4e7-6db19d55a3ba-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"4e248296-fb08-4818-b4e7-6db19d55a3ba\") " pod="openstack/rabbitmq-server-0" Nov 26 07:09:31 crc kubenswrapper[4492]: I1126 07:09:31.907575 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/4e248296-fb08-4818-b4e7-6db19d55a3ba-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"4e248296-fb08-4818-b4e7-6db19d55a3ba\") " pod="openstack/rabbitmq-server-0" Nov 26 07:09:31 crc kubenswrapper[4492]: I1126 07:09:31.928094 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/4e248296-fb08-4818-b4e7-6db19d55a3ba-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"4e248296-fb08-4818-b4e7-6db19d55a3ba\") " pod="openstack/rabbitmq-server-0" Nov 26 07:09:31 crc kubenswrapper[4492]: I1126 07:09:31.928611 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/4e248296-fb08-4818-b4e7-6db19d55a3ba-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"4e248296-fb08-4818-b4e7-6db19d55a3ba\") " pod="openstack/rabbitmq-server-0" Nov 26 07:09:31 crc kubenswrapper[4492]: I1126 07:09:31.928895 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/4e248296-fb08-4818-b4e7-6db19d55a3ba-pod-info\") pod \"rabbitmq-server-0\" (UID: \"4e248296-fb08-4818-b4e7-6db19d55a3ba\") " pod="openstack/rabbitmq-server-0" Nov 26 07:09:31 crc kubenswrapper[4492]: I1126 07:09:31.928956 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/4e248296-fb08-4818-b4e7-6db19d55a3ba-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"4e248296-fb08-4818-b4e7-6db19d55a3ba\") " pod="openstack/rabbitmq-server-0" Nov 26 07:09:31 crc kubenswrapper[4492]: I1126 07:09:31.956437 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-66lfc\" (UniqueName: \"kubernetes.io/projected/4e248296-fb08-4818-b4e7-6db19d55a3ba-kube-api-access-66lfc\") pod \"rabbitmq-server-0\" (UID: \"4e248296-fb08-4818-b4e7-6db19d55a3ba\") " pod="openstack/rabbitmq-server-0" Nov 26 07:09:31 crc kubenswrapper[4492]: I1126 07:09:31.965065 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-server-0\" (UID: \"4e248296-fb08-4818-b4e7-6db19d55a3ba\") " pod="openstack/rabbitmq-server-0" Nov 26 07:09:31 crc 
kubenswrapper[4492]: I1126 07:09:31.981608 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 26 07:09:32 crc kubenswrapper[4492]: I1126 07:09:32.079761 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 26 07:09:32 crc kubenswrapper[4492]: I1126 07:09:32.115801 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"3bb75c38-10db-46c0-947c-3d91eca8f110\" (UID: \"3bb75c38-10db-46c0-947c-3d91eca8f110\") " Nov 26 07:09:32 crc kubenswrapper[4492]: I1126 07:09:32.115890 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/3bb75c38-10db-46c0-947c-3d91eca8f110-rabbitmq-plugins\") pod \"3bb75c38-10db-46c0-947c-3d91eca8f110\" (UID: \"3bb75c38-10db-46c0-947c-3d91eca8f110\") " Nov 26 07:09:32 crc kubenswrapper[4492]: I1126 07:09:32.115921 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/3bb75c38-10db-46c0-947c-3d91eca8f110-rabbitmq-tls\") pod \"3bb75c38-10db-46c0-947c-3d91eca8f110\" (UID: \"3bb75c38-10db-46c0-947c-3d91eca8f110\") " Nov 26 07:09:32 crc kubenswrapper[4492]: I1126 07:09:32.116121 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/3bb75c38-10db-46c0-947c-3d91eca8f110-rabbitmq-erlang-cookie\") pod \"3bb75c38-10db-46c0-947c-3d91eca8f110\" (UID: \"3bb75c38-10db-46c0-947c-3d91eca8f110\") " Nov 26 07:09:32 crc kubenswrapper[4492]: I1126 07:09:32.116208 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/3bb75c38-10db-46c0-947c-3d91eca8f110-plugins-conf\") pod \"3bb75c38-10db-46c0-947c-3d91eca8f110\" (UID: \"3bb75c38-10db-46c0-947c-3d91eca8f110\") " Nov 26 07:09:32 crc kubenswrapper[4492]: I1126 07:09:32.116240 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/3bb75c38-10db-46c0-947c-3d91eca8f110-server-conf\") pod \"3bb75c38-10db-46c0-947c-3d91eca8f110\" (UID: \"3bb75c38-10db-46c0-947c-3d91eca8f110\") " Nov 26 07:09:32 crc kubenswrapper[4492]: I1126 07:09:32.116271 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/3bb75c38-10db-46c0-947c-3d91eca8f110-erlang-cookie-secret\") pod \"3bb75c38-10db-46c0-947c-3d91eca8f110\" (UID: \"3bb75c38-10db-46c0-947c-3d91eca8f110\") " Nov 26 07:09:32 crc kubenswrapper[4492]: I1126 07:09:32.116297 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/3bb75c38-10db-46c0-947c-3d91eca8f110-rabbitmq-confd\") pod \"3bb75c38-10db-46c0-947c-3d91eca8f110\" (UID: \"3bb75c38-10db-46c0-947c-3d91eca8f110\") " Nov 26 07:09:32 crc kubenswrapper[4492]: I1126 07:09:32.116367 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/3bb75c38-10db-46c0-947c-3d91eca8f110-config-data\") pod \"3bb75c38-10db-46c0-947c-3d91eca8f110\" (UID: \"3bb75c38-10db-46c0-947c-3d91eca8f110\") " Nov 26 07:09:32 crc kubenswrapper[4492]: I1126 
07:09:32.116388 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/3bb75c38-10db-46c0-947c-3d91eca8f110-pod-info\") pod \"3bb75c38-10db-46c0-947c-3d91eca8f110\" (UID: \"3bb75c38-10db-46c0-947c-3d91eca8f110\") " Nov 26 07:09:32 crc kubenswrapper[4492]: I1126 07:09:32.116466 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f6grv\" (UniqueName: \"kubernetes.io/projected/3bb75c38-10db-46c0-947c-3d91eca8f110-kube-api-access-f6grv\") pod \"3bb75c38-10db-46c0-947c-3d91eca8f110\" (UID: \"3bb75c38-10db-46c0-947c-3d91eca8f110\") " Nov 26 07:09:32 crc kubenswrapper[4492]: I1126 07:09:32.116524 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3bb75c38-10db-46c0-947c-3d91eca8f110-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "3bb75c38-10db-46c0-947c-3d91eca8f110" (UID: "3bb75c38-10db-46c0-947c-3d91eca8f110"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:09:32 crc kubenswrapper[4492]: I1126 07:09:32.117214 4492 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/3bb75c38-10db-46c0-947c-3d91eca8f110-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Nov 26 07:09:32 crc kubenswrapper[4492]: I1126 07:09:32.120391 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3bb75c38-10db-46c0-947c-3d91eca8f110-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "3bb75c38-10db-46c0-947c-3d91eca8f110" (UID: "3bb75c38-10db-46c0-947c-3d91eca8f110"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:09:32 crc kubenswrapper[4492]: I1126 07:09:32.120559 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3bb75c38-10db-46c0-947c-3d91eca8f110-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "3bb75c38-10db-46c0-947c-3d91eca8f110" (UID: "3bb75c38-10db-46c0-947c-3d91eca8f110"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:09:32 crc kubenswrapper[4492]: I1126 07:09:32.123069 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage10-crc" (OuterVolumeSpecName: "persistence") pod "3bb75c38-10db-46c0-947c-3d91eca8f110" (UID: "3bb75c38-10db-46c0-947c-3d91eca8f110"). InnerVolumeSpecName "local-storage10-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 26 07:09:32 crc kubenswrapper[4492]: I1126 07:09:32.128887 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3bb75c38-10db-46c0-947c-3d91eca8f110-kube-api-access-f6grv" (OuterVolumeSpecName: "kube-api-access-f6grv") pod "3bb75c38-10db-46c0-947c-3d91eca8f110" (UID: "3bb75c38-10db-46c0-947c-3d91eca8f110"). InnerVolumeSpecName "kube-api-access-f6grv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:09:32 crc kubenswrapper[4492]: I1126 07:09:32.130602 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/3bb75c38-10db-46c0-947c-3d91eca8f110-pod-info" (OuterVolumeSpecName: "pod-info") pod "3bb75c38-10db-46c0-947c-3d91eca8f110" (UID: "3bb75c38-10db-46c0-947c-3d91eca8f110"). InnerVolumeSpecName "pod-info". 
PluginName "kubernetes.io/downward-api", VolumeGidValue "" Nov 26 07:09:32 crc kubenswrapper[4492]: I1126 07:09:32.133362 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3bb75c38-10db-46c0-947c-3d91eca8f110-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "3bb75c38-10db-46c0-947c-3d91eca8f110" (UID: "3bb75c38-10db-46c0-947c-3d91eca8f110"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:09:32 crc kubenswrapper[4492]: I1126 07:09:32.138710 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3bb75c38-10db-46c0-947c-3d91eca8f110-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "3bb75c38-10db-46c0-947c-3d91eca8f110" (UID: "3bb75c38-10db-46c0-947c-3d91eca8f110"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:09:32 crc kubenswrapper[4492]: I1126 07:09:32.192619 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3bb75c38-10db-46c0-947c-3d91eca8f110-config-data" (OuterVolumeSpecName: "config-data") pod "3bb75c38-10db-46c0-947c-3d91eca8f110" (UID: "3bb75c38-10db-46c0-947c-3d91eca8f110"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:09:32 crc kubenswrapper[4492]: I1126 07:09:32.217275 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3bb75c38-10db-46c0-947c-3d91eca8f110-server-conf" (OuterVolumeSpecName: "server-conf") pod "3bb75c38-10db-46c0-947c-3d91eca8f110" (UID: "3bb75c38-10db-46c0-947c-3d91eca8f110"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:09:32 crc kubenswrapper[4492]: I1126 07:09:32.218912 4492 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/3bb75c38-10db-46c0-947c-3d91eca8f110-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Nov 26 07:09:32 crc kubenswrapper[4492]: I1126 07:09:32.218989 4492 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/3bb75c38-10db-46c0-947c-3d91eca8f110-plugins-conf\") on node \"crc\" DevicePath \"\"" Nov 26 07:09:32 crc kubenswrapper[4492]: I1126 07:09:32.219040 4492 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/3bb75c38-10db-46c0-947c-3d91eca8f110-server-conf\") on node \"crc\" DevicePath \"\"" Nov 26 07:09:32 crc kubenswrapper[4492]: I1126 07:09:32.219086 4492 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/3bb75c38-10db-46c0-947c-3d91eca8f110-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Nov 26 07:09:32 crc kubenswrapper[4492]: I1126 07:09:32.219142 4492 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/3bb75c38-10db-46c0-947c-3d91eca8f110-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 07:09:32 crc kubenswrapper[4492]: I1126 07:09:32.219394 4492 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/3bb75c38-10db-46c0-947c-3d91eca8f110-pod-info\") on node \"crc\" DevicePath \"\"" Nov 26 07:09:32 crc kubenswrapper[4492]: I1126 07:09:32.219458 4492 reconciler_common.go:293] "Volume detached for volume 
\"kube-api-access-f6grv\" (UniqueName: \"kubernetes.io/projected/3bb75c38-10db-46c0-947c-3d91eca8f110-kube-api-access-f6grv\") on node \"crc\" DevicePath \"\"" Nov 26 07:09:32 crc kubenswrapper[4492]: I1126 07:09:32.219538 4492 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" " Nov 26 07:09:32 crc kubenswrapper[4492]: I1126 07:09:32.220730 4492 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/3bb75c38-10db-46c0-947c-3d91eca8f110-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Nov 26 07:09:32 crc kubenswrapper[4492]: I1126 07:09:32.248911 4492 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage10-crc" (UniqueName: "kubernetes.io/local-volume/local-storage10-crc") on node "crc" Nov 26 07:09:32 crc kubenswrapper[4492]: I1126 07:09:32.299753 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3bb75c38-10db-46c0-947c-3d91eca8f110-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "3bb75c38-10db-46c0-947c-3d91eca8f110" (UID: "3bb75c38-10db-46c0-947c-3d91eca8f110"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:09:32 crc kubenswrapper[4492]: I1126 07:09:32.341733 4492 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/3bb75c38-10db-46c0-947c-3d91eca8f110-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Nov 26 07:09:32 crc kubenswrapper[4492]: I1126 07:09:32.341771 4492 reconciler_common.go:293] "Volume detached for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" DevicePath \"\"" Nov 26 07:09:32 crc kubenswrapper[4492]: I1126 07:09:32.467352 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="daa58280-e6a7-477f-bfdb-accd4f56ac4d" path="/var/lib/kubelet/pods/daa58280-e6a7-477f-bfdb-accd4f56ac4d/volumes" Nov 26 07:09:32 crc kubenswrapper[4492]: I1126 07:09:32.473257 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 26 07:09:32 crc kubenswrapper[4492]: I1126 07:09:32.525050 4492 generic.go:334] "Generic (PLEG): container finished" podID="3bb75c38-10db-46c0-947c-3d91eca8f110" containerID="ef4e6edef99d0971fed92d6c5ca10b85b27a007b8446b8eaf8288b22854460da" exitCode=0 Nov 26 07:09:32 crc kubenswrapper[4492]: I1126 07:09:32.525112 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 26 07:09:32 crc kubenswrapper[4492]: I1126 07:09:32.525188 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"3bb75c38-10db-46c0-947c-3d91eca8f110","Type":"ContainerDied","Data":"ef4e6edef99d0971fed92d6c5ca10b85b27a007b8446b8eaf8288b22854460da"} Nov 26 07:09:32 crc kubenswrapper[4492]: I1126 07:09:32.527400 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"3bb75c38-10db-46c0-947c-3d91eca8f110","Type":"ContainerDied","Data":"9459340d60f48000ea76afd601230115cc2d298bdb618f8e5d7cc7f2814e99f7"} Nov 26 07:09:32 crc kubenswrapper[4492]: I1126 07:09:32.527429 4492 scope.go:117] "RemoveContainer" containerID="ef4e6edef99d0971fed92d6c5ca10b85b27a007b8446b8eaf8288b22854460da" Nov 26 07:09:32 crc kubenswrapper[4492]: I1126 07:09:32.527618 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"4e248296-fb08-4818-b4e7-6db19d55a3ba","Type":"ContainerStarted","Data":"7588e9c7ec2e1c38aa2c94834e535316ace606084bcccfe873e84583fb6fedaf"} Nov 26 07:09:32 crc kubenswrapper[4492]: I1126 07:09:32.533480 4492 generic.go:334] "Generic (PLEG): container finished" podID="41ac2992-bfcb-4138-8f46-f5521b090f19" containerID="9d29b0f87a2c18e13b2ee4bd6d82b100fa154a6e1f2df5c96b8e4ba0a6346762" exitCode=0 Nov 26 07:09:32 crc kubenswrapper[4492]: I1126 07:09:32.533541 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6f5c57d7b9-64rz7" event={"ID":"41ac2992-bfcb-4138-8f46-f5521b090f19","Type":"ContainerDied","Data":"9d29b0f87a2c18e13b2ee4bd6d82b100fa154a6e1f2df5c96b8e4ba0a6346762"} Nov 26 07:09:32 crc kubenswrapper[4492]: I1126 07:09:32.533567 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6f5c57d7b9-64rz7" event={"ID":"41ac2992-bfcb-4138-8f46-f5521b090f19","Type":"ContainerStarted","Data":"725871626b93ff2bf0c13df6f1af3d11f07b968522a6c82b06af311d975304dc"} Nov 26 07:09:32 crc kubenswrapper[4492]: I1126 07:09:32.585434 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 26 07:09:32 crc kubenswrapper[4492]: I1126 07:09:32.601426 4492 scope.go:117] "RemoveContainer" containerID="3fbd9cbd29985b4fcb600b83dde33b18c78aefc029c9d5ef728f9406cbc8ad4b" Nov 26 07:09:32 crc kubenswrapper[4492]: I1126 07:09:32.619569 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 26 07:09:32 crc kubenswrapper[4492]: I1126 07:09:32.638783 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 26 07:09:32 crc kubenswrapper[4492]: E1126 07:09:32.639366 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3bb75c38-10db-46c0-947c-3d91eca8f110" containerName="rabbitmq" Nov 26 07:09:32 crc kubenswrapper[4492]: I1126 07:09:32.639387 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="3bb75c38-10db-46c0-947c-3d91eca8f110" containerName="rabbitmq" Nov 26 07:09:32 crc kubenswrapper[4492]: E1126 07:09:32.639411 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3bb75c38-10db-46c0-947c-3d91eca8f110" containerName="setup-container" Nov 26 07:09:32 crc kubenswrapper[4492]: I1126 07:09:32.639418 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="3bb75c38-10db-46c0-947c-3d91eca8f110" containerName="setup-container" Nov 26 07:09:32 crc kubenswrapper[4492]: I1126 07:09:32.639683 4492 
memory_manager.go:354] "RemoveStaleState removing state" podUID="3bb75c38-10db-46c0-947c-3d91eca8f110" containerName="rabbitmq" Nov 26 07:09:32 crc kubenswrapper[4492]: I1126 07:09:32.641004 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 26 07:09:32 crc kubenswrapper[4492]: I1126 07:09:32.646400 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-dd6zc" Nov 26 07:09:32 crc kubenswrapper[4492]: I1126 07:09:32.648128 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Nov 26 07:09:32 crc kubenswrapper[4492]: I1126 07:09:32.651429 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Nov 26 07:09:32 crc kubenswrapper[4492]: I1126 07:09:32.651507 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Nov 26 07:09:32 crc kubenswrapper[4492]: I1126 07:09:32.652222 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Nov 26 07:09:32 crc kubenswrapper[4492]: I1126 07:09:32.669786 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Nov 26 07:09:32 crc kubenswrapper[4492]: I1126 07:09:32.672154 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Nov 26 07:09:32 crc kubenswrapper[4492]: I1126 07:09:32.702028 4492 scope.go:117] "RemoveContainer" containerID="ef4e6edef99d0971fed92d6c5ca10b85b27a007b8446b8eaf8288b22854460da" Nov 26 07:09:32 crc kubenswrapper[4492]: E1126 07:09:32.709601 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ef4e6edef99d0971fed92d6c5ca10b85b27a007b8446b8eaf8288b22854460da\": container with ID starting with ef4e6edef99d0971fed92d6c5ca10b85b27a007b8446b8eaf8288b22854460da not found: ID does not exist" containerID="ef4e6edef99d0971fed92d6c5ca10b85b27a007b8446b8eaf8288b22854460da" Nov 26 07:09:32 crc kubenswrapper[4492]: I1126 07:09:32.709652 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ef4e6edef99d0971fed92d6c5ca10b85b27a007b8446b8eaf8288b22854460da"} err="failed to get container status \"ef4e6edef99d0971fed92d6c5ca10b85b27a007b8446b8eaf8288b22854460da\": rpc error: code = NotFound desc = could not find container \"ef4e6edef99d0971fed92d6c5ca10b85b27a007b8446b8eaf8288b22854460da\": container with ID starting with ef4e6edef99d0971fed92d6c5ca10b85b27a007b8446b8eaf8288b22854460da not found: ID does not exist" Nov 26 07:09:32 crc kubenswrapper[4492]: I1126 07:09:32.709682 4492 scope.go:117] "RemoveContainer" containerID="3fbd9cbd29985b4fcb600b83dde33b18c78aefc029c9d5ef728f9406cbc8ad4b" Nov 26 07:09:32 crc kubenswrapper[4492]: E1126 07:09:32.710563 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3fbd9cbd29985b4fcb600b83dde33b18c78aefc029c9d5ef728f9406cbc8ad4b\": container with ID starting with 3fbd9cbd29985b4fcb600b83dde33b18c78aefc029c9d5ef728f9406cbc8ad4b not found: ID does not exist" containerID="3fbd9cbd29985b4fcb600b83dde33b18c78aefc029c9d5ef728f9406cbc8ad4b" Nov 26 07:09:32 crc kubenswrapper[4492]: I1126 07:09:32.710610 4492 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"3fbd9cbd29985b4fcb600b83dde33b18c78aefc029c9d5ef728f9406cbc8ad4b"} err="failed to get container status \"3fbd9cbd29985b4fcb600b83dde33b18c78aefc029c9d5ef728f9406cbc8ad4b\": rpc error: code = NotFound desc = could not find container \"3fbd9cbd29985b4fcb600b83dde33b18c78aefc029c9d5ef728f9406cbc8ad4b\": container with ID starting with 3fbd9cbd29985b4fcb600b83dde33b18c78aefc029c9d5ef728f9406cbc8ad4b not found: ID does not exist" Nov 26 07:09:32 crc kubenswrapper[4492]: I1126 07:09:32.718997 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 26 07:09:32 crc kubenswrapper[4492]: I1126 07:09:32.754358 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/118887c0-eace-4a16-a3b0-7049a057e69e-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"118887c0-eace-4a16-a3b0-7049a057e69e\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 07:09:32 crc kubenswrapper[4492]: I1126 07:09:32.754460 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/118887c0-eace-4a16-a3b0-7049a057e69e-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"118887c0-eace-4a16-a3b0-7049a057e69e\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 07:09:32 crc kubenswrapper[4492]: I1126 07:09:32.754554 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9pdk4\" (UniqueName: \"kubernetes.io/projected/118887c0-eace-4a16-a3b0-7049a057e69e-kube-api-access-9pdk4\") pod \"rabbitmq-cell1-server-0\" (UID: \"118887c0-eace-4a16-a3b0-7049a057e69e\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 07:09:32 crc kubenswrapper[4492]: I1126 07:09:32.754830 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"118887c0-eace-4a16-a3b0-7049a057e69e\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 07:09:32 crc kubenswrapper[4492]: I1126 07:09:32.754880 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/118887c0-eace-4a16-a3b0-7049a057e69e-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"118887c0-eace-4a16-a3b0-7049a057e69e\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 07:09:32 crc kubenswrapper[4492]: I1126 07:09:32.754903 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/118887c0-eace-4a16-a3b0-7049a057e69e-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"118887c0-eace-4a16-a3b0-7049a057e69e\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 07:09:32 crc kubenswrapper[4492]: I1126 07:09:32.755076 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/118887c0-eace-4a16-a3b0-7049a057e69e-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"118887c0-eace-4a16-a3b0-7049a057e69e\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 07:09:32 crc kubenswrapper[4492]: I1126 07:09:32.755270 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started 
for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/118887c0-eace-4a16-a3b0-7049a057e69e-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"118887c0-eace-4a16-a3b0-7049a057e69e\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 07:09:32 crc kubenswrapper[4492]: I1126 07:09:32.756591 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/118887c0-eace-4a16-a3b0-7049a057e69e-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"118887c0-eace-4a16-a3b0-7049a057e69e\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 07:09:32 crc kubenswrapper[4492]: I1126 07:09:32.756661 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/118887c0-eace-4a16-a3b0-7049a057e69e-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"118887c0-eace-4a16-a3b0-7049a057e69e\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 07:09:32 crc kubenswrapper[4492]: I1126 07:09:32.756755 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/118887c0-eace-4a16-a3b0-7049a057e69e-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"118887c0-eace-4a16-a3b0-7049a057e69e\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 07:09:32 crc kubenswrapper[4492]: I1126 07:09:32.860521 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/118887c0-eace-4a16-a3b0-7049a057e69e-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"118887c0-eace-4a16-a3b0-7049a057e69e\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 07:09:32 crc kubenswrapper[4492]: I1126 07:09:32.861747 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/118887c0-eace-4a16-a3b0-7049a057e69e-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"118887c0-eace-4a16-a3b0-7049a057e69e\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 07:09:32 crc kubenswrapper[4492]: I1126 07:09:32.861710 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/118887c0-eace-4a16-a3b0-7049a057e69e-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"118887c0-eace-4a16-a3b0-7049a057e69e\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 07:09:32 crc kubenswrapper[4492]: I1126 07:09:32.861986 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/118887c0-eace-4a16-a3b0-7049a057e69e-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"118887c0-eace-4a16-a3b0-7049a057e69e\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 07:09:32 crc kubenswrapper[4492]: I1126 07:09:32.862826 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/118887c0-eace-4a16-a3b0-7049a057e69e-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"118887c0-eace-4a16-a3b0-7049a057e69e\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 07:09:32 crc kubenswrapper[4492]: I1126 07:09:32.862998 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: 
\"kubernetes.io/configmap/118887c0-eace-4a16-a3b0-7049a057e69e-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"118887c0-eace-4a16-a3b0-7049a057e69e\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 07:09:32 crc kubenswrapper[4492]: I1126 07:09:32.865094 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/118887c0-eace-4a16-a3b0-7049a057e69e-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"118887c0-eace-4a16-a3b0-7049a057e69e\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 07:09:32 crc kubenswrapper[4492]: I1126 07:09:32.865308 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9pdk4\" (UniqueName: \"kubernetes.io/projected/118887c0-eace-4a16-a3b0-7049a057e69e-kube-api-access-9pdk4\") pod \"rabbitmq-cell1-server-0\" (UID: \"118887c0-eace-4a16-a3b0-7049a057e69e\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 07:09:32 crc kubenswrapper[4492]: I1126 07:09:32.866073 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"118887c0-eace-4a16-a3b0-7049a057e69e\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 07:09:32 crc kubenswrapper[4492]: I1126 07:09:32.867983 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/118887c0-eace-4a16-a3b0-7049a057e69e-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"118887c0-eace-4a16-a3b0-7049a057e69e\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 07:09:32 crc kubenswrapper[4492]: I1126 07:09:32.868101 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/118887c0-eace-4a16-a3b0-7049a057e69e-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"118887c0-eace-4a16-a3b0-7049a057e69e\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 07:09:32 crc kubenswrapper[4492]: I1126 07:09:32.868221 4492 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"118887c0-eace-4a16-a3b0-7049a057e69e\") device mount path \"/mnt/openstack/pv10\"" pod="openstack/rabbitmq-cell1-server-0" Nov 26 07:09:32 crc kubenswrapper[4492]: I1126 07:09:32.864793 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/118887c0-eace-4a16-a3b0-7049a057e69e-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"118887c0-eace-4a16-a3b0-7049a057e69e\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 07:09:32 crc kubenswrapper[4492]: I1126 07:09:32.863885 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/118887c0-eace-4a16-a3b0-7049a057e69e-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"118887c0-eace-4a16-a3b0-7049a057e69e\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 07:09:32 crc kubenswrapper[4492]: I1126 07:09:32.870498 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/118887c0-eace-4a16-a3b0-7049a057e69e-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: 
\"118887c0-eace-4a16-a3b0-7049a057e69e\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 07:09:32 crc kubenswrapper[4492]: I1126 07:09:32.870644 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/118887c0-eace-4a16-a3b0-7049a057e69e-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"118887c0-eace-4a16-a3b0-7049a057e69e\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 07:09:32 crc kubenswrapper[4492]: I1126 07:09:32.874652 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/118887c0-eace-4a16-a3b0-7049a057e69e-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"118887c0-eace-4a16-a3b0-7049a057e69e\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 07:09:32 crc kubenswrapper[4492]: I1126 07:09:32.875839 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/118887c0-eace-4a16-a3b0-7049a057e69e-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"118887c0-eace-4a16-a3b0-7049a057e69e\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 07:09:32 crc kubenswrapper[4492]: I1126 07:09:32.876914 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/118887c0-eace-4a16-a3b0-7049a057e69e-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"118887c0-eace-4a16-a3b0-7049a057e69e\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 07:09:32 crc kubenswrapper[4492]: I1126 07:09:32.877692 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/118887c0-eace-4a16-a3b0-7049a057e69e-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"118887c0-eace-4a16-a3b0-7049a057e69e\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 07:09:32 crc kubenswrapper[4492]: I1126 07:09:32.891530 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/118887c0-eace-4a16-a3b0-7049a057e69e-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"118887c0-eace-4a16-a3b0-7049a057e69e\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 07:09:32 crc kubenswrapper[4492]: I1126 07:09:32.898820 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9pdk4\" (UniqueName: \"kubernetes.io/projected/118887c0-eace-4a16-a3b0-7049a057e69e-kube-api-access-9pdk4\") pod \"rabbitmq-cell1-server-0\" (UID: \"118887c0-eace-4a16-a3b0-7049a057e69e\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 07:09:32 crc kubenswrapper[4492]: I1126 07:09:32.958930 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"118887c0-eace-4a16-a3b0-7049a057e69e\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 07:09:33 crc kubenswrapper[4492]: I1126 07:09:33.013421 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 26 07:09:33 crc kubenswrapper[4492]: I1126 07:09:33.549244 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6f5c57d7b9-64rz7" event={"ID":"41ac2992-bfcb-4138-8f46-f5521b090f19","Type":"ContainerStarted","Data":"74f3592b13d6b7af596ee9da5c825edaf0fd3431ac70d80288cc6aabe8b96492"} Nov 26 07:09:33 crc kubenswrapper[4492]: I1126 07:09:33.549848 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6f5c57d7b9-64rz7" Nov 26 07:09:33 crc kubenswrapper[4492]: I1126 07:09:33.577680 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 26 07:09:33 crc kubenswrapper[4492]: I1126 07:09:33.593762 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6f5c57d7b9-64rz7" podStartSLOduration=3.593735439 podStartE2EDuration="3.593735439s" podCreationTimestamp="2025-11-26 07:09:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:09:33.590441326 +0000 UTC m=+1269.474329634" watchObservedRunningTime="2025-11-26 07:09:33.593735439 +0000 UTC m=+1269.477623736" Nov 26 07:09:34 crc kubenswrapper[4492]: I1126 07:09:34.447381 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3bb75c38-10db-46c0-947c-3d91eca8f110" path="/var/lib/kubelet/pods/3bb75c38-10db-46c0-947c-3d91eca8f110/volumes" Nov 26 07:09:34 crc kubenswrapper[4492]: I1126 07:09:34.559481 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"4e248296-fb08-4818-b4e7-6db19d55a3ba","Type":"ContainerStarted","Data":"17b2e83f3556d2851e4f5a7653628097c29a54aa4daae51c15d5c26165ba1bfe"} Nov 26 07:09:34 crc kubenswrapper[4492]: I1126 07:09:34.562051 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"118887c0-eace-4a16-a3b0-7049a057e69e","Type":"ContainerStarted","Data":"27e6d995ca56811945a12fe4b1d909232f836bad0d93bd91584f48bbde6432a2"} Nov 26 07:09:35 crc kubenswrapper[4492]: I1126 07:09:35.574657 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"118887c0-eace-4a16-a3b0-7049a057e69e","Type":"ContainerStarted","Data":"dfff54fb3b69379c742e4699e31c06ce1198fb45627c904a6b398731b9e59299"} Nov 26 07:09:41 crc kubenswrapper[4492]: I1126 07:09:41.301313 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6f5c57d7b9-64rz7" Nov 26 07:09:41 crc kubenswrapper[4492]: I1126 07:09:41.411218 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7457c658cc-888nk"] Nov 26 07:09:41 crc kubenswrapper[4492]: I1126 07:09:41.416281 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-7457c658cc-888nk" podUID="49076130-f96d-4cda-8aff-7fade5d53117" containerName="dnsmasq-dns" containerID="cri-o://aaa24f6d80c56bcfd364731903fc5f4cdd90ff4bbee89a586dd6053ee695e885" gracePeriod=10 Nov 26 07:09:41 crc kubenswrapper[4492]: I1126 07:09:41.573404 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5f9cdbb5d5-k4bxd"] Nov 26 07:09:41 crc kubenswrapper[4492]: I1126 07:09:41.575644 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5f9cdbb5d5-k4bxd" Nov 26 07:09:41 crc kubenswrapper[4492]: I1126 07:09:41.609462 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5f9cdbb5d5-k4bxd"] Nov 26 07:09:41 crc kubenswrapper[4492]: I1126 07:09:41.634770 4492 generic.go:334] "Generic (PLEG): container finished" podID="49076130-f96d-4cda-8aff-7fade5d53117" containerID="aaa24f6d80c56bcfd364731903fc5f4cdd90ff4bbee89a586dd6053ee695e885" exitCode=0 Nov 26 07:09:41 crc kubenswrapper[4492]: I1126 07:09:41.634816 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7457c658cc-888nk" event={"ID":"49076130-f96d-4cda-8aff-7fade5d53117","Type":"ContainerDied","Data":"aaa24f6d80c56bcfd364731903fc5f4cdd90ff4bbee89a586dd6053ee695e885"} Nov 26 07:09:41 crc kubenswrapper[4492]: I1126 07:09:41.712404 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/4c1466dc-153b-4c1f-be10-189ad16f05bb-dns-swift-storage-0\") pod \"dnsmasq-dns-5f9cdbb5d5-k4bxd\" (UID: \"4c1466dc-153b-4c1f-be10-189ad16f05bb\") " pod="openstack/dnsmasq-dns-5f9cdbb5d5-k4bxd" Nov 26 07:09:41 crc kubenswrapper[4492]: I1126 07:09:41.712454 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pvkf7\" (UniqueName: \"kubernetes.io/projected/4c1466dc-153b-4c1f-be10-189ad16f05bb-kube-api-access-pvkf7\") pod \"dnsmasq-dns-5f9cdbb5d5-k4bxd\" (UID: \"4c1466dc-153b-4c1f-be10-189ad16f05bb\") " pod="openstack/dnsmasq-dns-5f9cdbb5d5-k4bxd" Nov 26 07:09:41 crc kubenswrapper[4492]: I1126 07:09:41.712483 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4c1466dc-153b-4c1f-be10-189ad16f05bb-ovsdbserver-sb\") pod \"dnsmasq-dns-5f9cdbb5d5-k4bxd\" (UID: \"4c1466dc-153b-4c1f-be10-189ad16f05bb\") " pod="openstack/dnsmasq-dns-5f9cdbb5d5-k4bxd" Nov 26 07:09:41 crc kubenswrapper[4492]: I1126 07:09:41.712515 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4c1466dc-153b-4c1f-be10-189ad16f05bb-ovsdbserver-nb\") pod \"dnsmasq-dns-5f9cdbb5d5-k4bxd\" (UID: \"4c1466dc-153b-4c1f-be10-189ad16f05bb\") " pod="openstack/dnsmasq-dns-5f9cdbb5d5-k4bxd" Nov 26 07:09:41 crc kubenswrapper[4492]: I1126 07:09:41.712547 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/4c1466dc-153b-4c1f-be10-189ad16f05bb-openstack-edpm-ipam\") pod \"dnsmasq-dns-5f9cdbb5d5-k4bxd\" (UID: \"4c1466dc-153b-4c1f-be10-189ad16f05bb\") " pod="openstack/dnsmasq-dns-5f9cdbb5d5-k4bxd" Nov 26 07:09:41 crc kubenswrapper[4492]: I1126 07:09:41.712592 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4c1466dc-153b-4c1f-be10-189ad16f05bb-config\") pod \"dnsmasq-dns-5f9cdbb5d5-k4bxd\" (UID: \"4c1466dc-153b-4c1f-be10-189ad16f05bb\") " pod="openstack/dnsmasq-dns-5f9cdbb5d5-k4bxd" Nov 26 07:09:41 crc kubenswrapper[4492]: I1126 07:09:41.712669 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4c1466dc-153b-4c1f-be10-189ad16f05bb-dns-svc\") pod 
\"dnsmasq-dns-5f9cdbb5d5-k4bxd\" (UID: \"4c1466dc-153b-4c1f-be10-189ad16f05bb\") " pod="openstack/dnsmasq-dns-5f9cdbb5d5-k4bxd" Nov 26 07:09:41 crc kubenswrapper[4492]: I1126 07:09:41.814904 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4c1466dc-153b-4c1f-be10-189ad16f05bb-dns-svc\") pod \"dnsmasq-dns-5f9cdbb5d5-k4bxd\" (UID: \"4c1466dc-153b-4c1f-be10-189ad16f05bb\") " pod="openstack/dnsmasq-dns-5f9cdbb5d5-k4bxd" Nov 26 07:09:41 crc kubenswrapper[4492]: I1126 07:09:41.815402 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/4c1466dc-153b-4c1f-be10-189ad16f05bb-dns-swift-storage-0\") pod \"dnsmasq-dns-5f9cdbb5d5-k4bxd\" (UID: \"4c1466dc-153b-4c1f-be10-189ad16f05bb\") " pod="openstack/dnsmasq-dns-5f9cdbb5d5-k4bxd" Nov 26 07:09:41 crc kubenswrapper[4492]: I1126 07:09:41.815453 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pvkf7\" (UniqueName: \"kubernetes.io/projected/4c1466dc-153b-4c1f-be10-189ad16f05bb-kube-api-access-pvkf7\") pod \"dnsmasq-dns-5f9cdbb5d5-k4bxd\" (UID: \"4c1466dc-153b-4c1f-be10-189ad16f05bb\") " pod="openstack/dnsmasq-dns-5f9cdbb5d5-k4bxd" Nov 26 07:09:41 crc kubenswrapper[4492]: I1126 07:09:41.815483 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4c1466dc-153b-4c1f-be10-189ad16f05bb-ovsdbserver-sb\") pod \"dnsmasq-dns-5f9cdbb5d5-k4bxd\" (UID: \"4c1466dc-153b-4c1f-be10-189ad16f05bb\") " pod="openstack/dnsmasq-dns-5f9cdbb5d5-k4bxd" Nov 26 07:09:41 crc kubenswrapper[4492]: I1126 07:09:41.815536 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4c1466dc-153b-4c1f-be10-189ad16f05bb-ovsdbserver-nb\") pod \"dnsmasq-dns-5f9cdbb5d5-k4bxd\" (UID: \"4c1466dc-153b-4c1f-be10-189ad16f05bb\") " pod="openstack/dnsmasq-dns-5f9cdbb5d5-k4bxd" Nov 26 07:09:41 crc kubenswrapper[4492]: I1126 07:09:41.815593 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/4c1466dc-153b-4c1f-be10-189ad16f05bb-openstack-edpm-ipam\") pod \"dnsmasq-dns-5f9cdbb5d5-k4bxd\" (UID: \"4c1466dc-153b-4c1f-be10-189ad16f05bb\") " pod="openstack/dnsmasq-dns-5f9cdbb5d5-k4bxd" Nov 26 07:09:41 crc kubenswrapper[4492]: I1126 07:09:41.815647 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4c1466dc-153b-4c1f-be10-189ad16f05bb-config\") pod \"dnsmasq-dns-5f9cdbb5d5-k4bxd\" (UID: \"4c1466dc-153b-4c1f-be10-189ad16f05bb\") " pod="openstack/dnsmasq-dns-5f9cdbb5d5-k4bxd" Nov 26 07:09:41 crc kubenswrapper[4492]: I1126 07:09:41.816492 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4c1466dc-153b-4c1f-be10-189ad16f05bb-dns-svc\") pod \"dnsmasq-dns-5f9cdbb5d5-k4bxd\" (UID: \"4c1466dc-153b-4c1f-be10-189ad16f05bb\") " pod="openstack/dnsmasq-dns-5f9cdbb5d5-k4bxd" Nov 26 07:09:41 crc kubenswrapper[4492]: I1126 07:09:41.817235 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/4c1466dc-153b-4c1f-be10-189ad16f05bb-dns-swift-storage-0\") pod \"dnsmasq-dns-5f9cdbb5d5-k4bxd\" (UID: 
\"4c1466dc-153b-4c1f-be10-189ad16f05bb\") " pod="openstack/dnsmasq-dns-5f9cdbb5d5-k4bxd" Nov 26 07:09:41 crc kubenswrapper[4492]: I1126 07:09:41.817903 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4c1466dc-153b-4c1f-be10-189ad16f05bb-config\") pod \"dnsmasq-dns-5f9cdbb5d5-k4bxd\" (UID: \"4c1466dc-153b-4c1f-be10-189ad16f05bb\") " pod="openstack/dnsmasq-dns-5f9cdbb5d5-k4bxd" Nov 26 07:09:41 crc kubenswrapper[4492]: I1126 07:09:41.818970 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4c1466dc-153b-4c1f-be10-189ad16f05bb-ovsdbserver-sb\") pod \"dnsmasq-dns-5f9cdbb5d5-k4bxd\" (UID: \"4c1466dc-153b-4c1f-be10-189ad16f05bb\") " pod="openstack/dnsmasq-dns-5f9cdbb5d5-k4bxd" Nov 26 07:09:41 crc kubenswrapper[4492]: I1126 07:09:41.820339 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4c1466dc-153b-4c1f-be10-189ad16f05bb-ovsdbserver-nb\") pod \"dnsmasq-dns-5f9cdbb5d5-k4bxd\" (UID: \"4c1466dc-153b-4c1f-be10-189ad16f05bb\") " pod="openstack/dnsmasq-dns-5f9cdbb5d5-k4bxd" Nov 26 07:09:41 crc kubenswrapper[4492]: I1126 07:09:41.820472 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/4c1466dc-153b-4c1f-be10-189ad16f05bb-openstack-edpm-ipam\") pod \"dnsmasq-dns-5f9cdbb5d5-k4bxd\" (UID: \"4c1466dc-153b-4c1f-be10-189ad16f05bb\") " pod="openstack/dnsmasq-dns-5f9cdbb5d5-k4bxd" Nov 26 07:09:41 crc kubenswrapper[4492]: I1126 07:09:41.839737 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pvkf7\" (UniqueName: \"kubernetes.io/projected/4c1466dc-153b-4c1f-be10-189ad16f05bb-kube-api-access-pvkf7\") pod \"dnsmasq-dns-5f9cdbb5d5-k4bxd\" (UID: \"4c1466dc-153b-4c1f-be10-189ad16f05bb\") " pod="openstack/dnsmasq-dns-5f9cdbb5d5-k4bxd" Nov 26 07:09:41 crc kubenswrapper[4492]: I1126 07:09:41.898963 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5f9cdbb5d5-k4bxd" Nov 26 07:09:42 crc kubenswrapper[4492]: I1126 07:09:42.030719 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7457c658cc-888nk" Nov 26 07:09:42 crc kubenswrapper[4492]: I1126 07:09:42.125949 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/49076130-f96d-4cda-8aff-7fade5d53117-dns-svc\") pod \"49076130-f96d-4cda-8aff-7fade5d53117\" (UID: \"49076130-f96d-4cda-8aff-7fade5d53117\") " Nov 26 07:09:42 crc kubenswrapper[4492]: I1126 07:09:42.126123 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/49076130-f96d-4cda-8aff-7fade5d53117-ovsdbserver-sb\") pod \"49076130-f96d-4cda-8aff-7fade5d53117\" (UID: \"49076130-f96d-4cda-8aff-7fade5d53117\") " Nov 26 07:09:42 crc kubenswrapper[4492]: I1126 07:09:42.126150 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/49076130-f96d-4cda-8aff-7fade5d53117-config\") pod \"49076130-f96d-4cda-8aff-7fade5d53117\" (UID: \"49076130-f96d-4cda-8aff-7fade5d53117\") " Nov 26 07:09:42 crc kubenswrapper[4492]: I1126 07:09:42.126250 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/49076130-f96d-4cda-8aff-7fade5d53117-dns-swift-storage-0\") pod \"49076130-f96d-4cda-8aff-7fade5d53117\" (UID: \"49076130-f96d-4cda-8aff-7fade5d53117\") " Nov 26 07:09:42 crc kubenswrapper[4492]: I1126 07:09:42.126351 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/49076130-f96d-4cda-8aff-7fade5d53117-ovsdbserver-nb\") pod \"49076130-f96d-4cda-8aff-7fade5d53117\" (UID: \"49076130-f96d-4cda-8aff-7fade5d53117\") " Nov 26 07:09:42 crc kubenswrapper[4492]: I1126 07:09:42.126402 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rhthb\" (UniqueName: \"kubernetes.io/projected/49076130-f96d-4cda-8aff-7fade5d53117-kube-api-access-rhthb\") pod \"49076130-f96d-4cda-8aff-7fade5d53117\" (UID: \"49076130-f96d-4cda-8aff-7fade5d53117\") " Nov 26 07:09:42 crc kubenswrapper[4492]: I1126 07:09:42.133320 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49076130-f96d-4cda-8aff-7fade5d53117-kube-api-access-rhthb" (OuterVolumeSpecName: "kube-api-access-rhthb") pod "49076130-f96d-4cda-8aff-7fade5d53117" (UID: "49076130-f96d-4cda-8aff-7fade5d53117"). InnerVolumeSpecName "kube-api-access-rhthb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:09:42 crc kubenswrapper[4492]: I1126 07:09:42.191258 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49076130-f96d-4cda-8aff-7fade5d53117-config" (OuterVolumeSpecName: "config") pod "49076130-f96d-4cda-8aff-7fade5d53117" (UID: "49076130-f96d-4cda-8aff-7fade5d53117"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:09:42 crc kubenswrapper[4492]: I1126 07:09:42.192144 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49076130-f96d-4cda-8aff-7fade5d53117-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "49076130-f96d-4cda-8aff-7fade5d53117" (UID: "49076130-f96d-4cda-8aff-7fade5d53117"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:09:42 crc kubenswrapper[4492]: I1126 07:09:42.195082 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49076130-f96d-4cda-8aff-7fade5d53117-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "49076130-f96d-4cda-8aff-7fade5d53117" (UID: "49076130-f96d-4cda-8aff-7fade5d53117"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:09:42 crc kubenswrapper[4492]: I1126 07:09:42.207053 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49076130-f96d-4cda-8aff-7fade5d53117-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "49076130-f96d-4cda-8aff-7fade5d53117" (UID: "49076130-f96d-4cda-8aff-7fade5d53117"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:09:42 crc kubenswrapper[4492]: I1126 07:09:42.208004 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49076130-f96d-4cda-8aff-7fade5d53117-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "49076130-f96d-4cda-8aff-7fade5d53117" (UID: "49076130-f96d-4cda-8aff-7fade5d53117"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:09:42 crc kubenswrapper[4492]: I1126 07:09:42.228984 4492 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/49076130-f96d-4cda-8aff-7fade5d53117-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 07:09:42 crc kubenswrapper[4492]: I1126 07:09:42.229008 4492 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/49076130-f96d-4cda-8aff-7fade5d53117-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 26 07:09:42 crc kubenswrapper[4492]: I1126 07:09:42.229018 4492 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/49076130-f96d-4cda-8aff-7fade5d53117-config\") on node \"crc\" DevicePath \"\"" Nov 26 07:09:42 crc kubenswrapper[4492]: I1126 07:09:42.229029 4492 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/49076130-f96d-4cda-8aff-7fade5d53117-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 26 07:09:42 crc kubenswrapper[4492]: I1126 07:09:42.229041 4492 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/49076130-f96d-4cda-8aff-7fade5d53117-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 26 07:09:42 crc kubenswrapper[4492]: I1126 07:09:42.229050 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rhthb\" (UniqueName: \"kubernetes.io/projected/49076130-f96d-4cda-8aff-7fade5d53117-kube-api-access-rhthb\") on node \"crc\" DevicePath \"\"" Nov 26 07:09:42 crc kubenswrapper[4492]: I1126 07:09:42.357425 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5f9cdbb5d5-k4bxd"] Nov 26 07:09:42 crc kubenswrapper[4492]: W1126 07:09:42.369687 4492 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4c1466dc_153b_4c1f_be10_189ad16f05bb.slice/crio-79212a3a20a25fc53185525f46bf25ba90ea27783b91720edda989cd00621116 WatchSource:0}: Error finding container 
79212a3a20a25fc53185525f46bf25ba90ea27783b91720edda989cd00621116: Status 404 returned error can't find the container with id 79212a3a20a25fc53185525f46bf25ba90ea27783b91720edda989cd00621116 Nov 26 07:09:42 crc kubenswrapper[4492]: I1126 07:09:42.654274 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7457c658cc-888nk" event={"ID":"49076130-f96d-4cda-8aff-7fade5d53117","Type":"ContainerDied","Data":"cd0f33471390c4fabb4fc44fbe60a029b86153cd2e48d972e7039b9f3be60565"} Nov 26 07:09:42 crc kubenswrapper[4492]: I1126 07:09:42.654330 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7457c658cc-888nk" Nov 26 07:09:42 crc kubenswrapper[4492]: I1126 07:09:42.654371 4492 scope.go:117] "RemoveContainer" containerID="aaa24f6d80c56bcfd364731903fc5f4cdd90ff4bbee89a586dd6053ee695e885" Nov 26 07:09:42 crc kubenswrapper[4492]: I1126 07:09:42.659306 4492 generic.go:334] "Generic (PLEG): container finished" podID="4c1466dc-153b-4c1f-be10-189ad16f05bb" containerID="69b5bddc8c981e161b9fa84670d049eda527a9b82b7883d32ad441c90a3ba2bd" exitCode=0 Nov 26 07:09:42 crc kubenswrapper[4492]: I1126 07:09:42.659344 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5f9cdbb5d5-k4bxd" event={"ID":"4c1466dc-153b-4c1f-be10-189ad16f05bb","Type":"ContainerDied","Data":"69b5bddc8c981e161b9fa84670d049eda527a9b82b7883d32ad441c90a3ba2bd"} Nov 26 07:09:42 crc kubenswrapper[4492]: I1126 07:09:42.659365 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5f9cdbb5d5-k4bxd" event={"ID":"4c1466dc-153b-4c1f-be10-189ad16f05bb","Type":"ContainerStarted","Data":"79212a3a20a25fc53185525f46bf25ba90ea27783b91720edda989cd00621116"} Nov 26 07:09:42 crc kubenswrapper[4492]: I1126 07:09:42.689531 4492 scope.go:117] "RemoveContainer" containerID="091453ceffbcdba28c1faf3ceb72d57d44aa2842d16a36389a992d3e00062d50" Nov 26 07:09:42 crc kubenswrapper[4492]: I1126 07:09:42.696640 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7457c658cc-888nk"] Nov 26 07:09:42 crc kubenswrapper[4492]: I1126 07:09:42.716726 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7457c658cc-888nk"] Nov 26 07:09:43 crc kubenswrapper[4492]: I1126 07:09:43.678389 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5f9cdbb5d5-k4bxd" event={"ID":"4c1466dc-153b-4c1f-be10-189ad16f05bb","Type":"ContainerStarted","Data":"068537db65a3b97672bdfe11d6c81c30ab6cf085662d87bb7823683e6ff5ce63"} Nov 26 07:09:43 crc kubenswrapper[4492]: I1126 07:09:43.679557 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5f9cdbb5d5-k4bxd" Nov 26 07:09:43 crc kubenswrapper[4492]: I1126 07:09:43.705652 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5f9cdbb5d5-k4bxd" podStartSLOduration=2.705633255 podStartE2EDuration="2.705633255s" podCreationTimestamp="2025-11-26 07:09:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:09:43.701869047 +0000 UTC m=+1279.585757345" watchObservedRunningTime="2025-11-26 07:09:43.705633255 +0000 UTC m=+1279.589521552" Nov 26 07:09:44 crc kubenswrapper[4492]: I1126 07:09:44.454067 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49076130-f96d-4cda-8aff-7fade5d53117" 
path="/var/lib/kubelet/pods/49076130-f96d-4cda-8aff-7fade5d53117/volumes" Nov 26 07:09:49 crc kubenswrapper[4492]: I1126 07:09:49.440996 4492 patch_prober.go:28] interesting pod/machine-config-daemon-6blv7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 07:09:49 crc kubenswrapper[4492]: I1126 07:09:49.441732 4492 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 07:09:51 crc kubenswrapper[4492]: I1126 07:09:51.903361 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5f9cdbb5d5-k4bxd" Nov 26 07:09:51 crc kubenswrapper[4492]: I1126 07:09:51.961916 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6f5c57d7b9-64rz7"] Nov 26 07:09:51 crc kubenswrapper[4492]: I1126 07:09:51.962274 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6f5c57d7b9-64rz7" podUID="41ac2992-bfcb-4138-8f46-f5521b090f19" containerName="dnsmasq-dns" containerID="cri-o://74f3592b13d6b7af596ee9da5c825edaf0fd3431ac70d80288cc6aabe8b96492" gracePeriod=10 Nov 26 07:09:52 crc kubenswrapper[4492]: I1126 07:09:52.553561 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6f5c57d7b9-64rz7" Nov 26 07:09:52 crc kubenswrapper[4492]: I1126 07:09:52.679544 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/41ac2992-bfcb-4138-8f46-f5521b090f19-dns-swift-storage-0\") pod \"41ac2992-bfcb-4138-8f46-f5521b090f19\" (UID: \"41ac2992-bfcb-4138-8f46-f5521b090f19\") " Nov 26 07:09:52 crc kubenswrapper[4492]: I1126 07:09:52.679609 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/41ac2992-bfcb-4138-8f46-f5521b090f19-config\") pod \"41ac2992-bfcb-4138-8f46-f5521b090f19\" (UID: \"41ac2992-bfcb-4138-8f46-f5521b090f19\") " Nov 26 07:09:52 crc kubenswrapper[4492]: I1126 07:09:52.679775 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/41ac2992-bfcb-4138-8f46-f5521b090f19-dns-svc\") pod \"41ac2992-bfcb-4138-8f46-f5521b090f19\" (UID: \"41ac2992-bfcb-4138-8f46-f5521b090f19\") " Nov 26 07:09:52 crc kubenswrapper[4492]: I1126 07:09:52.679906 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/41ac2992-bfcb-4138-8f46-f5521b090f19-ovsdbserver-sb\") pod \"41ac2992-bfcb-4138-8f46-f5521b090f19\" (UID: \"41ac2992-bfcb-4138-8f46-f5521b090f19\") " Nov 26 07:09:52 crc kubenswrapper[4492]: I1126 07:09:52.680089 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/41ac2992-bfcb-4138-8f46-f5521b090f19-ovsdbserver-nb\") pod \"41ac2992-bfcb-4138-8f46-f5521b090f19\" (UID: \"41ac2992-bfcb-4138-8f46-f5521b090f19\") " Nov 26 07:09:52 crc kubenswrapper[4492]: I1126 07:09:52.680202 4492 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4qb24\" (UniqueName: \"kubernetes.io/projected/41ac2992-bfcb-4138-8f46-f5521b090f19-kube-api-access-4qb24\") pod \"41ac2992-bfcb-4138-8f46-f5521b090f19\" (UID: \"41ac2992-bfcb-4138-8f46-f5521b090f19\") " Nov 26 07:09:52 crc kubenswrapper[4492]: I1126 07:09:52.680269 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/41ac2992-bfcb-4138-8f46-f5521b090f19-openstack-edpm-ipam\") pod \"41ac2992-bfcb-4138-8f46-f5521b090f19\" (UID: \"41ac2992-bfcb-4138-8f46-f5521b090f19\") " Nov 26 07:09:52 crc kubenswrapper[4492]: I1126 07:09:52.692550 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/41ac2992-bfcb-4138-8f46-f5521b090f19-kube-api-access-4qb24" (OuterVolumeSpecName: "kube-api-access-4qb24") pod "41ac2992-bfcb-4138-8f46-f5521b090f19" (UID: "41ac2992-bfcb-4138-8f46-f5521b090f19"). InnerVolumeSpecName "kube-api-access-4qb24". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:09:52 crc kubenswrapper[4492]: I1126 07:09:52.758910 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/41ac2992-bfcb-4138-8f46-f5521b090f19-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "41ac2992-bfcb-4138-8f46-f5521b090f19" (UID: "41ac2992-bfcb-4138-8f46-f5521b090f19"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:09:52 crc kubenswrapper[4492]: I1126 07:09:52.760354 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/41ac2992-bfcb-4138-8f46-f5521b090f19-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "41ac2992-bfcb-4138-8f46-f5521b090f19" (UID: "41ac2992-bfcb-4138-8f46-f5521b090f19"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:09:52 crc kubenswrapper[4492]: I1126 07:09:52.763462 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/41ac2992-bfcb-4138-8f46-f5521b090f19-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "41ac2992-bfcb-4138-8f46-f5521b090f19" (UID: "41ac2992-bfcb-4138-8f46-f5521b090f19"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:09:52 crc kubenswrapper[4492]: I1126 07:09:52.776255 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/41ac2992-bfcb-4138-8f46-f5521b090f19-openstack-edpm-ipam" (OuterVolumeSpecName: "openstack-edpm-ipam") pod "41ac2992-bfcb-4138-8f46-f5521b090f19" (UID: "41ac2992-bfcb-4138-8f46-f5521b090f19"). InnerVolumeSpecName "openstack-edpm-ipam". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:09:52 crc kubenswrapper[4492]: I1126 07:09:52.781964 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/41ac2992-bfcb-4138-8f46-f5521b090f19-config" (OuterVolumeSpecName: "config") pod "41ac2992-bfcb-4138-8f46-f5521b090f19" (UID: "41ac2992-bfcb-4138-8f46-f5521b090f19"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:09:52 crc kubenswrapper[4492]: I1126 07:09:52.785642 4492 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/41ac2992-bfcb-4138-8f46-f5521b090f19-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 26 07:09:52 crc kubenswrapper[4492]: I1126 07:09:52.785666 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4qb24\" (UniqueName: \"kubernetes.io/projected/41ac2992-bfcb-4138-8f46-f5521b090f19-kube-api-access-4qb24\") on node \"crc\" DevicePath \"\"" Nov 26 07:09:52 crc kubenswrapper[4492]: I1126 07:09:52.785686 4492 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/41ac2992-bfcb-4138-8f46-f5521b090f19-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Nov 26 07:09:52 crc kubenswrapper[4492]: I1126 07:09:52.785696 4492 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/41ac2992-bfcb-4138-8f46-f5521b090f19-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 26 07:09:52 crc kubenswrapper[4492]: I1126 07:09:52.785706 4492 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/41ac2992-bfcb-4138-8f46-f5521b090f19-config\") on node \"crc\" DevicePath \"\"" Nov 26 07:09:52 crc kubenswrapper[4492]: I1126 07:09:52.785717 4492 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/41ac2992-bfcb-4138-8f46-f5521b090f19-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 07:09:52 crc kubenswrapper[4492]: I1126 07:09:52.811548 4492 generic.go:334] "Generic (PLEG): container finished" podID="41ac2992-bfcb-4138-8f46-f5521b090f19" containerID="74f3592b13d6b7af596ee9da5c825edaf0fd3431ac70d80288cc6aabe8b96492" exitCode=0 Nov 26 07:09:52 crc kubenswrapper[4492]: I1126 07:09:52.811607 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6f5c57d7b9-64rz7" event={"ID":"41ac2992-bfcb-4138-8f46-f5521b090f19","Type":"ContainerDied","Data":"74f3592b13d6b7af596ee9da5c825edaf0fd3431ac70d80288cc6aabe8b96492"} Nov 26 07:09:52 crc kubenswrapper[4492]: I1126 07:09:52.811722 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6f5c57d7b9-64rz7" Nov 26 07:09:52 crc kubenswrapper[4492]: I1126 07:09:52.812675 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/41ac2992-bfcb-4138-8f46-f5521b090f19-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "41ac2992-bfcb-4138-8f46-f5521b090f19" (UID: "41ac2992-bfcb-4138-8f46-f5521b090f19"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:09:52 crc kubenswrapper[4492]: I1126 07:09:52.814121 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6f5c57d7b9-64rz7" event={"ID":"41ac2992-bfcb-4138-8f46-f5521b090f19","Type":"ContainerDied","Data":"725871626b93ff2bf0c13df6f1af3d11f07b968522a6c82b06af311d975304dc"} Nov 26 07:09:52 crc kubenswrapper[4492]: I1126 07:09:52.814439 4492 scope.go:117] "RemoveContainer" containerID="74f3592b13d6b7af596ee9da5c825edaf0fd3431ac70d80288cc6aabe8b96492" Nov 26 07:09:52 crc kubenswrapper[4492]: I1126 07:09:52.841155 4492 scope.go:117] "RemoveContainer" containerID="9d29b0f87a2c18e13b2ee4bd6d82b100fa154a6e1f2df5c96b8e4ba0a6346762" Nov 26 07:09:52 crc kubenswrapper[4492]: I1126 07:09:52.866830 4492 scope.go:117] "RemoveContainer" containerID="74f3592b13d6b7af596ee9da5c825edaf0fd3431ac70d80288cc6aabe8b96492" Nov 26 07:09:52 crc kubenswrapper[4492]: E1126 07:09:52.867286 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"74f3592b13d6b7af596ee9da5c825edaf0fd3431ac70d80288cc6aabe8b96492\": container with ID starting with 74f3592b13d6b7af596ee9da5c825edaf0fd3431ac70d80288cc6aabe8b96492 not found: ID does not exist" containerID="74f3592b13d6b7af596ee9da5c825edaf0fd3431ac70d80288cc6aabe8b96492" Nov 26 07:09:52 crc kubenswrapper[4492]: I1126 07:09:52.867326 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"74f3592b13d6b7af596ee9da5c825edaf0fd3431ac70d80288cc6aabe8b96492"} err="failed to get container status \"74f3592b13d6b7af596ee9da5c825edaf0fd3431ac70d80288cc6aabe8b96492\": rpc error: code = NotFound desc = could not find container \"74f3592b13d6b7af596ee9da5c825edaf0fd3431ac70d80288cc6aabe8b96492\": container with ID starting with 74f3592b13d6b7af596ee9da5c825edaf0fd3431ac70d80288cc6aabe8b96492 not found: ID does not exist" Nov 26 07:09:52 crc kubenswrapper[4492]: I1126 07:09:52.867351 4492 scope.go:117] "RemoveContainer" containerID="9d29b0f87a2c18e13b2ee4bd6d82b100fa154a6e1f2df5c96b8e4ba0a6346762" Nov 26 07:09:52 crc kubenswrapper[4492]: E1126 07:09:52.867717 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9d29b0f87a2c18e13b2ee4bd6d82b100fa154a6e1f2df5c96b8e4ba0a6346762\": container with ID starting with 9d29b0f87a2c18e13b2ee4bd6d82b100fa154a6e1f2df5c96b8e4ba0a6346762 not found: ID does not exist" containerID="9d29b0f87a2c18e13b2ee4bd6d82b100fa154a6e1f2df5c96b8e4ba0a6346762" Nov 26 07:09:52 crc kubenswrapper[4492]: I1126 07:09:52.867806 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9d29b0f87a2c18e13b2ee4bd6d82b100fa154a6e1f2df5c96b8e4ba0a6346762"} err="failed to get container status \"9d29b0f87a2c18e13b2ee4bd6d82b100fa154a6e1f2df5c96b8e4ba0a6346762\": rpc error: code = NotFound desc = could not find container \"9d29b0f87a2c18e13b2ee4bd6d82b100fa154a6e1f2df5c96b8e4ba0a6346762\": container with ID starting with 9d29b0f87a2c18e13b2ee4bd6d82b100fa154a6e1f2df5c96b8e4ba0a6346762 not found: ID does not exist" Nov 26 07:09:52 crc kubenswrapper[4492]: I1126 07:09:52.888731 4492 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/41ac2992-bfcb-4138-8f46-f5521b090f19-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 26 07:09:53 crc kubenswrapper[4492]: I1126 07:09:53.139240 4492 kubelet.go:2437] 
"SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6f5c57d7b9-64rz7"] Nov 26 07:09:53 crc kubenswrapper[4492]: I1126 07:09:53.146508 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6f5c57d7b9-64rz7"] Nov 26 07:09:54 crc kubenswrapper[4492]: I1126 07:09:54.450595 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="41ac2992-bfcb-4138-8f46-f5521b090f19" path="/var/lib/kubelet/pods/41ac2992-bfcb-4138-8f46-f5521b090f19/volumes" Nov 26 07:10:06 crc kubenswrapper[4492]: I1126 07:10:06.966069 4492 generic.go:334] "Generic (PLEG): container finished" podID="4e248296-fb08-4818-b4e7-6db19d55a3ba" containerID="17b2e83f3556d2851e4f5a7653628097c29a54aa4daae51c15d5c26165ba1bfe" exitCode=0 Nov 26 07:10:06 crc kubenswrapper[4492]: I1126 07:10:06.966160 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"4e248296-fb08-4818-b4e7-6db19d55a3ba","Type":"ContainerDied","Data":"17b2e83f3556d2851e4f5a7653628097c29a54aa4daae51c15d5c26165ba1bfe"} Nov 26 07:10:06 crc kubenswrapper[4492]: I1126 07:10:06.967768 4492 generic.go:334] "Generic (PLEG): container finished" podID="118887c0-eace-4a16-a3b0-7049a057e69e" containerID="dfff54fb3b69379c742e4699e31c06ce1198fb45627c904a6b398731b9e59299" exitCode=0 Nov 26 07:10:06 crc kubenswrapper[4492]: I1126 07:10:06.967811 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"118887c0-eace-4a16-a3b0-7049a057e69e","Type":"ContainerDied","Data":"dfff54fb3b69379c742e4699e31c06ce1198fb45627c904a6b398731b9e59299"} Nov 26 07:10:07 crc kubenswrapper[4492]: I1126 07:10:07.981022 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"4e248296-fb08-4818-b4e7-6db19d55a3ba","Type":"ContainerStarted","Data":"0d7b2475e8b6a5d39cd82aa8f5dee7ad445882ca7f5628fa1cfa7fa628a08316"} Nov 26 07:10:07 crc kubenswrapper[4492]: I1126 07:10:07.981742 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Nov 26 07:10:07 crc kubenswrapper[4492]: I1126 07:10:07.983575 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"118887c0-eace-4a16-a3b0-7049a057e69e","Type":"ContainerStarted","Data":"55beb4f91ef17f47879f1c88b593abb332c4a181954955a9e4de9b76df3d3670"} Nov 26 07:10:07 crc kubenswrapper[4492]: I1126 07:10:07.984749 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Nov 26 07:10:08 crc kubenswrapper[4492]: I1126 07:10:08.029401 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=37.02937801 podStartE2EDuration="37.02937801s" podCreationTimestamp="2025-11-26 07:09:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:10:08.027138339 +0000 UTC m=+1303.911026637" watchObservedRunningTime="2025-11-26 07:10:08.02937801 +0000 UTC m=+1303.913266308" Nov 26 07:10:08 crc kubenswrapper[4492]: I1126 07:10:08.064733 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=36.064713364 podStartE2EDuration="36.064713364s" podCreationTimestamp="2025-11-26 07:09:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" 
observedRunningTime="2025-11-26 07:10:08.059154091 +0000 UTC m=+1303.943042389" watchObservedRunningTime="2025-11-26 07:10:08.064713364 +0000 UTC m=+1303.948601662" Nov 26 07:10:09 crc kubenswrapper[4492]: I1126 07:10:09.880891 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-fzg9p"] Nov 26 07:10:09 crc kubenswrapper[4492]: E1126 07:10:09.881843 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="49076130-f96d-4cda-8aff-7fade5d53117" containerName="dnsmasq-dns" Nov 26 07:10:09 crc kubenswrapper[4492]: I1126 07:10:09.881863 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="49076130-f96d-4cda-8aff-7fade5d53117" containerName="dnsmasq-dns" Nov 26 07:10:09 crc kubenswrapper[4492]: E1126 07:10:09.881879 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="41ac2992-bfcb-4138-8f46-f5521b090f19" containerName="dnsmasq-dns" Nov 26 07:10:09 crc kubenswrapper[4492]: I1126 07:10:09.881885 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="41ac2992-bfcb-4138-8f46-f5521b090f19" containerName="dnsmasq-dns" Nov 26 07:10:09 crc kubenswrapper[4492]: E1126 07:10:09.881905 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="49076130-f96d-4cda-8aff-7fade5d53117" containerName="init" Nov 26 07:10:09 crc kubenswrapper[4492]: I1126 07:10:09.881911 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="49076130-f96d-4cda-8aff-7fade5d53117" containerName="init" Nov 26 07:10:09 crc kubenswrapper[4492]: E1126 07:10:09.881925 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="41ac2992-bfcb-4138-8f46-f5521b090f19" containerName="init" Nov 26 07:10:09 crc kubenswrapper[4492]: I1126 07:10:09.881932 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="41ac2992-bfcb-4138-8f46-f5521b090f19" containerName="init" Nov 26 07:10:09 crc kubenswrapper[4492]: I1126 07:10:09.882203 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="49076130-f96d-4cda-8aff-7fade5d53117" containerName="dnsmasq-dns" Nov 26 07:10:09 crc kubenswrapper[4492]: I1126 07:10:09.882221 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="41ac2992-bfcb-4138-8f46-f5521b090f19" containerName="dnsmasq-dns" Nov 26 07:10:09 crc kubenswrapper[4492]: I1126 07:10:09.883123 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-fzg9p" Nov 26 07:10:09 crc kubenswrapper[4492]: I1126 07:10:09.887350 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 26 07:10:09 crc kubenswrapper[4492]: I1126 07:10:09.887421 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-mxgqb" Nov 26 07:10:09 crc kubenswrapper[4492]: I1126 07:10:09.887438 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 26 07:10:09 crc kubenswrapper[4492]: I1126 07:10:09.887609 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 26 07:10:09 crc kubenswrapper[4492]: I1126 07:10:09.914479 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-fzg9p"] Nov 26 07:10:10 crc kubenswrapper[4492]: I1126 07:10:10.004851 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/778d07cc-21c8-4705-9885-6b9d0e8659c9-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-fzg9p\" (UID: \"778d07cc-21c8-4705-9885-6b9d0e8659c9\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-fzg9p" Nov 26 07:10:10 crc kubenswrapper[4492]: I1126 07:10:10.005440 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/778d07cc-21c8-4705-9885-6b9d0e8659c9-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-fzg9p\" (UID: \"778d07cc-21c8-4705-9885-6b9d0e8659c9\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-fzg9p" Nov 26 07:10:10 crc kubenswrapper[4492]: I1126 07:10:10.005793 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gr749\" (UniqueName: \"kubernetes.io/projected/778d07cc-21c8-4705-9885-6b9d0e8659c9-kube-api-access-gr749\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-fzg9p\" (UID: \"778d07cc-21c8-4705-9885-6b9d0e8659c9\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-fzg9p" Nov 26 07:10:10 crc kubenswrapper[4492]: I1126 07:10:10.006024 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/778d07cc-21c8-4705-9885-6b9d0e8659c9-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-fzg9p\" (UID: \"778d07cc-21c8-4705-9885-6b9d0e8659c9\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-fzg9p" Nov 26 07:10:10 crc kubenswrapper[4492]: I1126 07:10:10.107944 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/778d07cc-21c8-4705-9885-6b9d0e8659c9-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-fzg9p\" (UID: \"778d07cc-21c8-4705-9885-6b9d0e8659c9\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-fzg9p" Nov 26 07:10:10 crc kubenswrapper[4492]: I1126 07:10:10.108065 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gr749\" (UniqueName: 
\"kubernetes.io/projected/778d07cc-21c8-4705-9885-6b9d0e8659c9-kube-api-access-gr749\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-fzg9p\" (UID: \"778d07cc-21c8-4705-9885-6b9d0e8659c9\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-fzg9p" Nov 26 07:10:10 crc kubenswrapper[4492]: I1126 07:10:10.108114 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/778d07cc-21c8-4705-9885-6b9d0e8659c9-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-fzg9p\" (UID: \"778d07cc-21c8-4705-9885-6b9d0e8659c9\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-fzg9p" Nov 26 07:10:10 crc kubenswrapper[4492]: I1126 07:10:10.108134 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/778d07cc-21c8-4705-9885-6b9d0e8659c9-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-fzg9p\" (UID: \"778d07cc-21c8-4705-9885-6b9d0e8659c9\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-fzg9p" Nov 26 07:10:10 crc kubenswrapper[4492]: I1126 07:10:10.113984 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/778d07cc-21c8-4705-9885-6b9d0e8659c9-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-fzg9p\" (UID: \"778d07cc-21c8-4705-9885-6b9d0e8659c9\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-fzg9p" Nov 26 07:10:10 crc kubenswrapper[4492]: I1126 07:10:10.116053 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/778d07cc-21c8-4705-9885-6b9d0e8659c9-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-fzg9p\" (UID: \"778d07cc-21c8-4705-9885-6b9d0e8659c9\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-fzg9p" Nov 26 07:10:10 crc kubenswrapper[4492]: I1126 07:10:10.118572 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/778d07cc-21c8-4705-9885-6b9d0e8659c9-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-fzg9p\" (UID: \"778d07cc-21c8-4705-9885-6b9d0e8659c9\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-fzg9p" Nov 26 07:10:10 crc kubenswrapper[4492]: I1126 07:10:10.122615 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gr749\" (UniqueName: \"kubernetes.io/projected/778d07cc-21c8-4705-9885-6b9d0e8659c9-kube-api-access-gr749\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-fzg9p\" (UID: \"778d07cc-21c8-4705-9885-6b9d0e8659c9\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-fzg9p" Nov 26 07:10:10 crc kubenswrapper[4492]: I1126 07:10:10.211429 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-fzg9p" Nov 26 07:10:10 crc kubenswrapper[4492]: I1126 07:10:10.932735 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-fzg9p"] Nov 26 07:10:10 crc kubenswrapper[4492]: W1126 07:10:10.949552 4492 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod778d07cc_21c8_4705_9885_6b9d0e8659c9.slice/crio-f457bf0a0571e39bf39d95575bbad2032aa222f31756205564841d12eb869a61 WatchSource:0}: Error finding container f457bf0a0571e39bf39d95575bbad2032aa222f31756205564841d12eb869a61: Status 404 returned error can't find the container with id f457bf0a0571e39bf39d95575bbad2032aa222f31756205564841d12eb869a61 Nov 26 07:10:10 crc kubenswrapper[4492]: I1126 07:10:10.955460 4492 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 26 07:10:11 crc kubenswrapper[4492]: I1126 07:10:11.066743 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-fzg9p" event={"ID":"778d07cc-21c8-4705-9885-6b9d0e8659c9","Type":"ContainerStarted","Data":"f457bf0a0571e39bf39d95575bbad2032aa222f31756205564841d12eb869a61"} Nov 26 07:10:19 crc kubenswrapper[4492]: I1126 07:10:19.442246 4492 patch_prober.go:28] interesting pod/machine-config-daemon-6blv7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 07:10:19 crc kubenswrapper[4492]: I1126 07:10:19.443003 4492 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 07:10:19 crc kubenswrapper[4492]: I1126 07:10:19.443069 4492 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" Nov 26 07:10:19 crc kubenswrapper[4492]: I1126 07:10:19.445883 4492 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"0dcfa8699dfdd37bac8bee745692f7cd2b7b8f4c90664301cb53b268008d376c"} pod="openshift-machine-config-operator/machine-config-daemon-6blv7" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 07:10:19 crc kubenswrapper[4492]: I1126 07:10:19.445986 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" containerID="cri-o://0dcfa8699dfdd37bac8bee745692f7cd2b7b8f4c90664301cb53b268008d376c" gracePeriod=600 Nov 26 07:10:20 crc kubenswrapper[4492]: I1126 07:10:20.188134 4492 generic.go:334] "Generic (PLEG): container finished" podID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerID="0dcfa8699dfdd37bac8bee745692f7cd2b7b8f4c90664301cb53b268008d376c" exitCode=0 Nov 26 07:10:20 crc kubenswrapper[4492]: I1126 07:10:20.188545 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" 
event={"ID":"04bf18ad-d2a1-4b30-a3fa-2b6247363c82","Type":"ContainerDied","Data":"0dcfa8699dfdd37bac8bee745692f7cd2b7b8f4c90664301cb53b268008d376c"} Nov 26 07:10:20 crc kubenswrapper[4492]: I1126 07:10:20.188604 4492 scope.go:117] "RemoveContainer" containerID="087b801b537b43d8dae36da1027953befbb4ce83f773382d5e7a1b8510080157" Nov 26 07:10:22 crc kubenswrapper[4492]: I1126 07:10:22.084430 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Nov 26 07:10:22 crc kubenswrapper[4492]: I1126 07:10:22.222676 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-fzg9p" event={"ID":"778d07cc-21c8-4705-9885-6b9d0e8659c9","Type":"ContainerStarted","Data":"3cd759b00a8d0c8e95c876af573a74b3733b0c20992936c2d4499902945f6e7d"} Nov 26 07:10:22 crc kubenswrapper[4492]: I1126 07:10:22.225859 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" event={"ID":"04bf18ad-d2a1-4b30-a3fa-2b6247363c82","Type":"ContainerStarted","Data":"e66a01ef992e57d5211153b60046f2247dc264cdaa804a19ffc29e563f14e227"} Nov 26 07:10:22 crc kubenswrapper[4492]: I1126 07:10:22.249040 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-fzg9p" podStartSLOduration=3.056846152 podStartE2EDuration="13.24902891s" podCreationTimestamp="2025-11-26 07:10:09 +0000 UTC" firstStartedPulling="2025-11-26 07:10:10.95451064 +0000 UTC m=+1306.838398939" lastFinishedPulling="2025-11-26 07:10:21.146693399 +0000 UTC m=+1317.030581697" observedRunningTime="2025-11-26 07:10:22.24396522 +0000 UTC m=+1318.127853519" watchObservedRunningTime="2025-11-26 07:10:22.24902891 +0000 UTC m=+1318.132917208" Nov 26 07:10:23 crc kubenswrapper[4492]: I1126 07:10:23.017380 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Nov 26 07:10:33 crc kubenswrapper[4492]: I1126 07:10:33.339774 4492 generic.go:334] "Generic (PLEG): container finished" podID="778d07cc-21c8-4705-9885-6b9d0e8659c9" containerID="3cd759b00a8d0c8e95c876af573a74b3733b0c20992936c2d4499902945f6e7d" exitCode=0 Nov 26 07:10:33 crc kubenswrapper[4492]: I1126 07:10:33.340284 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-fzg9p" event={"ID":"778d07cc-21c8-4705-9885-6b9d0e8659c9","Type":"ContainerDied","Data":"3cd759b00a8d0c8e95c876af573a74b3733b0c20992936c2d4499902945f6e7d"} Nov 26 07:10:34 crc kubenswrapper[4492]: I1126 07:10:34.750869 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-fzg9p" Nov 26 07:10:34 crc kubenswrapper[4492]: I1126 07:10:34.786595 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/778d07cc-21c8-4705-9885-6b9d0e8659c9-repo-setup-combined-ca-bundle\") pod \"778d07cc-21c8-4705-9885-6b9d0e8659c9\" (UID: \"778d07cc-21c8-4705-9885-6b9d0e8659c9\") " Nov 26 07:10:34 crc kubenswrapper[4492]: I1126 07:10:34.786936 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/778d07cc-21c8-4705-9885-6b9d0e8659c9-inventory\") pod \"778d07cc-21c8-4705-9885-6b9d0e8659c9\" (UID: \"778d07cc-21c8-4705-9885-6b9d0e8659c9\") " Nov 26 07:10:34 crc kubenswrapper[4492]: I1126 07:10:34.786989 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/778d07cc-21c8-4705-9885-6b9d0e8659c9-ssh-key\") pod \"778d07cc-21c8-4705-9885-6b9d0e8659c9\" (UID: \"778d07cc-21c8-4705-9885-6b9d0e8659c9\") " Nov 26 07:10:34 crc kubenswrapper[4492]: I1126 07:10:34.787027 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gr749\" (UniqueName: \"kubernetes.io/projected/778d07cc-21c8-4705-9885-6b9d0e8659c9-kube-api-access-gr749\") pod \"778d07cc-21c8-4705-9885-6b9d0e8659c9\" (UID: \"778d07cc-21c8-4705-9885-6b9d0e8659c9\") " Nov 26 07:10:34 crc kubenswrapper[4492]: I1126 07:10:34.794631 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/778d07cc-21c8-4705-9885-6b9d0e8659c9-repo-setup-combined-ca-bundle" (OuterVolumeSpecName: "repo-setup-combined-ca-bundle") pod "778d07cc-21c8-4705-9885-6b9d0e8659c9" (UID: "778d07cc-21c8-4705-9885-6b9d0e8659c9"). InnerVolumeSpecName "repo-setup-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:10:34 crc kubenswrapper[4492]: I1126 07:10:34.805980 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/778d07cc-21c8-4705-9885-6b9d0e8659c9-kube-api-access-gr749" (OuterVolumeSpecName: "kube-api-access-gr749") pod "778d07cc-21c8-4705-9885-6b9d0e8659c9" (UID: "778d07cc-21c8-4705-9885-6b9d0e8659c9"). InnerVolumeSpecName "kube-api-access-gr749". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:10:34 crc kubenswrapper[4492]: I1126 07:10:34.820975 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/778d07cc-21c8-4705-9885-6b9d0e8659c9-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "778d07cc-21c8-4705-9885-6b9d0e8659c9" (UID: "778d07cc-21c8-4705-9885-6b9d0e8659c9"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:10:34 crc kubenswrapper[4492]: I1126 07:10:34.827051 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/778d07cc-21c8-4705-9885-6b9d0e8659c9-inventory" (OuterVolumeSpecName: "inventory") pod "778d07cc-21c8-4705-9885-6b9d0e8659c9" (UID: "778d07cc-21c8-4705-9885-6b9d0e8659c9"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:10:34 crc kubenswrapper[4492]: I1126 07:10:34.890316 4492 reconciler_common.go:293] "Volume detached for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/778d07cc-21c8-4705-9885-6b9d0e8659c9-repo-setup-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:10:34 crc kubenswrapper[4492]: I1126 07:10:34.890361 4492 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/778d07cc-21c8-4705-9885-6b9d0e8659c9-inventory\") on node \"crc\" DevicePath \"\"" Nov 26 07:10:34 crc kubenswrapper[4492]: I1126 07:10:34.890377 4492 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/778d07cc-21c8-4705-9885-6b9d0e8659c9-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 26 07:10:34 crc kubenswrapper[4492]: I1126 07:10:34.890388 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gr749\" (UniqueName: \"kubernetes.io/projected/778d07cc-21c8-4705-9885-6b9d0e8659c9-kube-api-access-gr749\") on node \"crc\" DevicePath \"\"" Nov 26 07:10:35 crc kubenswrapper[4492]: I1126 07:10:35.361013 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-fzg9p" event={"ID":"778d07cc-21c8-4705-9885-6b9d0e8659c9","Type":"ContainerDied","Data":"f457bf0a0571e39bf39d95575bbad2032aa222f31756205564841d12eb869a61"} Nov 26 07:10:35 crc kubenswrapper[4492]: I1126 07:10:35.361285 4492 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f457bf0a0571e39bf39d95575bbad2032aa222f31756205564841d12eb869a61" Nov 26 07:10:35 crc kubenswrapper[4492]: I1126 07:10:35.361084 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-fzg9p" Nov 26 07:10:35 crc kubenswrapper[4492]: I1126 07:10:35.440729 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-bcr9d"] Nov 26 07:10:35 crc kubenswrapper[4492]: E1126 07:10:35.441197 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="778d07cc-21c8-4705-9885-6b9d0e8659c9" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Nov 26 07:10:35 crc kubenswrapper[4492]: I1126 07:10:35.441216 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="778d07cc-21c8-4705-9885-6b9d0e8659c9" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Nov 26 07:10:35 crc kubenswrapper[4492]: I1126 07:10:35.441460 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="778d07cc-21c8-4705-9885-6b9d0e8659c9" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Nov 26 07:10:35 crc kubenswrapper[4492]: I1126 07:10:35.442426 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-bcr9d" Nov 26 07:10:35 crc kubenswrapper[4492]: I1126 07:10:35.444298 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 26 07:10:35 crc kubenswrapper[4492]: I1126 07:10:35.445115 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 26 07:10:35 crc kubenswrapper[4492]: I1126 07:10:35.445372 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 26 07:10:35 crc kubenswrapper[4492]: I1126 07:10:35.448159 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-mxgqb" Nov 26 07:10:35 crc kubenswrapper[4492]: I1126 07:10:35.452319 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-bcr9d"] Nov 26 07:10:35 crc kubenswrapper[4492]: I1126 07:10:35.501679 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/66170769-cf14-4851-9687-12804dc6888d-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-bcr9d\" (UID: \"66170769-cf14-4851-9687-12804dc6888d\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-bcr9d" Nov 26 07:10:35 crc kubenswrapper[4492]: I1126 07:10:35.501767 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/66170769-cf14-4851-9687-12804dc6888d-ssh-key\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-bcr9d\" (UID: \"66170769-cf14-4851-9687-12804dc6888d\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-bcr9d" Nov 26 07:10:35 crc kubenswrapper[4492]: I1126 07:10:35.501800 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8bd7p\" (UniqueName: \"kubernetes.io/projected/66170769-cf14-4851-9687-12804dc6888d-kube-api-access-8bd7p\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-bcr9d\" (UID: \"66170769-cf14-4851-9687-12804dc6888d\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-bcr9d" Nov 26 07:10:35 crc kubenswrapper[4492]: I1126 07:10:35.604014 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/66170769-cf14-4851-9687-12804dc6888d-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-bcr9d\" (UID: \"66170769-cf14-4851-9687-12804dc6888d\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-bcr9d" Nov 26 07:10:35 crc kubenswrapper[4492]: I1126 07:10:35.604167 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/66170769-cf14-4851-9687-12804dc6888d-ssh-key\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-bcr9d\" (UID: \"66170769-cf14-4851-9687-12804dc6888d\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-bcr9d" Nov 26 07:10:35 crc kubenswrapper[4492]: I1126 07:10:35.604240 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8bd7p\" (UniqueName: \"kubernetes.io/projected/66170769-cf14-4851-9687-12804dc6888d-kube-api-access-8bd7p\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-bcr9d\" (UID: \"66170769-cf14-4851-9687-12804dc6888d\") " 
pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-bcr9d" Nov 26 07:10:35 crc kubenswrapper[4492]: I1126 07:10:35.608535 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/66170769-cf14-4851-9687-12804dc6888d-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-bcr9d\" (UID: \"66170769-cf14-4851-9687-12804dc6888d\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-bcr9d" Nov 26 07:10:35 crc kubenswrapper[4492]: I1126 07:10:35.610433 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/66170769-cf14-4851-9687-12804dc6888d-ssh-key\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-bcr9d\" (UID: \"66170769-cf14-4851-9687-12804dc6888d\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-bcr9d" Nov 26 07:10:35 crc kubenswrapper[4492]: I1126 07:10:35.620827 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8bd7p\" (UniqueName: \"kubernetes.io/projected/66170769-cf14-4851-9687-12804dc6888d-kube-api-access-8bd7p\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-bcr9d\" (UID: \"66170769-cf14-4851-9687-12804dc6888d\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-bcr9d" Nov 26 07:10:35 crc kubenswrapper[4492]: I1126 07:10:35.756149 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-bcr9d" Nov 26 07:10:36 crc kubenswrapper[4492]: I1126 07:10:36.301460 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-bcr9d"] Nov 26 07:10:36 crc kubenswrapper[4492]: W1126 07:10:36.306386 4492 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod66170769_cf14_4851_9687_12804dc6888d.slice/crio-7ab4b6aeb55a55258f7719170e15c5b1e572f3515ed2e4af592db701a1478a82 WatchSource:0}: Error finding container 7ab4b6aeb55a55258f7719170e15c5b1e572f3515ed2e4af592db701a1478a82: Status 404 returned error can't find the container with id 7ab4b6aeb55a55258f7719170e15c5b1e572f3515ed2e4af592db701a1478a82 Nov 26 07:10:36 crc kubenswrapper[4492]: I1126 07:10:36.370076 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-bcr9d" event={"ID":"66170769-cf14-4851-9687-12804dc6888d","Type":"ContainerStarted","Data":"7ab4b6aeb55a55258f7719170e15c5b1e572f3515ed2e4af592db701a1478a82"} Nov 26 07:10:37 crc kubenswrapper[4492]: I1126 07:10:37.382318 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-bcr9d" event={"ID":"66170769-cf14-4851-9687-12804dc6888d","Type":"ContainerStarted","Data":"09f7da51a7c42a782c7e0832b539a433e6232d2f8abab720485f75e00f2e8ecb"} Nov 26 07:10:37 crc kubenswrapper[4492]: I1126 07:10:37.407897 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-bcr9d" podStartSLOduration=1.806232304 podStartE2EDuration="2.407883097s" podCreationTimestamp="2025-11-26 07:10:35 +0000 UTC" firstStartedPulling="2025-11-26 07:10:36.308699753 +0000 UTC m=+1332.192588051" lastFinishedPulling="2025-11-26 07:10:36.910350547 +0000 UTC m=+1332.794238844" observedRunningTime="2025-11-26 07:10:37.397354693 +0000 UTC m=+1333.281242991" watchObservedRunningTime="2025-11-26 07:10:37.407883097 +0000 UTC 
m=+1333.291771395" Nov 26 07:10:39 crc kubenswrapper[4492]: I1126 07:10:39.407408 4492 generic.go:334] "Generic (PLEG): container finished" podID="66170769-cf14-4851-9687-12804dc6888d" containerID="09f7da51a7c42a782c7e0832b539a433e6232d2f8abab720485f75e00f2e8ecb" exitCode=0 Nov 26 07:10:39 crc kubenswrapper[4492]: I1126 07:10:39.407500 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-bcr9d" event={"ID":"66170769-cf14-4851-9687-12804dc6888d","Type":"ContainerDied","Data":"09f7da51a7c42a782c7e0832b539a433e6232d2f8abab720485f75e00f2e8ecb"} Nov 26 07:10:40 crc kubenswrapper[4492]: I1126 07:10:40.775812 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-bcr9d" Nov 26 07:10:40 crc kubenswrapper[4492]: I1126 07:10:40.925870 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/66170769-cf14-4851-9687-12804dc6888d-inventory\") pod \"66170769-cf14-4851-9687-12804dc6888d\" (UID: \"66170769-cf14-4851-9687-12804dc6888d\") " Nov 26 07:10:40 crc kubenswrapper[4492]: I1126 07:10:40.925932 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8bd7p\" (UniqueName: \"kubernetes.io/projected/66170769-cf14-4851-9687-12804dc6888d-kube-api-access-8bd7p\") pod \"66170769-cf14-4851-9687-12804dc6888d\" (UID: \"66170769-cf14-4851-9687-12804dc6888d\") " Nov 26 07:10:40 crc kubenswrapper[4492]: I1126 07:10:40.926337 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/66170769-cf14-4851-9687-12804dc6888d-ssh-key\") pod \"66170769-cf14-4851-9687-12804dc6888d\" (UID: \"66170769-cf14-4851-9687-12804dc6888d\") " Nov 26 07:10:40 crc kubenswrapper[4492]: I1126 07:10:40.933016 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/66170769-cf14-4851-9687-12804dc6888d-kube-api-access-8bd7p" (OuterVolumeSpecName: "kube-api-access-8bd7p") pod "66170769-cf14-4851-9687-12804dc6888d" (UID: "66170769-cf14-4851-9687-12804dc6888d"). InnerVolumeSpecName "kube-api-access-8bd7p". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:10:40 crc kubenswrapper[4492]: I1126 07:10:40.956993 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/66170769-cf14-4851-9687-12804dc6888d-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "66170769-cf14-4851-9687-12804dc6888d" (UID: "66170769-cf14-4851-9687-12804dc6888d"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:10:40 crc kubenswrapper[4492]: I1126 07:10:40.957548 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/66170769-cf14-4851-9687-12804dc6888d-inventory" (OuterVolumeSpecName: "inventory") pod "66170769-cf14-4851-9687-12804dc6888d" (UID: "66170769-cf14-4851-9687-12804dc6888d"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 07:10:41 crc kubenswrapper[4492]: I1126 07:10:41.030125 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8bd7p\" (UniqueName: \"kubernetes.io/projected/66170769-cf14-4851-9687-12804dc6888d-kube-api-access-8bd7p\") on node \"crc\" DevicePath \"\""
Nov 26 07:10:41 crc kubenswrapper[4492]: I1126 07:10:41.030246 4492 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/66170769-cf14-4851-9687-12804dc6888d-inventory\") on node \"crc\" DevicePath \"\""
Nov 26 07:10:41 crc kubenswrapper[4492]: I1126 07:10:41.030304 4492 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/66170769-cf14-4851-9687-12804dc6888d-ssh-key\") on node \"crc\" DevicePath \"\""
Nov 26 07:10:41 crc kubenswrapper[4492]: I1126 07:10:41.432785 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-bcr9d" event={"ID":"66170769-cf14-4851-9687-12804dc6888d","Type":"ContainerDied","Data":"7ab4b6aeb55a55258f7719170e15c5b1e572f3515ed2e4af592db701a1478a82"}
Nov 26 07:10:41 crc kubenswrapper[4492]: I1126 07:10:41.433158 4492 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7ab4b6aeb55a55258f7719170e15c5b1e572f3515ed2e4af592db701a1478a82"
Nov 26 07:10:41 crc kubenswrapper[4492]: I1126 07:10:41.432861 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-bcr9d"
Nov 26 07:10:41 crc kubenswrapper[4492]: I1126 07:10:41.513670 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-m5n9l"]
Nov 26 07:10:41 crc kubenswrapper[4492]: E1126 07:10:41.514087 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="66170769-cf14-4851-9687-12804dc6888d" containerName="redhat-edpm-deployment-openstack-edpm-ipam"
Nov 26 07:10:41 crc kubenswrapper[4492]: I1126 07:10:41.514105 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="66170769-cf14-4851-9687-12804dc6888d" containerName="redhat-edpm-deployment-openstack-edpm-ipam"
Nov 26 07:10:41 crc kubenswrapper[4492]: I1126 07:10:41.514274 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="66170769-cf14-4851-9687-12804dc6888d" containerName="redhat-edpm-deployment-openstack-edpm-ipam"
Nov 26 07:10:41 crc kubenswrapper[4492]: I1126 07:10:41.514908 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-m5n9l"
Nov 26 07:10:41 crc kubenswrapper[4492]: I1126 07:10:41.516751 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-mxgqb"
Nov 26 07:10:41 crc kubenswrapper[4492]: I1126 07:10:41.520455 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Nov 26 07:10:41 crc kubenswrapper[4492]: I1126 07:10:41.520525 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Nov 26 07:10:41 crc kubenswrapper[4492]: I1126 07:10:41.520928 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Nov 26 07:10:41 crc kubenswrapper[4492]: I1126 07:10:41.529396 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-m5n9l"]
Nov 26 07:10:41 crc kubenswrapper[4492]: I1126 07:10:41.547944 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l9g4x\" (UniqueName: \"kubernetes.io/projected/cd3af2e6-c562-4e41-b9ad-61af9c14f7a1-kube-api-access-l9g4x\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-m5n9l\" (UID: \"cd3af2e6-c562-4e41-b9ad-61af9c14f7a1\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-m5n9l"
Nov 26 07:10:41 crc kubenswrapper[4492]: I1126 07:10:41.548020 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/cd3af2e6-c562-4e41-b9ad-61af9c14f7a1-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-m5n9l\" (UID: \"cd3af2e6-c562-4e41-b9ad-61af9c14f7a1\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-m5n9l"
Nov 26 07:10:41 crc kubenswrapper[4492]: I1126 07:10:41.548051 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cd3af2e6-c562-4e41-b9ad-61af9c14f7a1-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-m5n9l\" (UID: \"cd3af2e6-c562-4e41-b9ad-61af9c14f7a1\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-m5n9l"
Nov 26 07:10:41 crc kubenswrapper[4492]: I1126 07:10:41.548092 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/cd3af2e6-c562-4e41-b9ad-61af9c14f7a1-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-m5n9l\" (UID: \"cd3af2e6-c562-4e41-b9ad-61af9c14f7a1\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-m5n9l"
Nov 26 07:10:41 crc kubenswrapper[4492]: I1126 07:10:41.650382 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/cd3af2e6-c562-4e41-b9ad-61af9c14f7a1-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-m5n9l\" (UID: \"cd3af2e6-c562-4e41-b9ad-61af9c14f7a1\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-m5n9l"
Nov 26 07:10:41 crc kubenswrapper[4492]: I1126 07:10:41.650444 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cd3af2e6-c562-4e41-b9ad-61af9c14f7a1-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-m5n9l\" (UID: \"cd3af2e6-c562-4e41-b9ad-61af9c14f7a1\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-m5n9l"
Nov 26 07:10:41 crc kubenswrapper[4492]: I1126 07:10:41.650499 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/cd3af2e6-c562-4e41-b9ad-61af9c14f7a1-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-m5n9l\" (UID: \"cd3af2e6-c562-4e41-b9ad-61af9c14f7a1\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-m5n9l"
Nov 26 07:10:41 crc kubenswrapper[4492]: I1126 07:10:41.650689 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l9g4x\" (UniqueName: \"kubernetes.io/projected/cd3af2e6-c562-4e41-b9ad-61af9c14f7a1-kube-api-access-l9g4x\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-m5n9l\" (UID: \"cd3af2e6-c562-4e41-b9ad-61af9c14f7a1\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-m5n9l"
Nov 26 07:10:41 crc kubenswrapper[4492]: I1126 07:10:41.655538 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/cd3af2e6-c562-4e41-b9ad-61af9c14f7a1-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-m5n9l\" (UID: \"cd3af2e6-c562-4e41-b9ad-61af9c14f7a1\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-m5n9l"
Nov 26 07:10:41 crc kubenswrapper[4492]: I1126 07:10:41.655759 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/cd3af2e6-c562-4e41-b9ad-61af9c14f7a1-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-m5n9l\" (UID: \"cd3af2e6-c562-4e41-b9ad-61af9c14f7a1\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-m5n9l"
Nov 26 07:10:41 crc kubenswrapper[4492]: I1126 07:10:41.662014 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cd3af2e6-c562-4e41-b9ad-61af9c14f7a1-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-m5n9l\" (UID: \"cd3af2e6-c562-4e41-b9ad-61af9c14f7a1\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-m5n9l"
Nov 26 07:10:41 crc kubenswrapper[4492]: I1126 07:10:41.662963 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l9g4x\" (UniqueName: \"kubernetes.io/projected/cd3af2e6-c562-4e41-b9ad-61af9c14f7a1-kube-api-access-l9g4x\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-m5n9l\" (UID: \"cd3af2e6-c562-4e41-b9ad-61af9c14f7a1\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-m5n9l"
Nov 26 07:10:41 crc kubenswrapper[4492]: I1126 07:10:41.829906 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-m5n9l"
Nov 26 07:10:42 crc kubenswrapper[4492]: I1126 07:10:42.392742 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-m5n9l"]
Nov 26 07:10:42 crc kubenswrapper[4492]: I1126 07:10:42.452068 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-m5n9l" event={"ID":"cd3af2e6-c562-4e41-b9ad-61af9c14f7a1","Type":"ContainerStarted","Data":"13d47d192b453229457f76b2a147128e5d18803b5c6030a352cdf5a6ef9a5b3f"}
Nov 26 07:10:43 crc kubenswrapper[4492]: I1126 07:10:43.457001 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-m5n9l" event={"ID":"cd3af2e6-c562-4e41-b9ad-61af9c14f7a1","Type":"ContainerStarted","Data":"31d3015b6a545951b4f95feddaa6ffa7baa08e72f024dac6b83bb4a5f4cc79e0"}
Nov 26 07:10:43 crc kubenswrapper[4492]: I1126 07:10:43.483991 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-m5n9l" podStartSLOduration=2.017945943 podStartE2EDuration="2.483967621s" podCreationTimestamp="2025-11-26 07:10:41 +0000 UTC" firstStartedPulling="2025-11-26 07:10:42.393872624 +0000 UTC m=+1338.277760922" lastFinishedPulling="2025-11-26 07:10:42.859894302 +0000 UTC m=+1338.743782600" observedRunningTime="2025-11-26 07:10:43.479314563 +0000 UTC m=+1339.363202861" watchObservedRunningTime="2025-11-26 07:10:43.483967621 +0000 UTC m=+1339.367855919"
Nov 26 07:11:56 crc kubenswrapper[4492]: I1126 07:11:56.761945 4492 scope.go:117] "RemoveContainer" containerID="1b096e66db6b7b4cd6cc6e1d50b3955fff54d6894ade0cf6c6ae975af77b78d0"
Nov 26 07:11:56 crc kubenswrapper[4492]: I1126 07:11:56.789851 4492 scope.go:117] "RemoveContainer" containerID="688e1ee5135a74741f4ad3c8139cc1c161a858d58e4c1bbb4a48916b444dc333"
Nov 26 07:12:49 crc kubenswrapper[4492]: I1126 07:12:49.441212 4492 patch_prober.go:28] interesting pod/machine-config-daemon-6blv7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 26 07:12:49 crc kubenswrapper[4492]: I1126 07:12:49.441929 4492 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 26 07:13:19 crc kubenswrapper[4492]: I1126 07:13:19.441574 4492 patch_prober.go:28] interesting pod/machine-config-daemon-6blv7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 26 07:13:19 crc kubenswrapper[4492]: I1126 07:13:19.442312 4492 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 26 07:13:43 crc kubenswrapper[4492]: I1126 07:13:43.241486 4492 generic.go:334] "Generic (PLEG): container finished" podID="cd3af2e6-c562-4e41-b9ad-61af9c14f7a1" containerID="31d3015b6a545951b4f95feddaa6ffa7baa08e72f024dac6b83bb4a5f4cc79e0" exitCode=0
Nov 26 07:13:43 crc kubenswrapper[4492]: I1126 07:13:43.242326 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-m5n9l" event={"ID":"cd3af2e6-c562-4e41-b9ad-61af9c14f7a1","Type":"ContainerDied","Data":"31d3015b6a545951b4f95feddaa6ffa7baa08e72f024dac6b83bb4a5f4cc79e0"}
Nov 26 07:13:44 crc kubenswrapper[4492]: I1126 07:13:44.627518 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-m5n9l"
Nov 26 07:13:44 crc kubenswrapper[4492]: I1126 07:13:44.767834 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/cd3af2e6-c562-4e41-b9ad-61af9c14f7a1-ssh-key\") pod \"cd3af2e6-c562-4e41-b9ad-61af9c14f7a1\" (UID: \"cd3af2e6-c562-4e41-b9ad-61af9c14f7a1\") "
Nov 26 07:13:44 crc kubenswrapper[4492]: I1126 07:13:44.768004 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/cd3af2e6-c562-4e41-b9ad-61af9c14f7a1-inventory\") pod \"cd3af2e6-c562-4e41-b9ad-61af9c14f7a1\" (UID: \"cd3af2e6-c562-4e41-b9ad-61af9c14f7a1\") "
Nov 26 07:13:44 crc kubenswrapper[4492]: I1126 07:13:44.768196 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cd3af2e6-c562-4e41-b9ad-61af9c14f7a1-bootstrap-combined-ca-bundle\") pod \"cd3af2e6-c562-4e41-b9ad-61af9c14f7a1\" (UID: \"cd3af2e6-c562-4e41-b9ad-61af9c14f7a1\") "
Nov 26 07:13:44 crc kubenswrapper[4492]: I1126 07:13:44.768641 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l9g4x\" (UniqueName: \"kubernetes.io/projected/cd3af2e6-c562-4e41-b9ad-61af9c14f7a1-kube-api-access-l9g4x\") pod \"cd3af2e6-c562-4e41-b9ad-61af9c14f7a1\" (UID: \"cd3af2e6-c562-4e41-b9ad-61af9c14f7a1\") "
Nov 26 07:13:44 crc kubenswrapper[4492]: I1126 07:13:44.774055 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cd3af2e6-c562-4e41-b9ad-61af9c14f7a1-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "cd3af2e6-c562-4e41-b9ad-61af9c14f7a1" (UID: "cd3af2e6-c562-4e41-b9ad-61af9c14f7a1"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 07:13:44 crc kubenswrapper[4492]: I1126 07:13:44.775244 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd3af2e6-c562-4e41-b9ad-61af9c14f7a1-kube-api-access-l9g4x" (OuterVolumeSpecName: "kube-api-access-l9g4x") pod "cd3af2e6-c562-4e41-b9ad-61af9c14f7a1" (UID: "cd3af2e6-c562-4e41-b9ad-61af9c14f7a1"). InnerVolumeSpecName "kube-api-access-l9g4x". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 07:13:44 crc kubenswrapper[4492]: I1126 07:13:44.793791 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cd3af2e6-c562-4e41-b9ad-61af9c14f7a1-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "cd3af2e6-c562-4e41-b9ad-61af9c14f7a1" (UID: "cd3af2e6-c562-4e41-b9ad-61af9c14f7a1"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 07:13:44 crc kubenswrapper[4492]: I1126 07:13:44.795813 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cd3af2e6-c562-4e41-b9ad-61af9c14f7a1-inventory" (OuterVolumeSpecName: "inventory") pod "cd3af2e6-c562-4e41-b9ad-61af9c14f7a1" (UID: "cd3af2e6-c562-4e41-b9ad-61af9c14f7a1"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 07:13:44 crc kubenswrapper[4492]: I1126 07:13:44.871551 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l9g4x\" (UniqueName: \"kubernetes.io/projected/cd3af2e6-c562-4e41-b9ad-61af9c14f7a1-kube-api-access-l9g4x\") on node \"crc\" DevicePath \"\""
Nov 26 07:13:44 crc kubenswrapper[4492]: I1126 07:13:44.871593 4492 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/cd3af2e6-c562-4e41-b9ad-61af9c14f7a1-ssh-key\") on node \"crc\" DevicePath \"\""
Nov 26 07:13:44 crc kubenswrapper[4492]: I1126 07:13:44.871605 4492 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/cd3af2e6-c562-4e41-b9ad-61af9c14f7a1-inventory\") on node \"crc\" DevicePath \"\""
Nov 26 07:13:44 crc kubenswrapper[4492]: I1126 07:13:44.871617 4492 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cd3af2e6-c562-4e41-b9ad-61af9c14f7a1-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 26 07:13:45 crc kubenswrapper[4492]: I1126 07:13:45.264818 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-m5n9l" event={"ID":"cd3af2e6-c562-4e41-b9ad-61af9c14f7a1","Type":"ContainerDied","Data":"13d47d192b453229457f76b2a147128e5d18803b5c6030a352cdf5a6ef9a5b3f"}
Nov 26 07:13:45 crc kubenswrapper[4492]: I1126 07:13:45.265205 4492 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="13d47d192b453229457f76b2a147128e5d18803b5c6030a352cdf5a6ef9a5b3f"
Nov 26 07:13:45 crc kubenswrapper[4492]: I1126 07:13:45.264891 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-m5n9l"
Nov 26 07:13:45 crc kubenswrapper[4492]: I1126 07:13:45.362053 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-tq5xs"]
Nov 26 07:13:45 crc kubenswrapper[4492]: E1126 07:13:45.362567 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cd3af2e6-c562-4e41-b9ad-61af9c14f7a1" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam"
Nov 26 07:13:45 crc kubenswrapper[4492]: I1126 07:13:45.362587 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="cd3af2e6-c562-4e41-b9ad-61af9c14f7a1" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam"
Nov 26 07:13:45 crc kubenswrapper[4492]: I1126 07:13:45.362792 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="cd3af2e6-c562-4e41-b9ad-61af9c14f7a1" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam"
Nov 26 07:13:45 crc kubenswrapper[4492]: I1126 07:13:45.363506 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-tq5xs"
Nov 26 07:13:45 crc kubenswrapper[4492]: I1126 07:13:45.365929 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Nov 26 07:13:45 crc kubenswrapper[4492]: I1126 07:13:45.366277 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-mxgqb"
Nov 26 07:13:45 crc kubenswrapper[4492]: I1126 07:13:45.368011 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Nov 26 07:13:45 crc kubenswrapper[4492]: I1126 07:13:45.368138 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Nov 26 07:13:45 crc kubenswrapper[4492]: I1126 07:13:45.379093 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-tq5xs"]
Nov 26 07:13:45 crc kubenswrapper[4492]: I1126 07:13:45.490291 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r7nkr\" (UniqueName: \"kubernetes.io/projected/016b702a-3d42-416f-9c38-007854d31be0-kube-api-access-r7nkr\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-tq5xs\" (UID: \"016b702a-3d42-416f-9c38-007854d31be0\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-tq5xs"
Nov 26 07:13:45 crc kubenswrapper[4492]: I1126 07:13:45.490540 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/016b702a-3d42-416f-9c38-007854d31be0-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-tq5xs\" (UID: \"016b702a-3d42-416f-9c38-007854d31be0\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-tq5xs"
Nov 26 07:13:45 crc kubenswrapper[4492]: I1126 07:13:45.490579 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/016b702a-3d42-416f-9c38-007854d31be0-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-tq5xs\" (UID: \"016b702a-3d42-416f-9c38-007854d31be0\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-tq5xs"
Nov 26 07:13:45 crc kubenswrapper[4492]: I1126 07:13:45.593239 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/016b702a-3d42-416f-9c38-007854d31be0-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-tq5xs\" (UID: \"016b702a-3d42-416f-9c38-007854d31be0\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-tq5xs"
Nov 26 07:13:45 crc kubenswrapper[4492]: I1126 07:13:45.593484 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/016b702a-3d42-416f-9c38-007854d31be0-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-tq5xs\" (UID: \"016b702a-3d42-416f-9c38-007854d31be0\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-tq5xs"
Nov 26 07:13:45 crc kubenswrapper[4492]: I1126 07:13:45.594523 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r7nkr\" (UniqueName: \"kubernetes.io/projected/016b702a-3d42-416f-9c38-007854d31be0-kube-api-access-r7nkr\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-tq5xs\" (UID: \"016b702a-3d42-416f-9c38-007854d31be0\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-tq5xs"
Nov 26 07:13:45 crc kubenswrapper[4492]: I1126 07:13:45.598607 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/016b702a-3d42-416f-9c38-007854d31be0-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-tq5xs\" (UID: \"016b702a-3d42-416f-9c38-007854d31be0\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-tq5xs"
Nov 26 07:13:45 crc kubenswrapper[4492]: I1126 07:13:45.599073 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/016b702a-3d42-416f-9c38-007854d31be0-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-tq5xs\" (UID: \"016b702a-3d42-416f-9c38-007854d31be0\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-tq5xs"
Nov 26 07:13:45 crc kubenswrapper[4492]: I1126 07:13:45.614586 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r7nkr\" (UniqueName: \"kubernetes.io/projected/016b702a-3d42-416f-9c38-007854d31be0-kube-api-access-r7nkr\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-tq5xs\" (UID: \"016b702a-3d42-416f-9c38-007854d31be0\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-tq5xs"
Nov 26 07:13:45 crc kubenswrapper[4492]: I1126 07:13:45.683737 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-tq5xs"
Nov 26 07:13:46 crc kubenswrapper[4492]: I1126 07:13:46.214009 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-tq5xs"]
Nov 26 07:13:46 crc kubenswrapper[4492]: I1126 07:13:46.275202 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-tq5xs" event={"ID":"016b702a-3d42-416f-9c38-007854d31be0","Type":"ContainerStarted","Data":"5951be8e43089d0343a70233b6faee47114d7e1b7e5f9f561af70dbf87497aa3"}
Nov 26 07:13:47 crc kubenswrapper[4492]: I1126 07:13:47.291631 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-tq5xs" event={"ID":"016b702a-3d42-416f-9c38-007854d31be0","Type":"ContainerStarted","Data":"82bf4c870aaebf9152a5bb99730315eb6e792e4a0d2de352ed9486081a3b7350"}
Nov 26 07:13:47 crc kubenswrapper[4492]: I1126 07:13:47.327256 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-tq5xs" podStartSLOduration=1.8143324349999999 podStartE2EDuration="2.327242378s" podCreationTimestamp="2025-11-26 07:13:45 +0000 UTC" firstStartedPulling="2025-11-26 07:13:46.210891025 +0000 UTC m=+1522.094779324" lastFinishedPulling="2025-11-26 07:13:46.723800969 +0000 UTC m=+1522.607689267" observedRunningTime="2025-11-26 07:13:47.317723452 +0000 UTC m=+1523.201611749" watchObservedRunningTime="2025-11-26 07:13:47.327242378 +0000 UTC m=+1523.211130677"
Nov 26 07:13:49 crc kubenswrapper[4492]: I1126 07:13:49.441145 4492 patch_prober.go:28] interesting pod/machine-config-daemon-6blv7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 26 07:13:49 crc kubenswrapper[4492]: I1126 07:13:49.441584 4492 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 26 07:13:49 crc kubenswrapper[4492]: I1126 07:13:49.441648 4492 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-6blv7"
Nov 26 07:13:49 crc kubenswrapper[4492]: I1126 07:13:49.442126 4492 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"e66a01ef992e57d5211153b60046f2247dc264cdaa804a19ffc29e563f14e227"} pod="openshift-machine-config-operator/machine-config-daemon-6blv7" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 26 07:13:49 crc kubenswrapper[4492]: I1126 07:13:49.442245 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" containerID="cri-o://e66a01ef992e57d5211153b60046f2247dc264cdaa804a19ffc29e563f14e227" gracePeriod=600
Nov 26 07:13:49 crc kubenswrapper[4492]: E1126 07:13:49.565961 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82"
Nov 26 07:13:50 crc kubenswrapper[4492]: I1126 07:13:50.328204 4492 generic.go:334] "Generic (PLEG): container finished" podID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerID="e66a01ef992e57d5211153b60046f2247dc264cdaa804a19ffc29e563f14e227" exitCode=0
Nov 26 07:13:50 crc kubenswrapper[4492]: I1126 07:13:50.328602 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" event={"ID":"04bf18ad-d2a1-4b30-a3fa-2b6247363c82","Type":"ContainerDied","Data":"e66a01ef992e57d5211153b60046f2247dc264cdaa804a19ffc29e563f14e227"}
Nov 26 07:13:50 crc kubenswrapper[4492]: I1126 07:13:50.328657 4492 scope.go:117] "RemoveContainer" containerID="0dcfa8699dfdd37bac8bee745692f7cd2b7b8f4c90664301cb53b268008d376c"
Nov 26 07:13:50 crc kubenswrapper[4492]: I1126 07:13:50.329722 4492 scope.go:117] "RemoveContainer" containerID="e66a01ef992e57d5211153b60046f2247dc264cdaa804a19ffc29e563f14e227"
Nov 26 07:13:50 crc kubenswrapper[4492]: E1126 07:13:50.330140 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82"
Nov 26 07:14:05 crc kubenswrapper[4492]: I1126 07:14:05.439251 4492 scope.go:117] "RemoveContainer" containerID="e66a01ef992e57d5211153b60046f2247dc264cdaa804a19ffc29e563f14e227"
Nov 26 07:14:05 crc kubenswrapper[4492]: E1126 07:14:05.440552 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82"
Nov 26 07:14:06 crc kubenswrapper[4492]: I1126 07:14:06.561609 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-fdnmq"]
Nov 26 07:14:06 crc kubenswrapper[4492]: I1126 07:14:06.563833 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-fdnmq"
Nov 26 07:14:06 crc kubenswrapper[4492]: I1126 07:14:06.583045 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-fdnmq"]
Nov 26 07:14:06 crc kubenswrapper[4492]: I1126 07:14:06.691132 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8cfbd334-8ed7-4cb1-a2fd-3a696cf1ae31-catalog-content\") pod \"redhat-marketplace-fdnmq\" (UID: \"8cfbd334-8ed7-4cb1-a2fd-3a696cf1ae31\") " pod="openshift-marketplace/redhat-marketplace-fdnmq"
Nov 26 07:14:06 crc kubenswrapper[4492]: I1126 07:14:06.691314 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qb56k\" (UniqueName: \"kubernetes.io/projected/8cfbd334-8ed7-4cb1-a2fd-3a696cf1ae31-kube-api-access-qb56k\") pod \"redhat-marketplace-fdnmq\" (UID: \"8cfbd334-8ed7-4cb1-a2fd-3a696cf1ae31\") " pod="openshift-marketplace/redhat-marketplace-fdnmq"
Nov 26 07:14:06 crc kubenswrapper[4492]: I1126 07:14:06.691440 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8cfbd334-8ed7-4cb1-a2fd-3a696cf1ae31-utilities\") pod \"redhat-marketplace-fdnmq\" (UID: \"8cfbd334-8ed7-4cb1-a2fd-3a696cf1ae31\") " pod="openshift-marketplace/redhat-marketplace-fdnmq"
Nov 26 07:14:06 crc kubenswrapper[4492]: I1126 07:14:06.793283 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8cfbd334-8ed7-4cb1-a2fd-3a696cf1ae31-catalog-content\") pod \"redhat-marketplace-fdnmq\" (UID: \"8cfbd334-8ed7-4cb1-a2fd-3a696cf1ae31\") " pod="openshift-marketplace/redhat-marketplace-fdnmq"
Nov 26 07:14:06 crc kubenswrapper[4492]: I1126 07:14:06.793430 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qb56k\" (UniqueName: \"kubernetes.io/projected/8cfbd334-8ed7-4cb1-a2fd-3a696cf1ae31-kube-api-access-qb56k\") pod \"redhat-marketplace-fdnmq\" (UID: \"8cfbd334-8ed7-4cb1-a2fd-3a696cf1ae31\") " pod="openshift-marketplace/redhat-marketplace-fdnmq"
Nov 26 07:14:06 crc kubenswrapper[4492]: I1126 07:14:06.793502 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8cfbd334-8ed7-4cb1-a2fd-3a696cf1ae31-utilities\") pod \"redhat-marketplace-fdnmq\" (UID: \"8cfbd334-8ed7-4cb1-a2fd-3a696cf1ae31\") " pod="openshift-marketplace/redhat-marketplace-fdnmq"
Nov 26 07:14:06 crc kubenswrapper[4492]: I1126 07:14:06.793777 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8cfbd334-8ed7-4cb1-a2fd-3a696cf1ae31-catalog-content\") pod \"redhat-marketplace-fdnmq\" (UID: \"8cfbd334-8ed7-4cb1-a2fd-3a696cf1ae31\") " pod="openshift-marketplace/redhat-marketplace-fdnmq"
Nov 26 07:14:06 crc kubenswrapper[4492]: I1126 07:14:06.793856 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8cfbd334-8ed7-4cb1-a2fd-3a696cf1ae31-utilities\") pod \"redhat-marketplace-fdnmq\" (UID: \"8cfbd334-8ed7-4cb1-a2fd-3a696cf1ae31\") " pod="openshift-marketplace/redhat-marketplace-fdnmq"
Nov 26 07:14:06 crc kubenswrapper[4492]: I1126 07:14:06.813081 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qb56k\" (UniqueName: \"kubernetes.io/projected/8cfbd334-8ed7-4cb1-a2fd-3a696cf1ae31-kube-api-access-qb56k\") pod \"redhat-marketplace-fdnmq\" (UID: \"8cfbd334-8ed7-4cb1-a2fd-3a696cf1ae31\") " pod="openshift-marketplace/redhat-marketplace-fdnmq"
Nov 26 07:14:06 crc kubenswrapper[4492]: I1126 07:14:06.880627 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-fdnmq"
Nov 26 07:14:07 crc kubenswrapper[4492]: I1126 07:14:07.325091 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-fdnmq"]
Nov 26 07:14:07 crc kubenswrapper[4492]: W1126 07:14:07.338696 4492 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8cfbd334_8ed7_4cb1_a2fd_3a696cf1ae31.slice/crio-f9b3b6ce76fb2998a539341d8caac019b0342712a751f32205619da82679a060 WatchSource:0}: Error finding container f9b3b6ce76fb2998a539341d8caac019b0342712a751f32205619da82679a060: Status 404 returned error can't find the container with id f9b3b6ce76fb2998a539341d8caac019b0342712a751f32205619da82679a060
Nov 26 07:14:07 crc kubenswrapper[4492]: I1126 07:14:07.534585 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fdnmq" event={"ID":"8cfbd334-8ed7-4cb1-a2fd-3a696cf1ae31","Type":"ContainerStarted","Data":"8b1452d041868b59412c886288652d6b61f9a2ceee540c7373cc77b2743ab620"}
Nov 26 07:14:07 crc kubenswrapper[4492]: I1126 07:14:07.534915 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fdnmq" event={"ID":"8cfbd334-8ed7-4cb1-a2fd-3a696cf1ae31","Type":"ContainerStarted","Data":"f9b3b6ce76fb2998a539341d8caac019b0342712a751f32205619da82679a060"}
Nov 26 07:14:08 crc kubenswrapper[4492]: I1126 07:14:08.545742 4492 generic.go:334] "Generic (PLEG): container finished" podID="8cfbd334-8ed7-4cb1-a2fd-3a696cf1ae31" containerID="8b1452d041868b59412c886288652d6b61f9a2ceee540c7373cc77b2743ab620" exitCode=0
Nov 26 07:14:08 crc kubenswrapper[4492]: I1126 07:14:08.545832 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fdnmq" event={"ID":"8cfbd334-8ed7-4cb1-a2fd-3a696cf1ae31","Type":"ContainerDied","Data":"8b1452d041868b59412c886288652d6b61f9a2ceee540c7373cc77b2743ab620"}
Nov 26 07:14:08 crc kubenswrapper[4492]: I1126 07:14:08.546099 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fdnmq" event={"ID":"8cfbd334-8ed7-4cb1-a2fd-3a696cf1ae31","Type":"ContainerStarted","Data":"f10d57b0d790abfb97ad3e16040746fd7375d1327e1ac1f9d943817b7ab27b7e"}
Nov 26 07:14:09 crc kubenswrapper[4492]: I1126 07:14:09.577321 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fdnmq" event={"ID":"8cfbd334-8ed7-4cb1-a2fd-3a696cf1ae31","Type":"ContainerDied","Data":"f10d57b0d790abfb97ad3e16040746fd7375d1327e1ac1f9d943817b7ab27b7e"}
Nov 26 07:14:09 crc kubenswrapper[4492]: I1126 07:14:09.577307 4492 generic.go:334] "Generic (PLEG): container finished" podID="8cfbd334-8ed7-4cb1-a2fd-3a696cf1ae31" containerID="f10d57b0d790abfb97ad3e16040746fd7375d1327e1ac1f9d943817b7ab27b7e" exitCode=0
Nov 26 07:14:10 crc kubenswrapper[4492]: I1126 07:14:10.591365 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fdnmq" event={"ID":"8cfbd334-8ed7-4cb1-a2fd-3a696cf1ae31","Type":"ContainerStarted","Data":"d1944645cbae576d720c234c751a549e87d6ada969e7e2b9a8f94e3538087900"}
Nov 26 07:14:10 crc kubenswrapper[4492]: I1126 07:14:10.619807 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-fdnmq" podStartSLOduration=2.03308342 podStartE2EDuration="4.619787897s" podCreationTimestamp="2025-11-26 07:14:06 +0000 UTC" firstStartedPulling="2025-11-26 07:14:07.536839341 +0000 UTC m=+1543.420727639" lastFinishedPulling="2025-11-26 07:14:10.123543817 +0000 UTC m=+1546.007432116" observedRunningTime="2025-11-26 07:14:10.611697016 +0000 UTC m=+1546.495585314" watchObservedRunningTime="2025-11-26 07:14:10.619787897 +0000 UTC m=+1546.503676196"
Nov 26 07:14:16 crc kubenswrapper[4492]: I1126 07:14:16.880806 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-fdnmq"
Nov 26 07:14:16 crc kubenswrapper[4492]: I1126 07:14:16.881568 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-fdnmq"
Nov 26 07:14:16 crc kubenswrapper[4492]: I1126 07:14:16.926283 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-fdnmq"
Nov 26 07:14:17 crc kubenswrapper[4492]: I1126 07:14:17.705429 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-fdnmq"
Nov 26 07:14:17 crc kubenswrapper[4492]: I1126 07:14:17.760811 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-fdnmq"]
Nov 26 07:14:19 crc kubenswrapper[4492]: I1126 07:14:19.683198 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-fdnmq" podUID="8cfbd334-8ed7-4cb1-a2fd-3a696cf1ae31" containerName="registry-server" containerID="cri-o://d1944645cbae576d720c234c751a549e87d6ada969e7e2b9a8f94e3538087900" gracePeriod=2
Nov 26 07:14:20 crc kubenswrapper[4492]: I1126 07:14:20.123286 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-fdnmq"
Nov 26 07:14:20 crc kubenswrapper[4492]: I1126 07:14:20.320348 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qb56k\" (UniqueName: \"kubernetes.io/projected/8cfbd334-8ed7-4cb1-a2fd-3a696cf1ae31-kube-api-access-qb56k\") pod \"8cfbd334-8ed7-4cb1-a2fd-3a696cf1ae31\" (UID: \"8cfbd334-8ed7-4cb1-a2fd-3a696cf1ae31\") "
Nov 26 07:14:20 crc kubenswrapper[4492]: I1126 07:14:20.320795 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8cfbd334-8ed7-4cb1-a2fd-3a696cf1ae31-catalog-content\") pod \"8cfbd334-8ed7-4cb1-a2fd-3a696cf1ae31\" (UID: \"8cfbd334-8ed7-4cb1-a2fd-3a696cf1ae31\") "
Nov 26 07:14:20 crc kubenswrapper[4492]: I1126 07:14:20.320827 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8cfbd334-8ed7-4cb1-a2fd-3a696cf1ae31-utilities\") pod \"8cfbd334-8ed7-4cb1-a2fd-3a696cf1ae31\" (UID: \"8cfbd334-8ed7-4cb1-a2fd-3a696cf1ae31\") "
Nov 26 07:14:20 crc kubenswrapper[4492]: I1126 07:14:20.321809 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8cfbd334-8ed7-4cb1-a2fd-3a696cf1ae31-utilities" (OuterVolumeSpecName: "utilities") pod "8cfbd334-8ed7-4cb1-a2fd-3a696cf1ae31" (UID: "8cfbd334-8ed7-4cb1-a2fd-3a696cf1ae31"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 07:14:20 crc kubenswrapper[4492]: I1126 07:14:20.327746 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8cfbd334-8ed7-4cb1-a2fd-3a696cf1ae31-kube-api-access-qb56k" (OuterVolumeSpecName: "kube-api-access-qb56k") pod "8cfbd334-8ed7-4cb1-a2fd-3a696cf1ae31" (UID: "8cfbd334-8ed7-4cb1-a2fd-3a696cf1ae31"). InnerVolumeSpecName "kube-api-access-qb56k". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 07:14:20 crc kubenswrapper[4492]: I1126 07:14:20.336437 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8cfbd334-8ed7-4cb1-a2fd-3a696cf1ae31-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8cfbd334-8ed7-4cb1-a2fd-3a696cf1ae31" (UID: "8cfbd334-8ed7-4cb1-a2fd-3a696cf1ae31"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 07:14:20 crc kubenswrapper[4492]: I1126 07:14:20.423818 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qb56k\" (UniqueName: \"kubernetes.io/projected/8cfbd334-8ed7-4cb1-a2fd-3a696cf1ae31-kube-api-access-qb56k\") on node \"crc\" DevicePath \"\""
Nov 26 07:14:20 crc kubenswrapper[4492]: I1126 07:14:20.423856 4492 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8cfbd334-8ed7-4cb1-a2fd-3a696cf1ae31-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 26 07:14:20 crc kubenswrapper[4492]: I1126 07:14:20.423866 4492 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8cfbd334-8ed7-4cb1-a2fd-3a696cf1ae31-utilities\") on node \"crc\" DevicePath \"\""
Nov 26 07:14:20 crc kubenswrapper[4492]: I1126 07:14:20.438600 4492 scope.go:117] "RemoveContainer" containerID="e66a01ef992e57d5211153b60046f2247dc264cdaa804a19ffc29e563f14e227"
Nov 26 07:14:20 crc kubenswrapper[4492]: E1126 07:14:20.439025 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82"
Nov 26 07:14:20 crc kubenswrapper[4492]: I1126 07:14:20.696376 4492 generic.go:334] "Generic (PLEG): container finished" podID="8cfbd334-8ed7-4cb1-a2fd-3a696cf1ae31" containerID="d1944645cbae576d720c234c751a549e87d6ada969e7e2b9a8f94e3538087900" exitCode=0
Nov 26 07:14:20 crc kubenswrapper[4492]: I1126 07:14:20.696443 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-fdnmq"
Nov 26 07:14:20 crc kubenswrapper[4492]: I1126 07:14:20.696439 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fdnmq" event={"ID":"8cfbd334-8ed7-4cb1-a2fd-3a696cf1ae31","Type":"ContainerDied","Data":"d1944645cbae576d720c234c751a549e87d6ada969e7e2b9a8f94e3538087900"}
Nov 26 07:14:20 crc kubenswrapper[4492]: I1126 07:14:20.697113 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fdnmq" event={"ID":"8cfbd334-8ed7-4cb1-a2fd-3a696cf1ae31","Type":"ContainerDied","Data":"f9b3b6ce76fb2998a539341d8caac019b0342712a751f32205619da82679a060"}
Nov 26 07:14:20 crc kubenswrapper[4492]: I1126 07:14:20.697154 4492 scope.go:117] "RemoveContainer" containerID="d1944645cbae576d720c234c751a549e87d6ada969e7e2b9a8f94e3538087900"
Nov 26 07:14:20 crc kubenswrapper[4492]: I1126 07:14:20.728911 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-fdnmq"]
Nov 26 07:14:20 crc kubenswrapper[4492]: I1126 07:14:20.737771 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-fdnmq"]
Nov 26 07:14:20 crc kubenswrapper[4492]: I1126 07:14:20.751766 4492 scope.go:117] "RemoveContainer" containerID="f10d57b0d790abfb97ad3e16040746fd7375d1327e1ac1f9d943817b7ab27b7e"
Nov 26 07:14:20 crc kubenswrapper[4492]: I1126 07:14:20.771776 4492 scope.go:117] "RemoveContainer" containerID="8b1452d041868b59412c886288652d6b61f9a2ceee540c7373cc77b2743ab620"
Nov 26 07:14:20 crc kubenswrapper[4492]: I1126 07:14:20.819559 4492 scope.go:117] "RemoveContainer" containerID="d1944645cbae576d720c234c751a549e87d6ada969e7e2b9a8f94e3538087900"
Nov 26 07:14:20 crc kubenswrapper[4492]: E1126 07:14:20.820047 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d1944645cbae576d720c234c751a549e87d6ada969e7e2b9a8f94e3538087900\": container with ID starting with d1944645cbae576d720c234c751a549e87d6ada969e7e2b9a8f94e3538087900 not found: ID does not exist" containerID="d1944645cbae576d720c234c751a549e87d6ada969e7e2b9a8f94e3538087900"
Nov 26 07:14:20 crc kubenswrapper[4492]: I1126 07:14:20.820086 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d1944645cbae576d720c234c751a549e87d6ada969e7e2b9a8f94e3538087900"} err="failed to get container status \"d1944645cbae576d720c234c751a549e87d6ada969e7e2b9a8f94e3538087900\": rpc error: code = NotFound desc = could not find container \"d1944645cbae576d720c234c751a549e87d6ada969e7e2b9a8f94e3538087900\": container with ID starting with d1944645cbae576d720c234c751a549e87d6ada969e7e2b9a8f94e3538087900 not found: ID does not exist"
Nov 26 07:14:20 crc kubenswrapper[4492]: I1126 07:14:20.820129 4492 scope.go:117] "RemoveContainer" containerID="f10d57b0d790abfb97ad3e16040746fd7375d1327e1ac1f9d943817b7ab27b7e"
Nov 26 07:14:20 crc kubenswrapper[4492]: E1126 07:14:20.822269 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f10d57b0d790abfb97ad3e16040746fd7375d1327e1ac1f9d943817b7ab27b7e\": container with ID starting with f10d57b0d790abfb97ad3e16040746fd7375d1327e1ac1f9d943817b7ab27b7e not found: ID does not exist" containerID="f10d57b0d790abfb97ad3e16040746fd7375d1327e1ac1f9d943817b7ab27b7e"
Nov 26 07:14:20 crc kubenswrapper[4492]: I1126 07:14:20.822302 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f10d57b0d790abfb97ad3e16040746fd7375d1327e1ac1f9d943817b7ab27b7e"} err="failed to get container status \"f10d57b0d790abfb97ad3e16040746fd7375d1327e1ac1f9d943817b7ab27b7e\": rpc error: code = NotFound desc = could not find container \"f10d57b0d790abfb97ad3e16040746fd7375d1327e1ac1f9d943817b7ab27b7e\": container with ID starting with f10d57b0d790abfb97ad3e16040746fd7375d1327e1ac1f9d943817b7ab27b7e not found: ID does not exist"
Nov 26 07:14:20 crc kubenswrapper[4492]: I1126 07:14:20.822323 4492 scope.go:117] "RemoveContainer" containerID="8b1452d041868b59412c886288652d6b61f9a2ceee540c7373cc77b2743ab620"
Nov 26 07:14:20 crc kubenswrapper[4492]: E1126 07:14:20.822836 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8b1452d041868b59412c886288652d6b61f9a2ceee540c7373cc77b2743ab620\": container with ID starting with 8b1452d041868b59412c886288652d6b61f9a2ceee540c7373cc77b2743ab620 not found: ID does not exist" containerID="8b1452d041868b59412c886288652d6b61f9a2ceee540c7373cc77b2743ab620"
Nov 26 07:14:20 crc kubenswrapper[4492]: I1126 07:14:20.822859 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8b1452d041868b59412c886288652d6b61f9a2ceee540c7373cc77b2743ab620"} err="failed to get container status \"8b1452d041868b59412c886288652d6b61f9a2ceee540c7373cc77b2743ab620\": rpc error: code = NotFound desc = could not find container \"8b1452d041868b59412c886288652d6b61f9a2ceee540c7373cc77b2743ab620\": container with ID starting with 8b1452d041868b59412c886288652d6b61f9a2ceee540c7373cc77b2743ab620 not found: ID does not exist"
Nov 26 07:14:22 crc kubenswrapper[4492]: I1126 07:14:22.450849 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8cfbd334-8ed7-4cb1-a2fd-3a696cf1ae31" path="/var/lib/kubelet/pods/8cfbd334-8ed7-4cb1-a2fd-3a696cf1ae31/volumes"
Nov 26 07:14:32 crc kubenswrapper[4492]: I1126 07:14:32.439389 4492 scope.go:117] "RemoveContainer" containerID="e66a01ef992e57d5211153b60046f2247dc264cdaa804a19ffc29e563f14e227"
Nov 26 07:14:32 crc kubenswrapper[4492]: E1126 07:14:32.440216 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82"
Nov 26 07:14:34 crc kubenswrapper[4492]: I1126 07:14:34.072572 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-create-mwl7j"]
Nov 26 07:14:34 crc kubenswrapper[4492]: I1126 07:14:34.094918 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-19de-account-create-update-n48h2"]
Nov 26 07:14:34 crc kubenswrapper[4492]: I1126 07:14:34.101697 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-create-mwl7j"]
Nov 26 07:14:34 crc kubenswrapper[4492]: I1126 07:14:34.107789 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-19de-account-create-update-n48h2"]
Nov 26 07:14:34 crc kubenswrapper[4492]: I1126 07:14:34.113419 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-create-m9nq2"]
Nov 26 07:14:34 crc kubenswrapper[4492]: I1126 07:14:34.118836 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-b651-account-create-update-w2d6t"]
Nov 26 07:14:34 crc kubenswrapper[4492]: I1126 07:14:34.124801 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-create-m9nq2"]
Nov 26 07:14:34 crc kubenswrapper[4492]: I1126 07:14:34.130084 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-b651-account-create-update-w2d6t"]
Nov 26 07:14:34 crc kubenswrapper[4492]: I1126 07:14:34.451735 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0ff43c91-26eb-40da-a2cc-9d07c1e3be1c" path="/var/lib/kubelet/pods/0ff43c91-26eb-40da-a2cc-9d07c1e3be1c/volumes"
Nov 26 07:14:34 crc kubenswrapper[4492]: I1126 07:14:34.454267 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="38aafecf-4ce9-460d-8a3d-aa42b566ab81" path="/var/lib/kubelet/pods/38aafecf-4ce9-460d-8a3d-aa42b566ab81/volumes"
Nov 26 07:14:34 crc kubenswrapper[4492]: I1126 07:14:34.456105 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7788ab37-0666-48ca-aff9-b145fc248e4c" path="/var/lib/kubelet/pods/7788ab37-0666-48ca-aff9-b145fc248e4c/volumes"
Nov 26 07:14:34 crc kubenswrapper[4492]: I1126 07:14:34.457511 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="af5046a4-bfe1-474f-8954-ada04116efa6" path="/var/lib/kubelet/pods/af5046a4-bfe1-474f-8954-ada04116efa6/volumes"
Nov 26 07:14:38 crc kubenswrapper[4492]: I1126 07:14:38.028862 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-create-txt7d"]
Nov 26 07:14:38 crc kubenswrapper[4492]: I1126 07:14:38.034556 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-89b0-account-create-update-f54f8"]
Nov 26 07:14:38 crc kubenswrapper[4492]: I1126 07:14:38.039695 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-create-txt7d"]
Nov 26 07:14:38 crc kubenswrapper[4492]: I1126 07:14:38.045617 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-89b0-account-create-update-f54f8"]
Nov 26 07:14:38 crc kubenswrapper[4492]: I1126 07:14:38.453512 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="282cd276-6463-4ade-8c5a-a7682fc10269" path="/var/lib/kubelet/pods/282cd276-6463-4ade-8c5a-a7682fc10269/volumes"
Nov 26 07:14:38 crc kubenswrapper[4492]: I1126 07:14:38.455818 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="35c76cd0-263a-4ad9-8d69-5fa9960652f3" path="/var/lib/kubelet/pods/35c76cd0-263a-4ad9-8d69-5fa9960652f3/volumes"
Nov 26 07:14:44 crc kubenswrapper[4492]: I1126 07:14:44.444379 4492 scope.go:117] "RemoveContainer" containerID="e66a01ef992e57d5211153b60046f2247dc264cdaa804a19ffc29e563f14e227"
Nov 26 07:14:44 crc kubenswrapper[4492]: E1126 07:14:44.445123 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82"
Nov 26 07:14:56 crc kubenswrapper[4492]: I1126 07:14:56.928106 4492 scope.go:117] "RemoveContainer" containerID="50a19b459fca099d754e857e8a42c1a6a26f7f26b34b2fbc6fc3a6f17dc0355d"
Nov 26 07:14:56 crc kubenswrapper[4492]: I1126 07:14:56.968595 4492 scope.go:117] "RemoveContainer" containerID="d2f8d830e51b4d867cb451729dc1f9967f17e8cf5b60090c697ff70977a76be5"
Nov 26 07:14:57 crc kubenswrapper[4492]: I1126 07:14:57.001577 4492 scope.go:117] "RemoveContainer" containerID="a29190ed28c314ec902e58e562106baebba6067c4ffa1da33fcdcb25bf62f805"
Nov 26 07:14:57 crc kubenswrapper[4492]: I1126 07:14:57.038187 4492 scope.go:117] "RemoveContainer" containerID="52fff9fdbf3891e54d5df5dedee4db6dd8022e4e02bdb8cbd05bb01b22b1e39f"
Nov 26 07:14:57 crc kubenswrapper[4492]: I1126 07:14:57.073901 4492 scope.go:117] "RemoveContainer" containerID="48c8e66bb4cec5ab05350b6d4032ff9c8cd7464acdac7323636e8673eb2aad69"
Nov 26 07:14:57 crc kubenswrapper[4492]: I1126 07:14:57.114182 4492 scope.go:117] "RemoveContainer" containerID="df30eded3806eebf464d5499f35ee4fcef7419a8cc33fabd25ef30117d8e85ea"
Nov 26 07:14:57 crc kubenswrapper[4492]: I1126 07:14:57.439017 4492 scope.go:117] "RemoveContainer" containerID="e66a01ef992e57d5211153b60046f2247dc264cdaa804a19ffc29e563f14e227"
Nov 26 07:14:57 crc kubenswrapper[4492]: E1126 07:14:57.439593 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82"
Nov 26 07:15:00 crc kubenswrapper[4492]: I1126 07:15:00.148188 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402355-dtvcz"]
Nov 26 07:15:00 crc kubenswrapper[4492]: E1126 07:15:00.149025 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8cfbd334-8ed7-4cb1-a2fd-3a696cf1ae31" containerName="registry-server"
Nov 26 07:15:00 crc kubenswrapper[4492]: I1126 07:15:00.149040 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="8cfbd334-8ed7-4cb1-a2fd-3a696cf1ae31" containerName="registry-server"
Nov 26 07:15:00 crc kubenswrapper[4492]: E1126 07:15:00.149054 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8cfbd334-8ed7-4cb1-a2fd-3a696cf1ae31" containerName="extract-utilities"
Nov 26 07:15:00 crc kubenswrapper[4492]: I1126 07:15:00.149061 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="8cfbd334-8ed7-4cb1-a2fd-3a696cf1ae31" containerName="extract-utilities"
Nov 26 07:15:00 crc kubenswrapper[4492]: E1126 07:15:00.149073 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8cfbd334-8ed7-4cb1-a2fd-3a696cf1ae31" containerName="extract-content"
Nov 26 07:15:00 crc kubenswrapper[4492]: I1126 07:15:00.149079 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="8cfbd334-8ed7-4cb1-a2fd-3a696cf1ae31" containerName="extract-content"
Nov 26 07:15:00 crc kubenswrapper[4492]: I1126 07:15:00.149283 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="8cfbd334-8ed7-4cb1-a2fd-3a696cf1ae31" containerName="registry-server"
Nov 26 07:15:00 crc kubenswrapper[4492]: I1126 07:15:00.150075 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402355-dtvcz"
Nov 26 07:15:00 crc kubenswrapper[4492]: I1126 07:15:00.152279 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Nov 26 07:15:00 crc kubenswrapper[4492]: I1126 07:15:00.152915 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Nov 26 07:15:00 crc kubenswrapper[4492]: I1126 07:15:00.157454 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402355-dtvcz"]
Nov 26 07:15:00 crc kubenswrapper[4492]: I1126 07:15:00.170009 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/4cd3ee71-cc07-4cbb-93aa-f732ffa793ca-secret-volume\") pod \"collect-profiles-29402355-dtvcz\" (UID: \"4cd3ee71-cc07-4cbb-93aa-f732ffa793ca\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402355-dtvcz"
Nov 26 07:15:00 crc kubenswrapper[4492]: I1126 07:15:00.170138 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xprcd\" (UniqueName: \"kubernetes.io/projected/4cd3ee71-cc07-4cbb-93aa-f732ffa793ca-kube-api-access-xprcd\") pod \"collect-profiles-29402355-dtvcz\" (UID: \"4cd3ee71-cc07-4cbb-93aa-f732ffa793ca\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402355-dtvcz"
Nov 26 07:15:00 crc kubenswrapper[4492]: I1126 07:15:00.170219 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4cd3ee71-cc07-4cbb-93aa-f732ffa793ca-config-volume\") pod \"collect-profiles-29402355-dtvcz\" (UID: \"4cd3ee71-cc07-4cbb-93aa-f732ffa793ca\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402355-dtvcz"
Nov 26 07:15:00 crc kubenswrapper[4492]: I1126 07:15:00.272109 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/4cd3ee71-cc07-4cbb-93aa-f732ffa793ca-secret-volume\") pod \"collect-profiles-29402355-dtvcz\" (UID: \"4cd3ee71-cc07-4cbb-93aa-f732ffa793ca\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402355-dtvcz"
Nov 26 07:15:00 crc kubenswrapper[4492]: I1126 07:15:00.272201 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xprcd\" (UniqueName: \"kubernetes.io/projected/4cd3ee71-cc07-4cbb-93aa-f732ffa793ca-kube-api-access-xprcd\") pod \"collect-profiles-29402355-dtvcz\" (UID: \"4cd3ee71-cc07-4cbb-93aa-f732ffa793ca\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402355-dtvcz"
Nov 26 07:15:00 crc kubenswrapper[4492]: I1126 07:15:00.272344 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4cd3ee71-cc07-4cbb-93aa-f732ffa793ca-config-volume\") pod \"collect-profiles-29402355-dtvcz\" (UID: \"4cd3ee71-cc07-4cbb-93aa-f732ffa793ca\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402355-dtvcz"
Nov 26 07:15:00 crc kubenswrapper[4492]: I1126 07:15:00.273459 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4cd3ee71-cc07-4cbb-93aa-f732ffa793ca-config-volume\") pod \"collect-profiles-29402355-dtvcz\" (UID: \"4cd3ee71-cc07-4cbb-93aa-f732ffa793ca\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402355-dtvcz"
Nov 26 07:15:00 crc kubenswrapper[4492]: I1126 07:15:00.279726 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/4cd3ee71-cc07-4cbb-93aa-f732ffa793ca-secret-volume\") pod \"collect-profiles-29402355-dtvcz\" (UID: \"4cd3ee71-cc07-4cbb-93aa-f732ffa793ca\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402355-dtvcz"
Nov 26 07:15:00 crc kubenswrapper[4492]: I1126 07:15:00.287913 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xprcd\" (UniqueName: \"kubernetes.io/projected/4cd3ee71-cc07-4cbb-93aa-f732ffa793ca-kube-api-access-xprcd\") pod \"collect-profiles-29402355-dtvcz\" (UID: \"4cd3ee71-cc07-4cbb-93aa-f732ffa793ca\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402355-dtvcz"
Nov 26 07:15:00 crc kubenswrapper[4492]: I1126 07:15:00.479655 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402355-dtvcz"
Nov 26 07:15:00 crc kubenswrapper[4492]: I1126 07:15:00.921285 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402355-dtvcz"]
Nov 26 07:15:01 crc kubenswrapper[4492]: I1126 07:15:01.105497 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402355-dtvcz" event={"ID":"4cd3ee71-cc07-4cbb-93aa-f732ffa793ca","Type":"ContainerStarted","Data":"0057352d6d6f7dc49a3a43dc4de6c73d7915f2cc7e378ea0cdab4c0254d2338a"}
Nov 26 07:15:01 crc kubenswrapper[4492]: I1126 07:15:01.105913 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402355-dtvcz" event={"ID":"4cd3ee71-cc07-4cbb-93aa-f732ffa793ca","Type":"ContainerStarted","Data":"c85897685c0feec502ad4a90dfe47f7e904a5ba9740f5186f64c353c92c697b5"}
Nov 26 07:15:01 crc kubenswrapper[4492]: I1126 07:15:01.131724 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29402355-dtvcz" podStartSLOduration=1.1316980939999999 podStartE2EDuration="1.131698094s" podCreationTimestamp="2025-11-26 07:15:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:15:01.123105428 +0000 UTC m=+1597.006993727" watchObservedRunningTime="2025-11-26 07:15:01.131698094 +0000 UTC m=+1597.015586392"
Nov 26 07:15:02 crc kubenswrapper[4492]: I1126 07:15:02.053754 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-create-phkjq"]
Nov 26 07:15:02 crc kubenswrapper[4492]: I1126 07:15:02.061968 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-create-phkjq"]
Nov 26 07:15:02 crc kubenswrapper[4492]: I1126 07:15:02.116194 4492 generic.go:334] "Generic (PLEG): container finished" podID="4cd3ee71-cc07-4cbb-93aa-f732ffa793ca" containerID="0057352d6d6f7dc49a3a43dc4de6c73d7915f2cc7e378ea0cdab4c0254d2338a" exitCode=0
Nov 26 07:15:02 crc kubenswrapper[4492]: I1126 07:15:02.116276 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402355-dtvcz" event={"ID":"4cd3ee71-cc07-4cbb-93aa-f732ffa793ca","Type":"ContainerDied","Data":"0057352d6d6f7dc49a3a43dc4de6c73d7915f2cc7e378ea0cdab4c0254d2338a"}
Nov 26 07:15:02 crc kubenswrapper[4492]: I1126 07:15:02.449415 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9770e190-04af-494f-9a6a-f3b242b9b9ad" path="/var/lib/kubelet/pods/9770e190-04af-494f-9a6a-f3b242b9b9ad/volumes"
Nov 26 07:15:03 crc kubenswrapper[4492]: I1126 07:15:03.036849 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-fccc-account-create-update-cxnnm"]
Nov 26 07:15:03 crc kubenswrapper[4492]: I1126 07:15:03.043892 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-1cd3-account-create-update-hp2bv"]
Nov 26 07:15:03 crc kubenswrapper[4492]: I1126 07:15:03.049446 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-db-create-nl5zs"]
Nov 26 07:15:03 crc kubenswrapper[4492]: I1126 07:15:03.056878 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-create-s6jw9"]
Nov 26 07:15:03 crc kubenswrapper[4492]: I1126 07:15:03.064988 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-fccc-account-create-update-cxnnm"]
Nov 26 07:15:03 crc kubenswrapper[4492]: I1126 07:15:03.070574 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-1cd3-account-create-update-hp2bv"]
Nov 26 07:15:03 crc kubenswrapper[4492]: I1126 07:15:03.075932 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-db-create-nl5zs"]
Nov 26 07:15:03 crc kubenswrapper[4492]: I1126 07:15:03.081370 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-create-s6jw9"]
Nov 26 07:15:03 crc kubenswrapper[4492]: I1126 07:15:03.431474 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402355-dtvcz"
Nov 26 07:15:03 crc kubenswrapper[4492]: I1126 07:15:03.569988 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/4cd3ee71-cc07-4cbb-93aa-f732ffa793ca-secret-volume\") pod \"4cd3ee71-cc07-4cbb-93aa-f732ffa793ca\" (UID: \"4cd3ee71-cc07-4cbb-93aa-f732ffa793ca\") "
Nov 26 07:15:03 crc kubenswrapper[4492]: I1126 07:15:03.571134 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xprcd\" (UniqueName: \"kubernetes.io/projected/4cd3ee71-cc07-4cbb-93aa-f732ffa793ca-kube-api-access-xprcd\") pod \"4cd3ee71-cc07-4cbb-93aa-f732ffa793ca\" (UID: \"4cd3ee71-cc07-4cbb-93aa-f732ffa793ca\") "
Nov 26 07:15:03 crc kubenswrapper[4492]: I1126 07:15:03.571218 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4cd3ee71-cc07-4cbb-93aa-f732ffa793ca-config-volume\") pod \"4cd3ee71-cc07-4cbb-93aa-f732ffa793ca\" (UID: \"4cd3ee71-cc07-4cbb-93aa-f732ffa793ca\") "
Nov 26 07:15:03 crc kubenswrapper[4492]: I1126 07:15:03.571855 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4cd3ee71-cc07-4cbb-93aa-f732ffa793ca-config-volume" (OuterVolumeSpecName: "config-volume") pod "4cd3ee71-cc07-4cbb-93aa-f732ffa793ca" (UID: "4cd3ee71-cc07-4cbb-93aa-f732ffa793ca"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 07:15:03 crc kubenswrapper[4492]: I1126 07:15:03.572111 4492 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4cd3ee71-cc07-4cbb-93aa-f732ffa793ca-config-volume\") on node \"crc\" DevicePath \"\""
Nov 26 07:15:03 crc kubenswrapper[4492]: I1126 07:15:03.578147 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4cd3ee71-cc07-4cbb-93aa-f732ffa793ca-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "4cd3ee71-cc07-4cbb-93aa-f732ffa793ca" (UID: "4cd3ee71-cc07-4cbb-93aa-f732ffa793ca"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 07:15:03 crc kubenswrapper[4492]: I1126 07:15:03.578990 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4cd3ee71-cc07-4cbb-93aa-f732ffa793ca-kube-api-access-xprcd" (OuterVolumeSpecName: "kube-api-access-xprcd") pod "4cd3ee71-cc07-4cbb-93aa-f732ffa793ca" (UID: "4cd3ee71-cc07-4cbb-93aa-f732ffa793ca"). InnerVolumeSpecName "kube-api-access-xprcd". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 07:15:03 crc kubenswrapper[4492]: I1126 07:15:03.674735 4492 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/4cd3ee71-cc07-4cbb-93aa-f732ffa793ca-secret-volume\") on node \"crc\" DevicePath \"\""
Nov 26 07:15:03 crc kubenswrapper[4492]: I1126 07:15:03.674783 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xprcd\" (UniqueName: \"kubernetes.io/projected/4cd3ee71-cc07-4cbb-93aa-f732ffa793ca-kube-api-access-xprcd\") on node \"crc\" DevicePath \"\""
Nov 26 07:15:04 crc kubenswrapper[4492]: I1126 07:15:04.034422 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-create-tdf78"]
Nov 26 07:15:04 crc kubenswrapper[4492]: I1126 07:15:04.046599 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-d139-account-create-update-c7sz4"]
Nov 26 07:15:04 crc kubenswrapper[4492]: I1126 07:15:04.056854 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-136d-account-create-update-pslhl"]
Nov 26 07:15:04 crc kubenswrapper[4492]: I1126 07:15:04.062530 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-d139-account-create-update-c7sz4"]
Nov 26 07:15:04 crc kubenswrapper[4492]: I1126 07:15:04.067858 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-136d-account-create-update-pslhl"]
Nov 26 07:15:04 crc kubenswrapper[4492]: I1126 07:15:04.072956 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-create-tdf78"]
Nov 26 07:15:04 crc kubenswrapper[4492]: I1126 07:15:04.137283 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402355-dtvcz" event={"ID":"4cd3ee71-cc07-4cbb-93aa-f732ffa793ca","Type":"ContainerDied","Data":"c85897685c0feec502ad4a90dfe47f7e904a5ba9740f5186f64c353c92c697b5"}
Nov 26 07:15:04 crc kubenswrapper[4492]: I1126 07:15:04.137328 4492 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c85897685c0feec502ad4a90dfe47f7e904a5ba9740f5186f64c353c92c697b5"
Nov 26 07:15:04 crc kubenswrapper[4492]: I1126 07:15:04.137370 4492 util.go:48] "No ready sandbox for pod can be found.
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402355-dtvcz" Nov 26 07:15:04 crc kubenswrapper[4492]: I1126 07:15:04.450574 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="27e7bb7f-22c2-4852-b264-371eeaa3907d" path="/var/lib/kubelet/pods/27e7bb7f-22c2-4852-b264-371eeaa3907d/volumes" Nov 26 07:15:04 crc kubenswrapper[4492]: I1126 07:15:04.452239 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="45f05976-8325-42bb-a4ac-c49c0fd7a0c2" path="/var/lib/kubelet/pods/45f05976-8325-42bb-a4ac-c49c0fd7a0c2/volumes" Nov 26 07:15:04 crc kubenswrapper[4492]: I1126 07:15:04.454382 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4b193a3f-89f3-467c-93db-3c84e3d9272d" path="/var/lib/kubelet/pods/4b193a3f-89f3-467c-93db-3c84e3d9272d/volumes" Nov 26 07:15:04 crc kubenswrapper[4492]: I1126 07:15:04.456003 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7ac9fc7f-8feb-41e2-b61b-084a9efd4512" path="/var/lib/kubelet/pods/7ac9fc7f-8feb-41e2-b61b-084a9efd4512/volumes" Nov 26 07:15:04 crc kubenswrapper[4492]: I1126 07:15:04.457905 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7b557b94-67b1-424a-9f45-84ea1183e728" path="/var/lib/kubelet/pods/7b557b94-67b1-424a-9f45-84ea1183e728/volumes" Nov 26 07:15:04 crc kubenswrapper[4492]: I1126 07:15:04.459786 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="98538850-301a-4da8-aa72-7df26932b307" path="/var/lib/kubelet/pods/98538850-301a-4da8-aa72-7df26932b307/volumes" Nov 26 07:15:04 crc kubenswrapper[4492]: I1126 07:15:04.463467 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c5ed7a55-ffae-4dfd-b384-1f7eaa41a221" path="/var/lib/kubelet/pods/c5ed7a55-ffae-4dfd-b384-1f7eaa41a221/volumes" Nov 26 07:15:08 crc kubenswrapper[4492]: I1126 07:15:08.439925 4492 scope.go:117] "RemoveContainer" containerID="e66a01ef992e57d5211153b60046f2247dc264cdaa804a19ffc29e563f14e227" Nov 26 07:15:08 crc kubenswrapper[4492]: E1126 07:15:08.441115 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 07:15:09 crc kubenswrapper[4492]: I1126 07:15:09.028434 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-sync-j8sgq"] Nov 26 07:15:09 crc kubenswrapper[4492]: I1126 07:15:09.035234 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-sync-j8sgq"] Nov 26 07:15:10 crc kubenswrapper[4492]: I1126 07:15:10.452374 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="17dcd8ee-932f-4f74-be85-653f6f94a213" path="/var/lib/kubelet/pods/17dcd8ee-932f-4f74-be85-653f6f94a213/volumes" Nov 26 07:15:12 crc kubenswrapper[4492]: I1126 07:15:12.029719 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-sync-c6d4d"] Nov 26 07:15:12 crc kubenswrapper[4492]: I1126 07:15:12.035396 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-sync-c6d4d"] Nov 26 07:15:12 crc kubenswrapper[4492]: I1126 07:15:12.450814 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="d934c9b1-8adf-41d3-9501-d55fffe02fd7" path="/var/lib/kubelet/pods/d934c9b1-8adf-41d3-9501-d55fffe02fd7/volumes" Nov 26 07:15:23 crc kubenswrapper[4492]: I1126 07:15:23.439202 4492 scope.go:117] "RemoveContainer" containerID="e66a01ef992e57d5211153b60046f2247dc264cdaa804a19ffc29e563f14e227" Nov 26 07:15:23 crc kubenswrapper[4492]: E1126 07:15:23.440144 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 07:15:32 crc kubenswrapper[4492]: I1126 07:15:32.036525 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-sync-rl9z8"] Nov 26 07:15:32 crc kubenswrapper[4492]: I1126 07:15:32.044575 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-sync-rl9z8"] Nov 26 07:15:32 crc kubenswrapper[4492]: I1126 07:15:32.449011 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0afbb611-57c8-4d5a-a258-cc184185d75c" path="/var/lib/kubelet/pods/0afbb611-57c8-4d5a-a258-cc184185d75c/volumes" Nov 26 07:15:36 crc kubenswrapper[4492]: I1126 07:15:36.439607 4492 scope.go:117] "RemoveContainer" containerID="e66a01ef992e57d5211153b60046f2247dc264cdaa804a19ffc29e563f14e227" Nov 26 07:15:36 crc kubenswrapper[4492]: E1126 07:15:36.440573 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 07:15:43 crc kubenswrapper[4492]: I1126 07:15:43.203965 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-kxdl2"] Nov 26 07:15:43 crc kubenswrapper[4492]: E1126 07:15:43.205199 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4cd3ee71-cc07-4cbb-93aa-f732ffa793ca" containerName="collect-profiles" Nov 26 07:15:43 crc kubenswrapper[4492]: I1126 07:15:43.205219 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="4cd3ee71-cc07-4cbb-93aa-f732ffa793ca" containerName="collect-profiles" Nov 26 07:15:43 crc kubenswrapper[4492]: I1126 07:15:43.205412 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="4cd3ee71-cc07-4cbb-93aa-f732ffa793ca" containerName="collect-profiles" Nov 26 07:15:43 crc kubenswrapper[4492]: I1126 07:15:43.206853 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-kxdl2" Nov 26 07:15:43 crc kubenswrapper[4492]: I1126 07:15:43.220616 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-kxdl2"] Nov 26 07:15:43 crc kubenswrapper[4492]: I1126 07:15:43.403092 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/692e4c9a-f411-4ac6-8b33-10acd6742b4d-catalog-content\") pod \"certified-operators-kxdl2\" (UID: \"692e4c9a-f411-4ac6-8b33-10acd6742b4d\") " pod="openshift-marketplace/certified-operators-kxdl2" Nov 26 07:15:43 crc kubenswrapper[4492]: I1126 07:15:43.403168 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-txhr2\" (UniqueName: \"kubernetes.io/projected/692e4c9a-f411-4ac6-8b33-10acd6742b4d-kube-api-access-txhr2\") pod \"certified-operators-kxdl2\" (UID: \"692e4c9a-f411-4ac6-8b33-10acd6742b4d\") " pod="openshift-marketplace/certified-operators-kxdl2" Nov 26 07:15:43 crc kubenswrapper[4492]: I1126 07:15:43.403225 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/692e4c9a-f411-4ac6-8b33-10acd6742b4d-utilities\") pod \"certified-operators-kxdl2\" (UID: \"692e4c9a-f411-4ac6-8b33-10acd6742b4d\") " pod="openshift-marketplace/certified-operators-kxdl2" Nov 26 07:15:43 crc kubenswrapper[4492]: I1126 07:15:43.505876 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-txhr2\" (UniqueName: \"kubernetes.io/projected/692e4c9a-f411-4ac6-8b33-10acd6742b4d-kube-api-access-txhr2\") pod \"certified-operators-kxdl2\" (UID: \"692e4c9a-f411-4ac6-8b33-10acd6742b4d\") " pod="openshift-marketplace/certified-operators-kxdl2" Nov 26 07:15:43 crc kubenswrapper[4492]: I1126 07:15:43.505922 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/692e4c9a-f411-4ac6-8b33-10acd6742b4d-utilities\") pod \"certified-operators-kxdl2\" (UID: \"692e4c9a-f411-4ac6-8b33-10acd6742b4d\") " pod="openshift-marketplace/certified-operators-kxdl2" Nov 26 07:15:43 crc kubenswrapper[4492]: I1126 07:15:43.506149 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/692e4c9a-f411-4ac6-8b33-10acd6742b4d-catalog-content\") pod \"certified-operators-kxdl2\" (UID: \"692e4c9a-f411-4ac6-8b33-10acd6742b4d\") " pod="openshift-marketplace/certified-operators-kxdl2" Nov 26 07:15:43 crc kubenswrapper[4492]: I1126 07:15:43.507693 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/692e4c9a-f411-4ac6-8b33-10acd6742b4d-utilities\") pod \"certified-operators-kxdl2\" (UID: \"692e4c9a-f411-4ac6-8b33-10acd6742b4d\") " pod="openshift-marketplace/certified-operators-kxdl2" Nov 26 07:15:43 crc kubenswrapper[4492]: I1126 07:15:43.507818 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/692e4c9a-f411-4ac6-8b33-10acd6742b4d-catalog-content\") pod \"certified-operators-kxdl2\" (UID: \"692e4c9a-f411-4ac6-8b33-10acd6742b4d\") " pod="openshift-marketplace/certified-operators-kxdl2" Nov 26 07:15:43 crc kubenswrapper[4492]: I1126 07:15:43.542361 4492 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-txhr2\" (UniqueName: \"kubernetes.io/projected/692e4c9a-f411-4ac6-8b33-10acd6742b4d-kube-api-access-txhr2\") pod \"certified-operators-kxdl2\" (UID: \"692e4c9a-f411-4ac6-8b33-10acd6742b4d\") " pod="openshift-marketplace/certified-operators-kxdl2" Nov 26 07:15:43 crc kubenswrapper[4492]: I1126 07:15:43.831817 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-kxdl2" Nov 26 07:15:44 crc kubenswrapper[4492]: I1126 07:15:44.322001 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-kxdl2"] Nov 26 07:15:44 crc kubenswrapper[4492]: I1126 07:15:44.572470 4492 generic.go:334] "Generic (PLEG): container finished" podID="692e4c9a-f411-4ac6-8b33-10acd6742b4d" containerID="6fb867b6def94279167270bb48c4b8259ccc58f2e509a4cbe64bb6592ed4710c" exitCode=0 Nov 26 07:15:44 crc kubenswrapper[4492]: I1126 07:15:44.573329 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kxdl2" event={"ID":"692e4c9a-f411-4ac6-8b33-10acd6742b4d","Type":"ContainerDied","Data":"6fb867b6def94279167270bb48c4b8259ccc58f2e509a4cbe64bb6592ed4710c"} Nov 26 07:15:44 crc kubenswrapper[4492]: I1126 07:15:44.573409 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kxdl2" event={"ID":"692e4c9a-f411-4ac6-8b33-10acd6742b4d","Type":"ContainerStarted","Data":"a45b874d2b8d108fc515b7804f4377e6a123e128bd7234ea13e70ae332d993d3"} Nov 26 07:15:44 crc kubenswrapper[4492]: I1126 07:15:44.574707 4492 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 26 07:15:45 crc kubenswrapper[4492]: I1126 07:15:45.587386 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kxdl2" event={"ID":"692e4c9a-f411-4ac6-8b33-10acd6742b4d","Type":"ContainerStarted","Data":"20fba51634db50d5e99417c015cedc76400b23ee1f5af8663f8cc5edfb8ecd5f"} Nov 26 07:15:46 crc kubenswrapper[4492]: I1126 07:15:46.599410 4492 generic.go:334] "Generic (PLEG): container finished" podID="692e4c9a-f411-4ac6-8b33-10acd6742b4d" containerID="20fba51634db50d5e99417c015cedc76400b23ee1f5af8663f8cc5edfb8ecd5f" exitCode=0 Nov 26 07:15:46 crc kubenswrapper[4492]: I1126 07:15:46.599607 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kxdl2" event={"ID":"692e4c9a-f411-4ac6-8b33-10acd6742b4d","Type":"ContainerDied","Data":"20fba51634db50d5e99417c015cedc76400b23ee1f5af8663f8cc5edfb8ecd5f"} Nov 26 07:15:47 crc kubenswrapper[4492]: I1126 07:15:47.614422 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kxdl2" event={"ID":"692e4c9a-f411-4ac6-8b33-10acd6742b4d","Type":"ContainerStarted","Data":"c372e45b61881a9a978db8b5d887d41b8062c94de0d77b0a4c79c58e8ece0241"} Nov 26 07:15:47 crc kubenswrapper[4492]: I1126 07:15:47.632925 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-kxdl2" podStartSLOduration=2.107192296 podStartE2EDuration="4.632897193s" podCreationTimestamp="2025-11-26 07:15:43 +0000 UTC" firstStartedPulling="2025-11-26 07:15:44.574483075 +0000 UTC m=+1640.458371374" lastFinishedPulling="2025-11-26 07:15:47.100187973 +0000 UTC m=+1642.984076271" observedRunningTime="2025-11-26 07:15:47.629601717 +0000 UTC m=+1643.513490025" watchObservedRunningTime="2025-11-26 
07:15:47.632897193 +0000 UTC m=+1643.516785492" Nov 26 07:15:48 crc kubenswrapper[4492]: I1126 07:15:48.438554 4492 scope.go:117] "RemoveContainer" containerID="e66a01ef992e57d5211153b60046f2247dc264cdaa804a19ffc29e563f14e227" Nov 26 07:15:48 crc kubenswrapper[4492]: E1126 07:15:48.439109 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 07:15:52 crc kubenswrapper[4492]: I1126 07:15:52.664420 4492 generic.go:334] "Generic (PLEG): container finished" podID="016b702a-3d42-416f-9c38-007854d31be0" containerID="82bf4c870aaebf9152a5bb99730315eb6e792e4a0d2de352ed9486081a3b7350" exitCode=0 Nov 26 07:15:52 crc kubenswrapper[4492]: I1126 07:15:52.664504 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-tq5xs" event={"ID":"016b702a-3d42-416f-9c38-007854d31be0","Type":"ContainerDied","Data":"82bf4c870aaebf9152a5bb99730315eb6e792e4a0d2de352ed9486081a3b7350"} Nov 26 07:15:53 crc kubenswrapper[4492]: I1126 07:15:53.832395 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-kxdl2" Nov 26 07:15:53 crc kubenswrapper[4492]: I1126 07:15:53.832833 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-kxdl2" Nov 26 07:15:53 crc kubenswrapper[4492]: I1126 07:15:53.882889 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-kxdl2" Nov 26 07:15:54 crc kubenswrapper[4492]: I1126 07:15:54.024785 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-tq5xs" Nov 26 07:15:54 crc kubenswrapper[4492]: I1126 07:15:54.150255 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/016b702a-3d42-416f-9c38-007854d31be0-inventory\") pod \"016b702a-3d42-416f-9c38-007854d31be0\" (UID: \"016b702a-3d42-416f-9c38-007854d31be0\") " Nov 26 07:15:54 crc kubenswrapper[4492]: I1126 07:15:54.150486 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/016b702a-3d42-416f-9c38-007854d31be0-ssh-key\") pod \"016b702a-3d42-416f-9c38-007854d31be0\" (UID: \"016b702a-3d42-416f-9c38-007854d31be0\") " Nov 26 07:15:54 crc kubenswrapper[4492]: I1126 07:15:54.150850 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r7nkr\" (UniqueName: \"kubernetes.io/projected/016b702a-3d42-416f-9c38-007854d31be0-kube-api-access-r7nkr\") pod \"016b702a-3d42-416f-9c38-007854d31be0\" (UID: \"016b702a-3d42-416f-9c38-007854d31be0\") " Nov 26 07:15:54 crc kubenswrapper[4492]: I1126 07:15:54.158473 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/016b702a-3d42-416f-9c38-007854d31be0-kube-api-access-r7nkr" (OuterVolumeSpecName: "kube-api-access-r7nkr") pod "016b702a-3d42-416f-9c38-007854d31be0" (UID: "016b702a-3d42-416f-9c38-007854d31be0"). 
InnerVolumeSpecName "kube-api-access-r7nkr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:15:54 crc kubenswrapper[4492]: I1126 07:15:54.177498 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/016b702a-3d42-416f-9c38-007854d31be0-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "016b702a-3d42-416f-9c38-007854d31be0" (UID: "016b702a-3d42-416f-9c38-007854d31be0"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:15:54 crc kubenswrapper[4492]: I1126 07:15:54.177879 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/016b702a-3d42-416f-9c38-007854d31be0-inventory" (OuterVolumeSpecName: "inventory") pod "016b702a-3d42-416f-9c38-007854d31be0" (UID: "016b702a-3d42-416f-9c38-007854d31be0"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:15:54 crc kubenswrapper[4492]: I1126 07:15:54.252783 4492 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/016b702a-3d42-416f-9c38-007854d31be0-inventory\") on node \"crc\" DevicePath \"\"" Nov 26 07:15:54 crc kubenswrapper[4492]: I1126 07:15:54.252822 4492 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/016b702a-3d42-416f-9c38-007854d31be0-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 26 07:15:54 crc kubenswrapper[4492]: I1126 07:15:54.252832 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r7nkr\" (UniqueName: \"kubernetes.io/projected/016b702a-3d42-416f-9c38-007854d31be0-kube-api-access-r7nkr\") on node \"crc\" DevicePath \"\"" Nov 26 07:15:54 crc kubenswrapper[4492]: I1126 07:15:54.689531 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-tq5xs" Nov 26 07:15:54 crc kubenswrapper[4492]: I1126 07:15:54.690039 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-tq5xs" event={"ID":"016b702a-3d42-416f-9c38-007854d31be0","Type":"ContainerDied","Data":"5951be8e43089d0343a70233b6faee47114d7e1b7e5f9f561af70dbf87497aa3"} Nov 26 07:15:54 crc kubenswrapper[4492]: I1126 07:15:54.690073 4492 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5951be8e43089d0343a70233b6faee47114d7e1b7e5f9f561af70dbf87497aa3" Nov 26 07:15:54 crc kubenswrapper[4492]: I1126 07:15:54.736754 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-kxdl2" Nov 26 07:15:54 crc kubenswrapper[4492]: I1126 07:15:54.777340 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-jjl22"] Nov 26 07:15:54 crc kubenswrapper[4492]: E1126 07:15:54.778000 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="016b702a-3d42-416f-9c38-007854d31be0" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Nov 26 07:15:54 crc kubenswrapper[4492]: I1126 07:15:54.778020 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="016b702a-3d42-416f-9c38-007854d31be0" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Nov 26 07:15:54 crc kubenswrapper[4492]: I1126 07:15:54.778224 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="016b702a-3d42-416f-9c38-007854d31be0" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Nov 26 07:15:54 crc kubenswrapper[4492]: I1126 07:15:54.778886 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-jjl22" Nov 26 07:15:54 crc kubenswrapper[4492]: I1126 07:15:54.783372 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 26 07:15:54 crc kubenswrapper[4492]: I1126 07:15:54.783489 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 26 07:15:54 crc kubenswrapper[4492]: I1126 07:15:54.783508 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-mxgqb" Nov 26 07:15:54 crc kubenswrapper[4492]: I1126 07:15:54.783775 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 26 07:15:54 crc kubenswrapper[4492]: I1126 07:15:54.790313 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-kxdl2"] Nov 26 07:15:54 crc kubenswrapper[4492]: I1126 07:15:54.803605 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-jjl22"] Nov 26 07:15:54 crc kubenswrapper[4492]: I1126 07:15:54.865538 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/57457a36-eece-4b23-96ae-2c52b9a15182-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-jjl22\" (UID: \"57457a36-eece-4b23-96ae-2c52b9a15182\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-jjl22" Nov 26 07:15:54 crc kubenswrapper[4492]: I1126 07:15:54.865720 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rkc76\" (UniqueName: \"kubernetes.io/projected/57457a36-eece-4b23-96ae-2c52b9a15182-kube-api-access-rkc76\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-jjl22\" (UID: \"57457a36-eece-4b23-96ae-2c52b9a15182\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-jjl22" Nov 26 07:15:54 crc kubenswrapper[4492]: I1126 07:15:54.865990 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/57457a36-eece-4b23-96ae-2c52b9a15182-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-jjl22\" (UID: \"57457a36-eece-4b23-96ae-2c52b9a15182\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-jjl22" Nov 26 07:15:54 crc kubenswrapper[4492]: I1126 07:15:54.968874 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/57457a36-eece-4b23-96ae-2c52b9a15182-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-jjl22\" (UID: \"57457a36-eece-4b23-96ae-2c52b9a15182\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-jjl22" Nov 26 07:15:54 crc kubenswrapper[4492]: I1126 07:15:54.969058 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rkc76\" (UniqueName: \"kubernetes.io/projected/57457a36-eece-4b23-96ae-2c52b9a15182-kube-api-access-rkc76\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-jjl22\" (UID: \"57457a36-eece-4b23-96ae-2c52b9a15182\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-jjl22" Nov 26 07:15:54 crc kubenswrapper[4492]: I1126 07:15:54.969165 4492 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/57457a36-eece-4b23-96ae-2c52b9a15182-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-jjl22\" (UID: \"57457a36-eece-4b23-96ae-2c52b9a15182\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-jjl22" Nov 26 07:15:54 crc kubenswrapper[4492]: I1126 07:15:54.975357 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/57457a36-eece-4b23-96ae-2c52b9a15182-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-jjl22\" (UID: \"57457a36-eece-4b23-96ae-2c52b9a15182\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-jjl22" Nov 26 07:15:54 crc kubenswrapper[4492]: I1126 07:15:54.983797 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rkc76\" (UniqueName: \"kubernetes.io/projected/57457a36-eece-4b23-96ae-2c52b9a15182-kube-api-access-rkc76\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-jjl22\" (UID: \"57457a36-eece-4b23-96ae-2c52b9a15182\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-jjl22" Nov 26 07:15:54 crc kubenswrapper[4492]: I1126 07:15:54.984372 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/57457a36-eece-4b23-96ae-2c52b9a15182-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-jjl22\" (UID: \"57457a36-eece-4b23-96ae-2c52b9a15182\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-jjl22" Nov 26 07:15:55 crc kubenswrapper[4492]: I1126 07:15:55.058105 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-sync-79hj2"] Nov 26 07:15:55 crc kubenswrapper[4492]: I1126 07:15:55.070338 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-sync-79hj2"] Nov 26 07:15:55 crc kubenswrapper[4492]: I1126 07:15:55.114362 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-jjl22" Nov 26 07:15:55 crc kubenswrapper[4492]: I1126 07:15:55.659460 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-jjl22"] Nov 26 07:15:55 crc kubenswrapper[4492]: I1126 07:15:55.700239 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-jjl22" event={"ID":"57457a36-eece-4b23-96ae-2c52b9a15182","Type":"ContainerStarted","Data":"b77d53efd6877803a4586a8622ac7561c0e1eae02c15023656ad6df8b62e006d"} Nov 26 07:15:56 crc kubenswrapper[4492]: I1126 07:15:56.451293 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0093dcb6-c7e5-4b5a-94a3-55fc7465109a" path="/var/lib/kubelet/pods/0093dcb6-c7e5-4b5a-94a3-55fc7465109a/volumes" Nov 26 07:15:56 crc kubenswrapper[4492]: I1126 07:15:56.711527 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-jjl22" event={"ID":"57457a36-eece-4b23-96ae-2c52b9a15182","Type":"ContainerStarted","Data":"10529f7ec4f1c34d08b3729872a6a840266ecc0e64eb3ce2b27f0e31de0c44c6"} Nov 26 07:15:56 crc kubenswrapper[4492]: I1126 07:15:56.711739 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-kxdl2" podUID="692e4c9a-f411-4ac6-8b33-10acd6742b4d" containerName="registry-server" containerID="cri-o://c372e45b61881a9a978db8b5d887d41b8062c94de0d77b0a4c79c58e8ece0241" gracePeriod=2 Nov 26 07:15:56 crc kubenswrapper[4492]: I1126 07:15:56.749628 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-jjl22" podStartSLOduration=2.212734139 podStartE2EDuration="2.74961356s" podCreationTimestamp="2025-11-26 07:15:54 +0000 UTC" firstStartedPulling="2025-11-26 07:15:55.656114747 +0000 UTC m=+1651.540003044" lastFinishedPulling="2025-11-26 07:15:56.192994167 +0000 UTC m=+1652.076882465" observedRunningTime="2025-11-26 07:15:56.74046533 +0000 UTC m=+1652.624353628" watchObservedRunningTime="2025-11-26 07:15:56.74961356 +0000 UTC m=+1652.633501858" Nov 26 07:15:57 crc kubenswrapper[4492]: I1126 07:15:57.099163 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-kxdl2" Nov 26 07:15:57 crc kubenswrapper[4492]: I1126 07:15:57.133326 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/692e4c9a-f411-4ac6-8b33-10acd6742b4d-utilities\") pod \"692e4c9a-f411-4ac6-8b33-10acd6742b4d\" (UID: \"692e4c9a-f411-4ac6-8b33-10acd6742b4d\") " Nov 26 07:15:57 crc kubenswrapper[4492]: I1126 07:15:57.133579 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/692e4c9a-f411-4ac6-8b33-10acd6742b4d-catalog-content\") pod \"692e4c9a-f411-4ac6-8b33-10acd6742b4d\" (UID: \"692e4c9a-f411-4ac6-8b33-10acd6742b4d\") " Nov 26 07:15:57 crc kubenswrapper[4492]: I1126 07:15:57.133788 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-txhr2\" (UniqueName: \"kubernetes.io/projected/692e4c9a-f411-4ac6-8b33-10acd6742b4d-kube-api-access-txhr2\") pod \"692e4c9a-f411-4ac6-8b33-10acd6742b4d\" (UID: \"692e4c9a-f411-4ac6-8b33-10acd6742b4d\") " Nov 26 07:15:57 crc kubenswrapper[4492]: I1126 07:15:57.134331 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/692e4c9a-f411-4ac6-8b33-10acd6742b4d-utilities" (OuterVolumeSpecName: "utilities") pod "692e4c9a-f411-4ac6-8b33-10acd6742b4d" (UID: "692e4c9a-f411-4ac6-8b33-10acd6742b4d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:15:57 crc kubenswrapper[4492]: I1126 07:15:57.139328 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/692e4c9a-f411-4ac6-8b33-10acd6742b4d-kube-api-access-txhr2" (OuterVolumeSpecName: "kube-api-access-txhr2") pod "692e4c9a-f411-4ac6-8b33-10acd6742b4d" (UID: "692e4c9a-f411-4ac6-8b33-10acd6742b4d"). InnerVolumeSpecName "kube-api-access-txhr2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:15:57 crc kubenswrapper[4492]: I1126 07:15:57.183042 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/692e4c9a-f411-4ac6-8b33-10acd6742b4d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "692e4c9a-f411-4ac6-8b33-10acd6742b4d" (UID: "692e4c9a-f411-4ac6-8b33-10acd6742b4d"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:15:57 crc kubenswrapper[4492]: I1126 07:15:57.235152 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-txhr2\" (UniqueName: \"kubernetes.io/projected/692e4c9a-f411-4ac6-8b33-10acd6742b4d-kube-api-access-txhr2\") on node \"crc\" DevicePath \"\"" Nov 26 07:15:57 crc kubenswrapper[4492]: I1126 07:15:57.235273 4492 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/692e4c9a-f411-4ac6-8b33-10acd6742b4d-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 07:15:57 crc kubenswrapper[4492]: I1126 07:15:57.235335 4492 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/692e4c9a-f411-4ac6-8b33-10acd6742b4d-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 07:15:57 crc kubenswrapper[4492]: I1126 07:15:57.292752 4492 scope.go:117] "RemoveContainer" containerID="a7da5b7c9163e0278ea4c98c0b4679a9f780720bd8fe822e5d4bd5e3c2246515" Nov 26 07:15:57 crc kubenswrapper[4492]: I1126 07:15:57.335632 4492 scope.go:117] "RemoveContainer" containerID="d04a9f1e1e2d04c91f008ca508af213e2e49acb67d3f9f2397ed841d76f309d3" Nov 26 07:15:57 crc kubenswrapper[4492]: I1126 07:15:57.369820 4492 scope.go:117] "RemoveContainer" containerID="bc36adc448fff470d1848e0d272ffb71681450bfc5bcddb8fdb90aef70c6cd6a" Nov 26 07:15:57 crc kubenswrapper[4492]: I1126 07:15:57.411213 4492 scope.go:117] "RemoveContainer" containerID="297923635a8a1396cf3500ac2ae4aab939fb95aeaaa2e93c3e59669d382c50a4" Nov 26 07:15:57 crc kubenswrapper[4492]: I1126 07:15:57.470122 4492 scope.go:117] "RemoveContainer" containerID="0a033c207259e67f04802c08de54ed5b21711c1159d120320b22c73a1c05aed6" Nov 26 07:15:57 crc kubenswrapper[4492]: I1126 07:15:57.492275 4492 scope.go:117] "RemoveContainer" containerID="378c8ec908fbbdb4c51886bb6a2aae385f73667b1f051ba4e0ce2b4b30191a58" Nov 26 07:15:57 crc kubenswrapper[4492]: I1126 07:15:57.512680 4492 scope.go:117] "RemoveContainer" containerID="e57a8174821d3fac1cf7e65a6bc615d669095d79385d24cb311950da4fcd023d" Nov 26 07:15:57 crc kubenswrapper[4492]: I1126 07:15:57.532219 4492 scope.go:117] "RemoveContainer" containerID="9225b5f86521510ad56d9c5a1a04de3248c0aaceac3d8bf8d9bf2a940f9eb5da" Nov 26 07:15:57 crc kubenswrapper[4492]: I1126 07:15:57.566410 4492 scope.go:117] "RemoveContainer" containerID="55c9f99473c91b275fbc5b09ad9542a9dedfff61abeb72e11f1a43b64d37b6a7" Nov 26 07:15:57 crc kubenswrapper[4492]: I1126 07:15:57.594029 4492 scope.go:117] "RemoveContainer" containerID="ec64bde6351f0bc46940eccfd21a44db6e39b6048f6be897def604f723a3d7df" Nov 26 07:15:57 crc kubenswrapper[4492]: I1126 07:15:57.619600 4492 scope.go:117] "RemoveContainer" containerID="a47769566775605c1729f6e47025939de11d410fa967fa5364914970d6d89eea" Nov 26 07:15:57 crc kubenswrapper[4492]: I1126 07:15:57.649668 4492 scope.go:117] "RemoveContainer" containerID="cc6a8a9aa2fe8c761d0f115f7232d5a289e9fa4ee909c92a6c4b7aa2f10c7ef0" Nov 26 07:15:57 crc kubenswrapper[4492]: I1126 07:15:57.768106 4492 generic.go:334] "Generic (PLEG): container finished" podID="692e4c9a-f411-4ac6-8b33-10acd6742b4d" containerID="c372e45b61881a9a978db8b5d887d41b8062c94de0d77b0a4c79c58e8ece0241" exitCode=0 Nov 26 07:15:57 crc kubenswrapper[4492]: I1126 07:15:57.769678 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kxdl2" 
event={"ID":"692e4c9a-f411-4ac6-8b33-10acd6742b4d","Type":"ContainerDied","Data":"c372e45b61881a9a978db8b5d887d41b8062c94de0d77b0a4c79c58e8ece0241"} Nov 26 07:15:57 crc kubenswrapper[4492]: I1126 07:15:57.769716 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kxdl2" event={"ID":"692e4c9a-f411-4ac6-8b33-10acd6742b4d","Type":"ContainerDied","Data":"a45b874d2b8d108fc515b7804f4377e6a123e128bd7234ea13e70ae332d993d3"} Nov 26 07:15:57 crc kubenswrapper[4492]: I1126 07:15:57.769741 4492 scope.go:117] "RemoveContainer" containerID="c372e45b61881a9a978db8b5d887d41b8062c94de0d77b0a4c79c58e8ece0241" Nov 26 07:15:57 crc kubenswrapper[4492]: I1126 07:15:57.769855 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-kxdl2" Nov 26 07:15:57 crc kubenswrapper[4492]: I1126 07:15:57.809951 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-kxdl2"] Nov 26 07:15:57 crc kubenswrapper[4492]: I1126 07:15:57.813727 4492 scope.go:117] "RemoveContainer" containerID="20fba51634db50d5e99417c015cedc76400b23ee1f5af8663f8cc5edfb8ecd5f" Nov 26 07:15:57 crc kubenswrapper[4492]: I1126 07:15:57.817006 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-kxdl2"] Nov 26 07:15:57 crc kubenswrapper[4492]: I1126 07:15:57.838709 4492 scope.go:117] "RemoveContainer" containerID="6fb867b6def94279167270bb48c4b8259ccc58f2e509a4cbe64bb6592ed4710c" Nov 26 07:15:57 crc kubenswrapper[4492]: I1126 07:15:57.872393 4492 scope.go:117] "RemoveContainer" containerID="c372e45b61881a9a978db8b5d887d41b8062c94de0d77b0a4c79c58e8ece0241" Nov 26 07:15:57 crc kubenswrapper[4492]: E1126 07:15:57.872926 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c372e45b61881a9a978db8b5d887d41b8062c94de0d77b0a4c79c58e8ece0241\": container with ID starting with c372e45b61881a9a978db8b5d887d41b8062c94de0d77b0a4c79c58e8ece0241 not found: ID does not exist" containerID="c372e45b61881a9a978db8b5d887d41b8062c94de0d77b0a4c79c58e8ece0241" Nov 26 07:15:57 crc kubenswrapper[4492]: I1126 07:15:57.872964 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c372e45b61881a9a978db8b5d887d41b8062c94de0d77b0a4c79c58e8ece0241"} err="failed to get container status \"c372e45b61881a9a978db8b5d887d41b8062c94de0d77b0a4c79c58e8ece0241\": rpc error: code = NotFound desc = could not find container \"c372e45b61881a9a978db8b5d887d41b8062c94de0d77b0a4c79c58e8ece0241\": container with ID starting with c372e45b61881a9a978db8b5d887d41b8062c94de0d77b0a4c79c58e8ece0241 not found: ID does not exist" Nov 26 07:15:57 crc kubenswrapper[4492]: I1126 07:15:57.873005 4492 scope.go:117] "RemoveContainer" containerID="20fba51634db50d5e99417c015cedc76400b23ee1f5af8663f8cc5edfb8ecd5f" Nov 26 07:15:57 crc kubenswrapper[4492]: E1126 07:15:57.873711 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"20fba51634db50d5e99417c015cedc76400b23ee1f5af8663f8cc5edfb8ecd5f\": container with ID starting with 20fba51634db50d5e99417c015cedc76400b23ee1f5af8663f8cc5edfb8ecd5f not found: ID does not exist" containerID="20fba51634db50d5e99417c015cedc76400b23ee1f5af8663f8cc5edfb8ecd5f" Nov 26 07:15:57 crc kubenswrapper[4492]: I1126 07:15:57.873808 4492 pod_container_deletor.go:53] "DeleteContainer 
returned error" containerID={"Type":"cri-o","ID":"20fba51634db50d5e99417c015cedc76400b23ee1f5af8663f8cc5edfb8ecd5f"} err="failed to get container status \"20fba51634db50d5e99417c015cedc76400b23ee1f5af8663f8cc5edfb8ecd5f\": rpc error: code = NotFound desc = could not find container \"20fba51634db50d5e99417c015cedc76400b23ee1f5af8663f8cc5edfb8ecd5f\": container with ID starting with 20fba51634db50d5e99417c015cedc76400b23ee1f5af8663f8cc5edfb8ecd5f not found: ID does not exist" Nov 26 07:15:57 crc kubenswrapper[4492]: I1126 07:15:57.873851 4492 scope.go:117] "RemoveContainer" containerID="6fb867b6def94279167270bb48c4b8259ccc58f2e509a4cbe64bb6592ed4710c" Nov 26 07:15:57 crc kubenswrapper[4492]: E1126 07:15:57.874334 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6fb867b6def94279167270bb48c4b8259ccc58f2e509a4cbe64bb6592ed4710c\": container with ID starting with 6fb867b6def94279167270bb48c4b8259ccc58f2e509a4cbe64bb6592ed4710c not found: ID does not exist" containerID="6fb867b6def94279167270bb48c4b8259ccc58f2e509a4cbe64bb6592ed4710c" Nov 26 07:15:57 crc kubenswrapper[4492]: I1126 07:15:57.874382 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6fb867b6def94279167270bb48c4b8259ccc58f2e509a4cbe64bb6592ed4710c"} err="failed to get container status \"6fb867b6def94279167270bb48c4b8259ccc58f2e509a4cbe64bb6592ed4710c\": rpc error: code = NotFound desc = could not find container \"6fb867b6def94279167270bb48c4b8259ccc58f2e509a4cbe64bb6592ed4710c\": container with ID starting with 6fb867b6def94279167270bb48c4b8259ccc58f2e509a4cbe64bb6592ed4710c not found: ID does not exist" Nov 26 07:15:58 crc kubenswrapper[4492]: I1126 07:15:58.450885 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="692e4c9a-f411-4ac6-8b33-10acd6742b4d" path="/var/lib/kubelet/pods/692e4c9a-f411-4ac6-8b33-10acd6742b4d/volumes" Nov 26 07:16:02 crc kubenswrapper[4492]: I1126 07:16:02.026023 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-hqvtp"] Nov 26 07:16:02 crc kubenswrapper[4492]: I1126 07:16:02.032040 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-hqvtp"] Nov 26 07:16:02 crc kubenswrapper[4492]: I1126 07:16:02.449580 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dbe81971-8d1f-4681-9bfb-5b13a46a5788" path="/var/lib/kubelet/pods/dbe81971-8d1f-4681-9bfb-5b13a46a5788/volumes" Nov 26 07:16:03 crc kubenswrapper[4492]: I1126 07:16:03.438272 4492 scope.go:117] "RemoveContainer" containerID="e66a01ef992e57d5211153b60046f2247dc264cdaa804a19ffc29e563f14e227" Nov 26 07:16:03 crc kubenswrapper[4492]: E1126 07:16:03.438556 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 07:16:11 crc kubenswrapper[4492]: I1126 07:16:11.032876 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-sync-9249p"] Nov 26 07:16:11 crc kubenswrapper[4492]: I1126 07:16:11.040272 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-sync-9249p"] Nov 26 07:16:12 crc 
kubenswrapper[4492]: I1126 07:16:12.028351 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-db-sync-7s6sk"] Nov 26 07:16:12 crc kubenswrapper[4492]: I1126 07:16:12.037603 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-db-sync-7s6sk"] Nov 26 07:16:12 crc kubenswrapper[4492]: I1126 07:16:12.452237 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ad2234e1-842b-4bba-bd21-9fb781403667" path="/var/lib/kubelet/pods/ad2234e1-842b-4bba-bd21-9fb781403667/volumes" Nov 26 07:16:12 crc kubenswrapper[4492]: I1126 07:16:12.453053 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e34f6949-eab2-4b97-9ba1-54ed3e59da5c" path="/var/lib/kubelet/pods/e34f6949-eab2-4b97-9ba1-54ed3e59da5c/volumes" Nov 26 07:16:17 crc kubenswrapper[4492]: I1126 07:16:17.029593 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-sync-8w6bv"] Nov 26 07:16:17 crc kubenswrapper[4492]: I1126 07:16:17.035706 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-sync-8w6bv"] Nov 26 07:16:17 crc kubenswrapper[4492]: I1126 07:16:17.439008 4492 scope.go:117] "RemoveContainer" containerID="e66a01ef992e57d5211153b60046f2247dc264cdaa804a19ffc29e563f14e227" Nov 26 07:16:17 crc kubenswrapper[4492]: E1126 07:16:17.439372 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 07:16:18 crc kubenswrapper[4492]: I1126 07:16:18.449736 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a497bcf3-f8db-4b08-b5e3-33d050f9901a" path="/var/lib/kubelet/pods/a497bcf3-f8db-4b08-b5e3-33d050f9901a/volumes" Nov 26 07:16:30 crc kubenswrapper[4492]: I1126 07:16:30.438827 4492 scope.go:117] "RemoveContainer" containerID="e66a01ef992e57d5211153b60046f2247dc264cdaa804a19ffc29e563f14e227" Nov 26 07:16:30 crc kubenswrapper[4492]: E1126 07:16:30.439654 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 07:16:45 crc kubenswrapper[4492]: I1126 07:16:45.439720 4492 scope.go:117] "RemoveContainer" containerID="e66a01ef992e57d5211153b60046f2247dc264cdaa804a19ffc29e563f14e227" Nov 26 07:16:45 crc kubenswrapper[4492]: E1126 07:16:45.440945 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 07:16:57 crc kubenswrapper[4492]: I1126 07:16:57.963161 4492 scope.go:117] "RemoveContainer" containerID="ad33beecc6ccb3a991f02d101327f026e456d05ca55d63d06df60954716ad9d4" Nov 26 
07:16:57 crc kubenswrapper[4492]: I1126 07:16:57.997291 4492 scope.go:117] "RemoveContainer" containerID="0139f1254c25e47ff0698b3f33336d095e00582bb7eb1857b5398dd1c39fcbd9"
Nov 26 07:16:58 crc kubenswrapper[4492]: I1126 07:16:58.030360 4492 scope.go:117] "RemoveContainer" containerID="c574a168ff50590cf9930203b023690b39d0cb0a4648651d42d746723916a98b"
Nov 26 07:16:58 crc kubenswrapper[4492]: I1126 07:16:58.064096 4492 scope.go:117] "RemoveContainer" containerID="67997b8189653f22b1f851f2eb4fbc6ebd61e5a924b9dc1ab9f1bb3eb873129d"
Nov 26 07:16:58 crc kubenswrapper[4492]: I1126 07:16:58.373164 4492 generic.go:334] "Generic (PLEG): container finished" podID="57457a36-eece-4b23-96ae-2c52b9a15182" containerID="10529f7ec4f1c34d08b3729872a6a840266ecc0e64eb3ce2b27f0e31de0c44c6" exitCode=0
Nov 26 07:16:58 crc kubenswrapper[4492]: I1126 07:16:58.373270 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-jjl22" event={"ID":"57457a36-eece-4b23-96ae-2c52b9a15182","Type":"ContainerDied","Data":"10529f7ec4f1c34d08b3729872a6a840266ecc0e64eb3ce2b27f0e31de0c44c6"}
Nov 26 07:16:59 crc kubenswrapper[4492]: I1126 07:16:59.724665 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-jjl22"
Nov 26 07:16:59 crc kubenswrapper[4492]: I1126 07:16:59.837261 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rkc76\" (UniqueName: \"kubernetes.io/projected/57457a36-eece-4b23-96ae-2c52b9a15182-kube-api-access-rkc76\") pod \"57457a36-eece-4b23-96ae-2c52b9a15182\" (UID: \"57457a36-eece-4b23-96ae-2c52b9a15182\") "
Nov 26 07:16:59 crc kubenswrapper[4492]: I1126 07:16:59.837646 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/57457a36-eece-4b23-96ae-2c52b9a15182-ssh-key\") pod \"57457a36-eece-4b23-96ae-2c52b9a15182\" (UID: \"57457a36-eece-4b23-96ae-2c52b9a15182\") "
Nov 26 07:16:59 crc kubenswrapper[4492]: I1126 07:16:59.837722 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/57457a36-eece-4b23-96ae-2c52b9a15182-inventory\") pod \"57457a36-eece-4b23-96ae-2c52b9a15182\" (UID: \"57457a36-eece-4b23-96ae-2c52b9a15182\") "
Nov 26 07:16:59 crc kubenswrapper[4492]: I1126 07:16:59.844564 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57457a36-eece-4b23-96ae-2c52b9a15182-kube-api-access-rkc76" (OuterVolumeSpecName: "kube-api-access-rkc76") pod "57457a36-eece-4b23-96ae-2c52b9a15182" (UID: "57457a36-eece-4b23-96ae-2c52b9a15182"). InnerVolumeSpecName "kube-api-access-rkc76". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 07:16:59 crc kubenswrapper[4492]: I1126 07:16:59.864511 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/57457a36-eece-4b23-96ae-2c52b9a15182-inventory" (OuterVolumeSpecName: "inventory") pod "57457a36-eece-4b23-96ae-2c52b9a15182" (UID: "57457a36-eece-4b23-96ae-2c52b9a15182"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 07:16:59 crc kubenswrapper[4492]: I1126 07:16:59.864883 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/57457a36-eece-4b23-96ae-2c52b9a15182-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "57457a36-eece-4b23-96ae-2c52b9a15182" (UID: "57457a36-eece-4b23-96ae-2c52b9a15182"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 07:16:59 crc kubenswrapper[4492]: I1126 07:16:59.940773 4492 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/57457a36-eece-4b23-96ae-2c52b9a15182-ssh-key\") on node \"crc\" DevicePath \"\""
Nov 26 07:16:59 crc kubenswrapper[4492]: I1126 07:16:59.940930 4492 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/57457a36-eece-4b23-96ae-2c52b9a15182-inventory\") on node \"crc\" DevicePath \"\""
Nov 26 07:16:59 crc kubenswrapper[4492]: I1126 07:16:59.941008 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rkc76\" (UniqueName: \"kubernetes.io/projected/57457a36-eece-4b23-96ae-2c52b9a15182-kube-api-access-rkc76\") on node \"crc\" DevicePath \"\""
Nov 26 07:17:00 crc kubenswrapper[4492]: I1126 07:17:00.395004 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-jjl22" event={"ID":"57457a36-eece-4b23-96ae-2c52b9a15182","Type":"ContainerDied","Data":"b77d53efd6877803a4586a8622ac7561c0e1eae02c15023656ad6df8b62e006d"}
Nov 26 07:17:00 crc kubenswrapper[4492]: I1126 07:17:00.395388 4492 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b77d53efd6877803a4586a8622ac7561c0e1eae02c15023656ad6df8b62e006d"
Nov 26 07:17:00 crc kubenswrapper[4492]: I1126 07:17:00.395487 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-jjl22"
Nov 26 07:17:00 crc kubenswrapper[4492]: I1126 07:17:00.438053 4492 scope.go:117] "RemoveContainer" containerID="e66a01ef992e57d5211153b60046f2247dc264cdaa804a19ffc29e563f14e227"
Nov 26 07:17:00 crc kubenswrapper[4492]: E1126 07:17:00.438733 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82"
Nov 26 07:17:00 crc kubenswrapper[4492]: I1126 07:17:00.475653 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-sh2zq"]
Nov 26 07:17:00 crc kubenswrapper[4492]: E1126 07:17:00.476260 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="692e4c9a-f411-4ac6-8b33-10acd6742b4d" containerName="registry-server"
Nov 26 07:17:00 crc kubenswrapper[4492]: I1126 07:17:00.476347 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="692e4c9a-f411-4ac6-8b33-10acd6742b4d" containerName="registry-server"
Nov 26 07:17:00 crc kubenswrapper[4492]: E1126 07:17:00.476412 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="57457a36-eece-4b23-96ae-2c52b9a15182" containerName="configure-network-edpm-deployment-openstack-edpm-ipam"
Nov 26 07:17:00 crc kubenswrapper[4492]: I1126 07:17:00.476467 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="57457a36-eece-4b23-96ae-2c52b9a15182" containerName="configure-network-edpm-deployment-openstack-edpm-ipam"
Nov 26 07:17:00 crc kubenswrapper[4492]: E1126 07:17:00.476517 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="692e4c9a-f411-4ac6-8b33-10acd6742b4d" containerName="extract-content"
Nov 26 07:17:00 crc kubenswrapper[4492]: I1126 07:17:00.476567 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="692e4c9a-f411-4ac6-8b33-10acd6742b4d" containerName="extract-content"
Nov 26 07:17:00 crc kubenswrapper[4492]: E1126 07:17:00.476636 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="692e4c9a-f411-4ac6-8b33-10acd6742b4d" containerName="extract-utilities"
Nov 26 07:17:00 crc kubenswrapper[4492]: I1126 07:17:00.476682 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="692e4c9a-f411-4ac6-8b33-10acd6742b4d" containerName="extract-utilities"
Nov 26 07:17:00 crc kubenswrapper[4492]: I1126 07:17:00.476925 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="57457a36-eece-4b23-96ae-2c52b9a15182" containerName="configure-network-edpm-deployment-openstack-edpm-ipam"
Nov 26 07:17:00 crc kubenswrapper[4492]: I1126 07:17:00.477006 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="692e4c9a-f411-4ac6-8b33-10acd6742b4d" containerName="registry-server"
Nov 26 07:17:00 crc kubenswrapper[4492]: I1126 07:17:00.477790 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-sh2zq"
Nov 26 07:17:00 crc kubenswrapper[4492]: I1126 07:17:00.481689 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Nov 26 07:17:00 crc kubenswrapper[4492]: I1126 07:17:00.481760 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-mxgqb"
Nov 26 07:17:00 crc kubenswrapper[4492]: I1126 07:17:00.481886 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Nov 26 07:17:00 crc kubenswrapper[4492]: I1126 07:17:00.481931 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Nov 26 07:17:00 crc kubenswrapper[4492]: I1126 07:17:00.484441 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-sh2zq"]
Nov 26 07:17:00 crc kubenswrapper[4492]: I1126 07:17:00.657377 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hbf2x\" (UniqueName: \"kubernetes.io/projected/05a7afc5-781d-49ca-b1cc-9de520f0de2b-kube-api-access-hbf2x\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-sh2zq\" (UID: \"05a7afc5-781d-49ca-b1cc-9de520f0de2b\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-sh2zq"
Nov 26 07:17:00 crc kubenswrapper[4492]: I1126 07:17:00.657495 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/05a7afc5-781d-49ca-b1cc-9de520f0de2b-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-sh2zq\" (UID: \"05a7afc5-781d-49ca-b1cc-9de520f0de2b\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-sh2zq"
Nov 26 07:17:00 crc kubenswrapper[4492]: I1126 07:17:00.657558 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/05a7afc5-781d-49ca-b1cc-9de520f0de2b-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-sh2zq\" (UID: \"05a7afc5-781d-49ca-b1cc-9de520f0de2b\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-sh2zq"
Nov 26 07:17:00 crc kubenswrapper[4492]: I1126 07:17:00.759207 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/05a7afc5-781d-49ca-b1cc-9de520f0de2b-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-sh2zq\" (UID: \"05a7afc5-781d-49ca-b1cc-9de520f0de2b\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-sh2zq"
Nov 26 07:17:00 crc kubenswrapper[4492]: I1126 07:17:00.759843 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/05a7afc5-781d-49ca-b1cc-9de520f0de2b-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-sh2zq\" (UID: \"05a7afc5-781d-49ca-b1cc-9de520f0de2b\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-sh2zq"
Nov 26 07:17:00 crc kubenswrapper[4492]: I1126 07:17:00.760007 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hbf2x\" (UniqueName: \"kubernetes.io/projected/05a7afc5-781d-49ca-b1cc-9de520f0de2b-kube-api-access-hbf2x\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-sh2zq\" (UID: \"05a7afc5-781d-49ca-b1cc-9de520f0de2b\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-sh2zq"
Nov 26 07:17:00 crc kubenswrapper[4492]: I1126 07:17:00.765209 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/05a7afc5-781d-49ca-b1cc-9de520f0de2b-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-sh2zq\" (UID: \"05a7afc5-781d-49ca-b1cc-9de520f0de2b\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-sh2zq"
Nov 26 07:17:00 crc kubenswrapper[4492]: I1126 07:17:00.766323 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/05a7afc5-781d-49ca-b1cc-9de520f0de2b-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-sh2zq\" (UID: \"05a7afc5-781d-49ca-b1cc-9de520f0de2b\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-sh2zq"
Nov 26 07:17:00 crc kubenswrapper[4492]: I1126 07:17:00.778427 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hbf2x\" (UniqueName: \"kubernetes.io/projected/05a7afc5-781d-49ca-b1cc-9de520f0de2b-kube-api-access-hbf2x\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-sh2zq\" (UID: \"05a7afc5-781d-49ca-b1cc-9de520f0de2b\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-sh2zq"
Nov 26 07:17:00 crc kubenswrapper[4492]: I1126 07:17:00.793512 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-sh2zq"
Nov 26 07:17:01 crc kubenswrapper[4492]: I1126 07:17:01.335018 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-sh2zq"]
Nov 26 07:17:01 crc kubenswrapper[4492]: I1126 07:17:01.406409 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-sh2zq" event={"ID":"05a7afc5-781d-49ca-b1cc-9de520f0de2b","Type":"ContainerStarted","Data":"b85a3d7f25839eb7c83128b90f7adeef93670032f6d6f4e636e20e52d25a425c"}
Nov 26 07:17:02 crc kubenswrapper[4492]: I1126 07:17:02.416628 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-sh2zq" event={"ID":"05a7afc5-781d-49ca-b1cc-9de520f0de2b","Type":"ContainerStarted","Data":"cb821d8c1e7fbce02af5299f91f7865ee968c122719253e1c6036366e8a39ba8"}
Nov 26 07:17:02 crc kubenswrapper[4492]: I1126 07:17:02.447117 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-sh2zq" podStartSLOduration=1.850425287 podStartE2EDuration="2.447100441s" podCreationTimestamp="2025-11-26 07:17:00 +0000 UTC" firstStartedPulling="2025-11-26 07:17:01.344463947 +0000 UTC m=+1717.228352245" lastFinishedPulling="2025-11-26 07:17:01.941139102 +0000 UTC m=+1717.825027399" observedRunningTime="2025-11-26 07:17:02.438538553 +0000 UTC m=+1718.322426851" watchObservedRunningTime="2025-11-26 07:17:02.447100441 +0000 UTC m=+1718.330988739"
Nov 26 07:17:06 crc kubenswrapper[4492]: I1126 07:17:06.458445 4492 generic.go:334] "Generic (PLEG): container finished" podID="05a7afc5-781d-49ca-b1cc-9de520f0de2b" containerID="cb821d8c1e7fbce02af5299f91f7865ee968c122719253e1c6036366e8a39ba8" exitCode=0
Nov 26 07:17:06 crc kubenswrapper[4492]: I1126 07:17:06.458529 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-sh2zq" event={"ID":"05a7afc5-781d-49ca-b1cc-9de520f0de2b","Type":"ContainerDied","Data":"cb821d8c1e7fbce02af5299f91f7865ee968c122719253e1c6036366e8a39ba8"}
Nov 26 07:17:07 crc kubenswrapper[4492]: I1126 07:17:07.836336 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-sh2zq"
Nov 26 07:17:07 crc kubenswrapper[4492]: I1126 07:17:07.920279 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hbf2x\" (UniqueName: \"kubernetes.io/projected/05a7afc5-781d-49ca-b1cc-9de520f0de2b-kube-api-access-hbf2x\") pod \"05a7afc5-781d-49ca-b1cc-9de520f0de2b\" (UID: \"05a7afc5-781d-49ca-b1cc-9de520f0de2b\") "
Nov 26 07:17:07 crc kubenswrapper[4492]: I1126 07:17:07.920619 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/05a7afc5-781d-49ca-b1cc-9de520f0de2b-inventory\") pod \"05a7afc5-781d-49ca-b1cc-9de520f0de2b\" (UID: \"05a7afc5-781d-49ca-b1cc-9de520f0de2b\") "
Nov 26 07:17:07 crc kubenswrapper[4492]: I1126 07:17:07.920653 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/05a7afc5-781d-49ca-b1cc-9de520f0de2b-ssh-key\") pod \"05a7afc5-781d-49ca-b1cc-9de520f0de2b\" (UID: \"05a7afc5-781d-49ca-b1cc-9de520f0de2b\") "
Nov 26 07:17:07 crc kubenswrapper[4492]: I1126 07:17:07.929888 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/05a7afc5-781d-49ca-b1cc-9de520f0de2b-kube-api-access-hbf2x" (OuterVolumeSpecName: "kube-api-access-hbf2x") pod "05a7afc5-781d-49ca-b1cc-9de520f0de2b" (UID: "05a7afc5-781d-49ca-b1cc-9de520f0de2b"). InnerVolumeSpecName "kube-api-access-hbf2x". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 07:17:07 crc kubenswrapper[4492]: I1126 07:17:07.945450 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/05a7afc5-781d-49ca-b1cc-9de520f0de2b-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "05a7afc5-781d-49ca-b1cc-9de520f0de2b" (UID: "05a7afc5-781d-49ca-b1cc-9de520f0de2b"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 07:17:07 crc kubenswrapper[4492]: I1126 07:17:07.947457 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/05a7afc5-781d-49ca-b1cc-9de520f0de2b-inventory" (OuterVolumeSpecName: "inventory") pod "05a7afc5-781d-49ca-b1cc-9de520f0de2b" (UID: "05a7afc5-781d-49ca-b1cc-9de520f0de2b"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 07:17:08 crc kubenswrapper[4492]: I1126 07:17:08.023893 4492 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/05a7afc5-781d-49ca-b1cc-9de520f0de2b-inventory\") on node \"crc\" DevicePath \"\""
Nov 26 07:17:08 crc kubenswrapper[4492]: I1126 07:17:08.024027 4492 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/05a7afc5-781d-49ca-b1cc-9de520f0de2b-ssh-key\") on node \"crc\" DevicePath \"\""
Nov 26 07:17:08 crc kubenswrapper[4492]: I1126 07:17:08.024086 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hbf2x\" (UniqueName: \"kubernetes.io/projected/05a7afc5-781d-49ca-b1cc-9de520f0de2b-kube-api-access-hbf2x\") on node \"crc\" DevicePath \"\""
Nov 26 07:17:08 crc kubenswrapper[4492]: I1126 07:17:08.484368 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-sh2zq" event={"ID":"05a7afc5-781d-49ca-b1cc-9de520f0de2b","Type":"ContainerDied","Data":"b85a3d7f25839eb7c83128b90f7adeef93670032f6d6f4e636e20e52d25a425c"}
Nov 26 07:17:08 crc kubenswrapper[4492]: I1126 07:17:08.484696 4492 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b85a3d7f25839eb7c83128b90f7adeef93670032f6d6f4e636e20e52d25a425c"
Nov 26 07:17:08 crc kubenswrapper[4492]: I1126 07:17:08.484400 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-sh2zq"
Nov 26 07:17:08 crc kubenswrapper[4492]: I1126 07:17:08.550468 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-ktqfv"]
Nov 26 07:17:08 crc kubenswrapper[4492]: E1126 07:17:08.551114 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="05a7afc5-781d-49ca-b1cc-9de520f0de2b" containerName="validate-network-edpm-deployment-openstack-edpm-ipam"
Nov 26 07:17:08 crc kubenswrapper[4492]: I1126 07:17:08.551137 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="05a7afc5-781d-49ca-b1cc-9de520f0de2b" containerName="validate-network-edpm-deployment-openstack-edpm-ipam"
Nov 26 07:17:08 crc kubenswrapper[4492]: I1126 07:17:08.551532 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="05a7afc5-781d-49ca-b1cc-9de520f0de2b" containerName="validate-network-edpm-deployment-openstack-edpm-ipam"
Nov 26 07:17:08 crc kubenswrapper[4492]: I1126 07:17:08.552547 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-ktqfv"
Nov 26 07:17:08 crc kubenswrapper[4492]: I1126 07:17:08.555864 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Nov 26 07:17:08 crc kubenswrapper[4492]: I1126 07:17:08.555897 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Nov 26 07:17:08 crc kubenswrapper[4492]: I1126 07:17:08.557635 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-mxgqb"
Nov 26 07:17:08 crc kubenswrapper[4492]: I1126 07:17:08.558122 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Nov 26 07:17:08 crc kubenswrapper[4492]: I1126 07:17:08.576424 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-ktqfv"]
Nov 26 07:17:08 crc kubenswrapper[4492]: I1126 07:17:08.633140 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5qxpv\" (UniqueName: \"kubernetes.io/projected/f0a99b5f-086f-47b2-9382-28b8a2612b12-kube-api-access-5qxpv\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-ktqfv\" (UID: \"f0a99b5f-086f-47b2-9382-28b8a2612b12\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-ktqfv"
Nov 26 07:17:08 crc kubenswrapper[4492]: I1126 07:17:08.633280 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/f0a99b5f-086f-47b2-9382-28b8a2612b12-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-ktqfv\" (UID: \"f0a99b5f-086f-47b2-9382-28b8a2612b12\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-ktqfv"
Nov 26 07:17:08 crc kubenswrapper[4492]: I1126 07:17:08.633340 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f0a99b5f-086f-47b2-9382-28b8a2612b12-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-ktqfv\" (UID: \"f0a99b5f-086f-47b2-9382-28b8a2612b12\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-ktqfv"
Nov 26 07:17:08 crc kubenswrapper[4492]: I1126 07:17:08.736468 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5qxpv\" (UniqueName: \"kubernetes.io/projected/f0a99b5f-086f-47b2-9382-28b8a2612b12-kube-api-access-5qxpv\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-ktqfv\" (UID: \"f0a99b5f-086f-47b2-9382-28b8a2612b12\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-ktqfv"
Nov 26 07:17:08 crc kubenswrapper[4492]: I1126 07:17:08.736579 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/f0a99b5f-086f-47b2-9382-28b8a2612b12-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-ktqfv\" (UID: \"f0a99b5f-086f-47b2-9382-28b8a2612b12\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-ktqfv"
Nov 26 07:17:08 crc kubenswrapper[4492]: I1126 07:17:08.737416 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f0a99b5f-086f-47b2-9382-28b8a2612b12-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-ktqfv\" (UID: \"f0a99b5f-086f-47b2-9382-28b8a2612b12\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-ktqfv"
Nov 26 07:17:08 crc kubenswrapper[4492]: I1126 07:17:08.743594 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/f0a99b5f-086f-47b2-9382-28b8a2612b12-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-ktqfv\" (UID: \"f0a99b5f-086f-47b2-9382-28b8a2612b12\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-ktqfv"
Nov 26 07:17:08 crc kubenswrapper[4492]: I1126 07:17:08.749663 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f0a99b5f-086f-47b2-9382-28b8a2612b12-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-ktqfv\" (UID: \"f0a99b5f-086f-47b2-9382-28b8a2612b12\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-ktqfv"
Nov 26 07:17:08 crc kubenswrapper[4492]: I1126 07:17:08.753639 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5qxpv\" (UniqueName: \"kubernetes.io/projected/f0a99b5f-086f-47b2-9382-28b8a2612b12-kube-api-access-5qxpv\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-ktqfv\" (UID: \"f0a99b5f-086f-47b2-9382-28b8a2612b12\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-ktqfv"
Nov 26 07:17:08 crc kubenswrapper[4492]: I1126 07:17:08.871863 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-ktqfv"
Nov 26 07:17:09 crc kubenswrapper[4492]: I1126 07:17:09.365715 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-ktqfv"]
Nov 26 07:17:09 crc kubenswrapper[4492]: I1126 07:17:09.495040 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-ktqfv" event={"ID":"f0a99b5f-086f-47b2-9382-28b8a2612b12","Type":"ContainerStarted","Data":"bd612495c3abe5063fa416f04e19143203177c756f38d0f20182b87821c5e7d7"}
Nov 26 07:17:10 crc kubenswrapper[4492]: I1126 07:17:10.504385 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-ktqfv" event={"ID":"f0a99b5f-086f-47b2-9382-28b8a2612b12","Type":"ContainerStarted","Data":"e5547f94c5ae67610d2eebe1f74f959a24a00e9466119be96c63ead26e9f8eca"}
Nov 26 07:17:10 crc kubenswrapper[4492]: I1126 07:17:10.529997 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-ktqfv" podStartSLOduration=2.060803958 podStartE2EDuration="2.529973646s" podCreationTimestamp="2025-11-26 07:17:08 +0000 UTC" firstStartedPulling="2025-11-26 07:17:09.367078819 +0000 UTC m=+1725.250967117" lastFinishedPulling="2025-11-26 07:17:09.836248507 +0000 UTC m=+1725.720136805" observedRunningTime="2025-11-26 07:17:10.519132714 +0000 UTC m=+1726.403021012" watchObservedRunningTime="2025-11-26 07:17:10.529973646 +0000 UTC m=+1726.413861944"
Nov 26 07:17:11 crc kubenswrapper[4492]: I1126 07:17:11.439639 4492 scope.go:117] "RemoveContainer" containerID="e66a01ef992e57d5211153b60046f2247dc264cdaa804a19ffc29e563f14e227"
Nov 26 07:17:11 crc kubenswrapper[4492]: E1126 07:17:11.439920 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82"
Nov 26 07:17:12 crc kubenswrapper[4492]: I1126 07:17:12.041380 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-db-create-2mcb9"]
Nov 26 07:17:12 crc kubenswrapper[4492]: I1126 07:17:12.050071 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-db-create-cfv7t"]
Nov 26 07:17:12 crc kubenswrapper[4492]: I1126 07:17:12.057603 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-db-create-2mcb9"]
Nov 26 07:17:12 crc kubenswrapper[4492]: I1126 07:17:12.063480 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-db-create-cfv7t"]
Nov 26 07:17:12 crc kubenswrapper[4492]: I1126 07:17:12.449588 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="66f5a9c1-c990-415e-b0e2-28d8ff866cf1" path="/var/lib/kubelet/pods/66f5a9c1-c990-415e-b0e2-28d8ff866cf1/volumes"
Nov 26 07:17:12 crc kubenswrapper[4492]: I1126 07:17:12.450381 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ae7dd4ac-dc69-4186-923f-40616d7fbea6" path="/var/lib/kubelet/pods/ae7dd4ac-dc69-4186-923f-40616d7fbea6/volumes"
Nov 26 07:17:13 crc kubenswrapper[4492]: I1126 07:17:13.030374 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-db-create-6xr2x"]
Nov 26 07:17:13 crc kubenswrapper[4492]: I1126 07:17:13.039463 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-154f-account-create-update-n2256"]
Nov 26 07:17:13 crc kubenswrapper[4492]: I1126 07:17:13.052875 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-b365-account-create-update-4vpg6"]
Nov 26 07:17:13 crc kubenswrapper[4492]: I1126 07:17:13.071255 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-db-create-6xr2x"]
Nov 26 07:17:13 crc kubenswrapper[4492]: I1126 07:17:13.080658 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-4548-account-create-update-7rfwr"]
Nov 26 07:17:13 crc kubenswrapper[4492]: I1126 07:17:13.086691 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-154f-account-create-update-n2256"]
Nov 26 07:17:13 crc kubenswrapper[4492]: I1126 07:17:13.106571 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-b365-account-create-update-4vpg6"]
Nov 26 07:17:13 crc kubenswrapper[4492]: I1126 07:17:13.124959 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-4548-account-create-update-7rfwr"]
Nov 26 07:17:14 crc kubenswrapper[4492]: I1126 07:17:14.450499 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87ae35c0-5648-46cd-86df-3193cdbd748e" path="/var/lib/kubelet/pods/87ae35c0-5648-46cd-86df-3193cdbd748e/volumes"
Nov 26 07:17:14 crc kubenswrapper[4492]: I1126 07:17:14.451449 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="88818f37-64b1-4583-a66f-f6fc347fed00" path="/var/lib/kubelet/pods/88818f37-64b1-4583-a66f-f6fc347fed00/volumes"
Nov 26 07:17:14 crc kubenswrapper[4492]: I1126 07:17:14.452047 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cb6703a6-97ba-4d95-8094-647d0baec33e" path="/var/lib/kubelet/pods/cb6703a6-97ba-4d95-8094-647d0baec33e/volumes"
Nov 26 07:17:14 crc kubenswrapper[4492]: I1126 07:17:14.453229 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e9b50126-fe92-4e0e-a31a-e5c40823949c" path="/var/lib/kubelet/pods/e9b50126-fe92-4e0e-a31a-e5c40823949c/volumes"
Nov 26 07:17:24 crc kubenswrapper[4492]: I1126 07:17:24.444059 4492 scope.go:117] "RemoveContainer" containerID="e66a01ef992e57d5211153b60046f2247dc264cdaa804a19ffc29e563f14e227"
Nov 26 07:17:24 crc kubenswrapper[4492]: E1126 07:17:24.444672 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82"
Nov 26 07:17:36 crc kubenswrapper[4492]: I1126 07:17:36.439566 4492 scope.go:117] "RemoveContainer" containerID="e66a01ef992e57d5211153b60046f2247dc264cdaa804a19ffc29e563f14e227"
Nov 26 07:17:36 crc kubenswrapper[4492]: E1126 07:17:36.440446 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82"
Nov 26 07:17:38 crc kubenswrapper[4492]: I1126 07:17:38.749012 4492 generic.go:334] "Generic (PLEG): container finished" podID="f0a99b5f-086f-47b2-9382-28b8a2612b12" containerID="e5547f94c5ae67610d2eebe1f74f959a24a00e9466119be96c63ead26e9f8eca" exitCode=0
Nov 26 07:17:38 crc kubenswrapper[4492]: I1126 07:17:38.749089 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-ktqfv" event={"ID":"f0a99b5f-086f-47b2-9382-28b8a2612b12","Type":"ContainerDied","Data":"e5547f94c5ae67610d2eebe1f74f959a24a00e9466119be96c63ead26e9f8eca"}
Nov 26 07:17:40 crc kubenswrapper[4492]: I1126 07:17:40.126675 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-ktqfv"
Nov 26 07:17:40 crc kubenswrapper[4492]: I1126 07:17:40.160802 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f0a99b5f-086f-47b2-9382-28b8a2612b12-inventory\") pod \"f0a99b5f-086f-47b2-9382-28b8a2612b12\" (UID: \"f0a99b5f-086f-47b2-9382-28b8a2612b12\") "
Nov 26 07:17:40 crc kubenswrapper[4492]: I1126 07:17:40.161065 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/f0a99b5f-086f-47b2-9382-28b8a2612b12-ssh-key\") pod \"f0a99b5f-086f-47b2-9382-28b8a2612b12\" (UID: \"f0a99b5f-086f-47b2-9382-28b8a2612b12\") "
Nov 26 07:17:40 crc kubenswrapper[4492]: I1126 07:17:40.161144 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5qxpv\" (UniqueName: \"kubernetes.io/projected/f0a99b5f-086f-47b2-9382-28b8a2612b12-kube-api-access-5qxpv\") pod \"f0a99b5f-086f-47b2-9382-28b8a2612b12\" (UID: \"f0a99b5f-086f-47b2-9382-28b8a2612b12\") "
Nov 26 07:17:40 crc kubenswrapper[4492]: I1126 07:17:40.168567 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f0a99b5f-086f-47b2-9382-28b8a2612b12-kube-api-access-5qxpv" (OuterVolumeSpecName: "kube-api-access-5qxpv") pod "f0a99b5f-086f-47b2-9382-28b8a2612b12" (UID: "f0a99b5f-086f-47b2-9382-28b8a2612b12"). InnerVolumeSpecName "kube-api-access-5qxpv". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 07:17:40 crc kubenswrapper[4492]: I1126 07:17:40.195086 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f0a99b5f-086f-47b2-9382-28b8a2612b12-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "f0a99b5f-086f-47b2-9382-28b8a2612b12" (UID: "f0a99b5f-086f-47b2-9382-28b8a2612b12"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 07:17:40 crc kubenswrapper[4492]: I1126 07:17:40.198024 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f0a99b5f-086f-47b2-9382-28b8a2612b12-inventory" (OuterVolumeSpecName: "inventory") pod "f0a99b5f-086f-47b2-9382-28b8a2612b12" (UID: "f0a99b5f-086f-47b2-9382-28b8a2612b12"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 07:17:40 crc kubenswrapper[4492]: I1126 07:17:40.264388 4492 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f0a99b5f-086f-47b2-9382-28b8a2612b12-inventory\") on node \"crc\" DevicePath \"\""
Nov 26 07:17:40 crc kubenswrapper[4492]: I1126 07:17:40.264415 4492 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/f0a99b5f-086f-47b2-9382-28b8a2612b12-ssh-key\") on node \"crc\" DevicePath \"\""
Nov 26 07:17:40 crc kubenswrapper[4492]: I1126 07:17:40.264427 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5qxpv\" (UniqueName: \"kubernetes.io/projected/f0a99b5f-086f-47b2-9382-28b8a2612b12-kube-api-access-5qxpv\") on node \"crc\" DevicePath \"\""
Nov 26 07:17:40 crc kubenswrapper[4492]: I1126 07:17:40.768143 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-ktqfv" event={"ID":"f0a99b5f-086f-47b2-9382-28b8a2612b12","Type":"ContainerDied","Data":"bd612495c3abe5063fa416f04e19143203177c756f38d0f20182b87821c5e7d7"}
Nov 26 07:17:40 crc kubenswrapper[4492]: I1126 07:17:40.768202 4492 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bd612495c3abe5063fa416f04e19143203177c756f38d0f20182b87821c5e7d7"
Nov 26 07:17:40 crc kubenswrapper[4492]: I1126 07:17:40.768257 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-ktqfv"
Nov 26 07:17:40 crc kubenswrapper[4492]: I1126 07:17:40.842808 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-csrdh"]
Nov 26 07:17:40 crc kubenswrapper[4492]: E1126 07:17:40.843204 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f0a99b5f-086f-47b2-9382-28b8a2612b12" containerName="install-os-edpm-deployment-openstack-edpm-ipam"
Nov 26 07:17:40 crc kubenswrapper[4492]: I1126 07:17:40.843223 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="f0a99b5f-086f-47b2-9382-28b8a2612b12" containerName="install-os-edpm-deployment-openstack-edpm-ipam"
Nov 26 07:17:40 crc kubenswrapper[4492]: I1126 07:17:40.843431 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="f0a99b5f-086f-47b2-9382-28b8a2612b12" containerName="install-os-edpm-deployment-openstack-edpm-ipam"
Nov 26 07:17:40 crc kubenswrapper[4492]: I1126 07:17:40.844062 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-csrdh"
Nov 26 07:17:40 crc kubenswrapper[4492]: I1126 07:17:40.848476 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Nov 26 07:17:40 crc kubenswrapper[4492]: I1126 07:17:40.848647 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Nov 26 07:17:40 crc kubenswrapper[4492]: I1126 07:17:40.849324 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Nov 26 07:17:40 crc kubenswrapper[4492]: I1126 07:17:40.849491 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-mxgqb"
Nov 26 07:17:40 crc kubenswrapper[4492]: I1126 07:17:40.856805 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-csrdh"]
Nov 26 07:17:40 crc kubenswrapper[4492]: I1126 07:17:40.875697 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b4c2c2c0-ca05-4111-aa71-bb94c39cab08-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-csrdh\" (UID: \"b4c2c2c0-ca05-4111-aa71-bb94c39cab08\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-csrdh"
Nov 26 07:17:40 crc kubenswrapper[4492]: I1126 07:17:40.875764 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jgsvn\" (UniqueName: \"kubernetes.io/projected/b4c2c2c0-ca05-4111-aa71-bb94c39cab08-kube-api-access-jgsvn\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-csrdh\" (UID: \"b4c2c2c0-ca05-4111-aa71-bb94c39cab08\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-csrdh"
Nov 26 07:17:40 crc kubenswrapper[4492]: I1126 07:17:40.876024 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b4c2c2c0-ca05-4111-aa71-bb94c39cab08-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-csrdh\" (UID: \"b4c2c2c0-ca05-4111-aa71-bb94c39cab08\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-csrdh"
Nov 26 07:17:40 crc kubenswrapper[4492]: I1126 07:17:40.978729 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b4c2c2c0-ca05-4111-aa71-bb94c39cab08-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-csrdh\" (UID: \"b4c2c2c0-ca05-4111-aa71-bb94c39cab08\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-csrdh"
Nov 26 07:17:40 crc kubenswrapper[4492]: I1126 07:17:40.978852 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jgsvn\" (UniqueName: \"kubernetes.io/projected/b4c2c2c0-ca05-4111-aa71-bb94c39cab08-kube-api-access-jgsvn\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-csrdh\" (UID: \"b4c2c2c0-ca05-4111-aa71-bb94c39cab08\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-csrdh"
Nov 26 07:17:40 crc kubenswrapper[4492]: I1126 07:17:40.979168 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b4c2c2c0-ca05-4111-aa71-bb94c39cab08-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-csrdh\" (UID: \"b4c2c2c0-ca05-4111-aa71-bb94c39cab08\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-csrdh"
Nov 26 07:17:40 crc kubenswrapper[4492]: I1126 07:17:40.983428 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b4c2c2c0-ca05-4111-aa71-bb94c39cab08-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-csrdh\" (UID: \"b4c2c2c0-ca05-4111-aa71-bb94c39cab08\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-csrdh"
Nov 26 07:17:40 crc kubenswrapper[4492]: I1126 07:17:40.998451 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b4c2c2c0-ca05-4111-aa71-bb94c39cab08-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-csrdh\" (UID: \"b4c2c2c0-ca05-4111-aa71-bb94c39cab08\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-csrdh"
Nov 26 07:17:40 crc kubenswrapper[4492]: I1126 07:17:40.998975 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jgsvn\" (UniqueName: \"kubernetes.io/projected/b4c2c2c0-ca05-4111-aa71-bb94c39cab08-kube-api-access-jgsvn\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-csrdh\" (UID: \"b4c2c2c0-ca05-4111-aa71-bb94c39cab08\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-csrdh"
Nov 26 07:17:41 crc kubenswrapper[4492]: I1126 07:17:41.164589 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-csrdh"
Nov 26 07:17:41 crc kubenswrapper[4492]: I1126 07:17:41.697231 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-csrdh"]
Nov 26 07:17:41 crc kubenswrapper[4492]: I1126 07:17:41.778285 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-csrdh" event={"ID":"b4c2c2c0-ca05-4111-aa71-bb94c39cab08","Type":"ContainerStarted","Data":"4f43171a6ab4ca6777b89fc3c248975e6e5022c205fd39cb95a8ca2f9f7c760f"}
Nov 26 07:17:42 crc kubenswrapper[4492]: I1126 07:17:42.803667 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-csrdh" event={"ID":"b4c2c2c0-ca05-4111-aa71-bb94c39cab08","Type":"ContainerStarted","Data":"1bf9ca83b90aaa13fc34fc203b92af2ee89c95b2c2b4682f73665b78b7aee32e"}
Nov 26 07:17:42 crc kubenswrapper[4492]: I1126 07:17:42.840214 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-csrdh" podStartSLOduration=2.289979411 podStartE2EDuration="2.840190166s" podCreationTimestamp="2025-11-26 07:17:40 +0000 UTC" firstStartedPulling="2025-11-26 07:17:41.705927532 +0000 UTC m=+1757.589815830" lastFinishedPulling="2025-11-26 07:17:42.256138286 +0000 UTC m=+1758.140026585" observedRunningTime="2025-11-26 07:17:42.815727764 +0000 UTC m=+1758.699616061" watchObservedRunningTime="2025-11-26 07:17:42.840190166 +0000 UTC m=+1758.724078464"
Nov 26 07:17:43 crc kubenswrapper[4492]: I1126 07:17:43.045710 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-wnw6v"]
Nov 26 07:17:43 crc kubenswrapper[4492]: I1126 07:17:43.057793 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-wnw6v"]
Nov 26 07:17:44 crc kubenswrapper[4492]: I1126 07:17:44.449723 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="eb4502e1-bb87-4f1f-a4df-f6ffdfc8c1fa" path="/var/lib/kubelet/pods/eb4502e1-bb87-4f1f-a4df-f6ffdfc8c1fa/volumes"
Nov 26 07:17:49 crc kubenswrapper[4492]: I1126 07:17:49.438741 4492 scope.go:117] "RemoveContainer" containerID="e66a01ef992e57d5211153b60046f2247dc264cdaa804a19ffc29e563f14e227"
Nov 26 07:17:49 crc kubenswrapper[4492]: E1126 07:17:49.439742 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82"
Nov 26 07:17:58 crc kubenswrapper[4492]: I1126 07:17:58.197773 4492 scope.go:117] "RemoveContainer" containerID="160fdc4870abaef7485d921a8bbfef9c47e9aa6038e12d63f5edf617a263d7e5"
Nov 26 07:17:58 crc kubenswrapper[4492]: I1126 07:17:58.230645 4492 scope.go:117] "RemoveContainer" containerID="6980074a256088ca155f9a2337d5f00ab5b25870c482a343f59fe2b9d33e6976"
Nov 26 07:17:58 crc kubenswrapper[4492]: I1126 07:17:58.273446 4492 scope.go:117] "RemoveContainer" containerID="88e1fd74c66a6729340d4ae4796c21a0e05e2aa899fa2d2c3406ade5953b58d2"
Nov 26 07:17:58 crc kubenswrapper[4492]: I1126 07:17:58.306236 4492 scope.go:117] "RemoveContainer" containerID="095fc718a0ac32ea00f9dc11ae768c035b2263601f68d397e70e7f24ebcad089"
Nov 26 07:17:58 crc kubenswrapper[4492]: I1126 07:17:58.338058 4492 scope.go:117] "RemoveContainer" containerID="8352c3846c3d7e947d57816268b126813ef01e587ab5126180b2fefeb04c7680"
Nov 26 07:17:58 crc kubenswrapper[4492]: I1126 07:17:58.365786 4492 scope.go:117] "RemoveContainer" containerID="63aec72331020afe59469e1a13758e8e0943f6f6bd27ed93dbf0e1d4b10a0058"
Nov 26 07:17:58 crc kubenswrapper[4492]: I1126 07:17:58.397586 4492 scope.go:117] "RemoveContainer" containerID="2129a621888d0d70629f76ed5efc6c3a2b6445c1fde2499772f615b55bc844ff"
Nov 26 07:18:01 crc kubenswrapper[4492]: I1126 07:18:01.045872 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-9nfbl"]
Nov 26 07:18:01 crc kubenswrapper[4492]: I1126 07:18:01.057772 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-9nfbl"]
Nov 26 07:18:01 crc kubenswrapper[4492]: I1126 07:18:01.438811 4492 scope.go:117] "RemoveContainer" containerID="e66a01ef992e57d5211153b60046f2247dc264cdaa804a19ffc29e563f14e227"
Nov 26 07:18:01 crc kubenswrapper[4492]: E1126 07:18:01.439507 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82"
Nov 26 07:18:02 crc kubenswrapper[4492]: I1126 07:18:02.450356 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6622ed2f-d8aa-4bc0-8a5a-73c0ed329035" path="/var/lib/kubelet/pods/6622ed2f-d8aa-4bc0-8a5a-73c0ed329035/volumes"
Nov 26 07:18:03 crc kubenswrapper[4492]: I1126 07:18:03.027928 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-cell-mapping-9ftsf"]
Nov 26 07:18:03 crc kubenswrapper[4492]: I1126 07:18:03.035312 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-cell-mapping-9ftsf"]
Nov 26 07:18:04 crc kubenswrapper[4492]: I1126 07:18:04.479932 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ca524b1-6cc4-4333-a4fe-724793248c10" path="/var/lib/kubelet/pods/6ca524b1-6cc4-4333-a4fe-724793248c10/volumes"
Nov 26 07:18:14 crc kubenswrapper[4492]: I1126 07:18:14.446462 4492 scope.go:117] "RemoveContainer" containerID="e66a01ef992e57d5211153b60046f2247dc264cdaa804a19ffc29e563f14e227"
Nov 26 07:18:14 crc kubenswrapper[4492]: E1126 07:18:14.447644 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82"
Nov 26 07:18:20 crc kubenswrapper[4492]: I1126 07:18:20.157568 4492 generic.go:334] "Generic (PLEG): container finished" podID="b4c2c2c0-ca05-4111-aa71-bb94c39cab08" containerID="1bf9ca83b90aaa13fc34fc203b92af2ee89c95b2c2b4682f73665b78b7aee32e" exitCode=0
Nov 26 07:18:20 crc kubenswrapper[4492]: I1126 07:18:20.157667 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-csrdh" event={"ID":"b4c2c2c0-ca05-4111-aa71-bb94c39cab08","Type":"ContainerDied","Data":"1bf9ca83b90aaa13fc34fc203b92af2ee89c95b2c2b4682f73665b78b7aee32e"}
Nov 26 07:18:21 crc kubenswrapper[4492]: I1126 07:18:21.594906 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-csrdh"
Nov 26 07:18:21 crc kubenswrapper[4492]: I1126 07:18:21.731951 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jgsvn\" (UniqueName: \"kubernetes.io/projected/b4c2c2c0-ca05-4111-aa71-bb94c39cab08-kube-api-access-jgsvn\") pod \"b4c2c2c0-ca05-4111-aa71-bb94c39cab08\" (UID: \"b4c2c2c0-ca05-4111-aa71-bb94c39cab08\") "
Nov 26 07:18:21 crc kubenswrapper[4492]: I1126 07:18:21.732096 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b4c2c2c0-ca05-4111-aa71-bb94c39cab08-inventory\") pod \"b4c2c2c0-ca05-4111-aa71-bb94c39cab08\" (UID: \"b4c2c2c0-ca05-4111-aa71-bb94c39cab08\") "
Nov 26 07:18:21 crc kubenswrapper[4492]: I1126 07:18:21.732226 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b4c2c2c0-ca05-4111-aa71-bb94c39cab08-ssh-key\") pod \"b4c2c2c0-ca05-4111-aa71-bb94c39cab08\" (UID: \"b4c2c2c0-ca05-4111-aa71-bb94c39cab08\") "
Nov 26 07:18:21 crc kubenswrapper[4492]: I1126 07:18:21.749822 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b4c2c2c0-ca05-4111-aa71-bb94c39cab08-kube-api-access-jgsvn" (OuterVolumeSpecName: "kube-api-access-jgsvn") pod "b4c2c2c0-ca05-4111-aa71-bb94c39cab08" (UID: "b4c2c2c0-ca05-4111-aa71-bb94c39cab08"). InnerVolumeSpecName "kube-api-access-jgsvn". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 07:18:21 crc kubenswrapper[4492]: I1126 07:18:21.758016 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b4c2c2c0-ca05-4111-aa71-bb94c39cab08-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "b4c2c2c0-ca05-4111-aa71-bb94c39cab08" (UID: "b4c2c2c0-ca05-4111-aa71-bb94c39cab08"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 07:18:21 crc kubenswrapper[4492]: I1126 07:18:21.760302 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b4c2c2c0-ca05-4111-aa71-bb94c39cab08-inventory" (OuterVolumeSpecName: "inventory") pod "b4c2c2c0-ca05-4111-aa71-bb94c39cab08" (UID: "b4c2c2c0-ca05-4111-aa71-bb94c39cab08"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 07:18:21 crc kubenswrapper[4492]: I1126 07:18:21.834699 4492 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b4c2c2c0-ca05-4111-aa71-bb94c39cab08-inventory\") on node \"crc\" DevicePath \"\""
Nov 26 07:18:21 crc kubenswrapper[4492]: I1126 07:18:21.834750 4492 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b4c2c2c0-ca05-4111-aa71-bb94c39cab08-ssh-key\") on node \"crc\" DevicePath \"\""
Nov 26 07:18:21 crc kubenswrapper[4492]: I1126 07:18:21.834766 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jgsvn\" (UniqueName: \"kubernetes.io/projected/b4c2c2c0-ca05-4111-aa71-bb94c39cab08-kube-api-access-jgsvn\") on node \"crc\" DevicePath \"\""
Nov 26 07:18:22 crc kubenswrapper[4492]: I1126 07:18:22.176665 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-csrdh" event={"ID":"b4c2c2c0-ca05-4111-aa71-bb94c39cab08","Type":"ContainerDied","Data":"4f43171a6ab4ca6777b89fc3c248975e6e5022c205fd39cb95a8ca2f9f7c760f"}
Nov 26 07:18:22 crc kubenswrapper[4492]: I1126 07:18:22.176813 4492 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4f43171a6ab4ca6777b89fc3c248975e6e5022c205fd39cb95a8ca2f9f7c760f"
Nov 26 07:18:22 crc kubenswrapper[4492]: I1126 07:18:22.176761 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-csrdh"
Nov 26 07:18:22 crc kubenswrapper[4492]: I1126 07:18:22.269468 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-5gwmh"]
Nov 26 07:18:22 crc kubenswrapper[4492]: E1126 07:18:22.269893 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b4c2c2c0-ca05-4111-aa71-bb94c39cab08" containerName="configure-os-edpm-deployment-openstack-edpm-ipam"
Nov 26 07:18:22 crc kubenswrapper[4492]: I1126 07:18:22.269911 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="b4c2c2c0-ca05-4111-aa71-bb94c39cab08" containerName="configure-os-edpm-deployment-openstack-edpm-ipam"
Nov 26 07:18:22 crc kubenswrapper[4492]: I1126 07:18:22.270204 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="b4c2c2c0-ca05-4111-aa71-bb94c39cab08" containerName="configure-os-edpm-deployment-openstack-edpm-ipam"
Nov 26 07:18:22 crc kubenswrapper[4492]: I1126 07:18:22.270963 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-5gwmh"
Nov 26 07:18:22 crc kubenswrapper[4492]: I1126 07:18:22.273086 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-mxgqb"
Nov 26 07:18:22 crc kubenswrapper[4492]: I1126 07:18:22.273345 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Nov 26 07:18:22 crc kubenswrapper[4492]: I1126 07:18:22.276652 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Nov 26 07:18:22 crc kubenswrapper[4492]: I1126 07:18:22.276955 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Nov 26 07:18:22 crc kubenswrapper[4492]: I1126 07:18:22.280352 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-5gwmh"]
Nov 26 07:18:22 crc kubenswrapper[4492]: I1126 07:18:22.345803 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/8402c43d-fc17-451a-adcb-03ec339e1d1c-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-5gwmh\" (UID: \"8402c43d-fc17-451a-adcb-03ec339e1d1c\") " pod="openstack/ssh-known-hosts-edpm-deployment-5gwmh"
Nov 26 07:18:22 crc kubenswrapper[4492]: I1126 07:18:22.346135 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/8402c43d-fc17-451a-adcb-03ec339e1d1c-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-5gwmh\" (UID: \"8402c43d-fc17-451a-adcb-03ec339e1d1c\") " pod="openstack/ssh-known-hosts-edpm-deployment-5gwmh"
Nov 26 07:18:22 crc kubenswrapper[4492]: I1126 07:18:22.346387 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sbpvv\" (UniqueName: \"kubernetes.io/projected/8402c43d-fc17-451a-adcb-03ec339e1d1c-kube-api-access-sbpvv\") pod \"ssh-known-hosts-edpm-deployment-5gwmh\" (UID: \"8402c43d-fc17-451a-adcb-03ec339e1d1c\") " pod="openstack/ssh-known-hosts-edpm-deployment-5gwmh"
Nov 26 07:18:22 crc kubenswrapper[4492]: I1126 07:18:22.448441 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/8402c43d-fc17-451a-adcb-03ec339e1d1c-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-5gwmh\" (UID: \"8402c43d-fc17-451a-adcb-03ec339e1d1c\") " pod="openstack/ssh-known-hosts-edpm-deployment-5gwmh"
Nov 26 07:18:22 crc kubenswrapper[4492]: I1126 07:18:22.449825 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/8402c43d-fc17-451a-adcb-03ec339e1d1c-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-5gwmh\" (UID: \"8402c43d-fc17-451a-adcb-03ec339e1d1c\") " pod="openstack/ssh-known-hosts-edpm-deployment-5gwmh"
Nov 26 07:18:22 crc kubenswrapper[4492]: I1126 07:18:22.451592 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sbpvv\" (UniqueName: \"kubernetes.io/projected/8402c43d-fc17-451a-adcb-03ec339e1d1c-kube-api-access-sbpvv\") pod \"ssh-known-hosts-edpm-deployment-5gwmh\" (UID: \"8402c43d-fc17-451a-adcb-03ec339e1d1c\") " pod="openstack/ssh-known-hosts-edpm-deployment-5gwmh"
Nov 26 07:18:22 crc kubenswrapper[4492]: I1126 07:18:22.455056 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/8402c43d-fc17-451a-adcb-03ec339e1d1c-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-5gwmh\" (UID: \"8402c43d-fc17-451a-adcb-03ec339e1d1c\") " pod="openstack/ssh-known-hosts-edpm-deployment-5gwmh"
Nov 26 07:18:22 crc kubenswrapper[4492]: I1126 07:18:22.455979 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/8402c43d-fc17-451a-adcb-03ec339e1d1c-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-5gwmh\" (UID: \"8402c43d-fc17-451a-adcb-03ec339e1d1c\") " pod="openstack/ssh-known-hosts-edpm-deployment-5gwmh"
Nov 26 07:18:22 crc kubenswrapper[4492]: I1126 07:18:22.471539 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sbpvv\" (UniqueName: \"kubernetes.io/projected/8402c43d-fc17-451a-adcb-03ec339e1d1c-kube-api-access-sbpvv\") pod \"ssh-known-hosts-edpm-deployment-5gwmh\" (UID: \"8402c43d-fc17-451a-adcb-03ec339e1d1c\") " pod="openstack/ssh-known-hosts-edpm-deployment-5gwmh"
Nov 26 07:18:22 crc kubenswrapper[4492]: I1126 07:18:22.589336 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-5gwmh"
Nov 26 07:18:23 crc kubenswrapper[4492]: I1126 07:18:23.086530 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-5gwmh"]
Nov 26 07:18:23 crc kubenswrapper[4492]: I1126 07:18:23.187306 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-5gwmh" event={"ID":"8402c43d-fc17-451a-adcb-03ec339e1d1c","Type":"ContainerStarted","Data":"b1a2caffa30a6b3e1156fcf051b97ffa4a5a3060564f37f6581285c3a709a497"}
Nov 26 07:18:24 crc kubenswrapper[4492]: I1126 07:18:24.198260 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-5gwmh" event={"ID":"8402c43d-fc17-451a-adcb-03ec339e1d1c","Type":"ContainerStarted","Data":"6b82b743ba7f84aeacee874b1a5acdad686e3aade35f52193210dffedbe47cdb"}
Nov 26 07:18:24 crc kubenswrapper[4492]: I1126 07:18:24.219250 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ssh-known-hosts-edpm-deployment-5gwmh" podStartSLOduration=1.466010577 podStartE2EDuration="2.219230424s" podCreationTimestamp="2025-11-26 07:18:22 +0000 UTC" firstStartedPulling="2025-11-26 07:18:23.089045427 +0000 UTC m=+1798.972933725" lastFinishedPulling="2025-11-26 07:18:23.842265274 +0000 UTC m=+1799.726153572" observedRunningTime="2025-11-26 07:18:24.213716248 +0000 UTC m=+1800.097604535" watchObservedRunningTime="2025-11-26 07:18:24.219230424 +0000 UTC m=+1800.103118713"
Nov 26 07:18:27 crc kubenswrapper[4492]: I1126 07:18:27.439485 4492 scope.go:117] "RemoveContainer" containerID="e66a01ef992e57d5211153b60046f2247dc264cdaa804a19ffc29e563f14e227"
Nov 26 07:18:27 crc kubenswrapper[4492]: E1126 07:18:27.440416 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82"
Nov 26 07:18:29 crc kubenswrapper[4492]: I1126 07:18:29.245680 4492 generic.go:334] "Generic (PLEG): container finished" podID="8402c43d-fc17-451a-adcb-03ec339e1d1c" containerID="6b82b743ba7f84aeacee874b1a5acdad686e3aade35f52193210dffedbe47cdb" exitCode=0
Nov 26 07:18:29 crc kubenswrapper[4492]: I1126 07:18:29.246020 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-5gwmh" event={"ID":"8402c43d-fc17-451a-adcb-03ec339e1d1c","Type":"ContainerDied","Data":"6b82b743ba7f84aeacee874b1a5acdad686e3aade35f52193210dffedbe47cdb"}
Nov 26 07:18:30 crc kubenswrapper[4492]: I1126 07:18:30.599447 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-5gwmh"
Nov 26 07:18:30 crc kubenswrapper[4492]: I1126 07:18:30.628388 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sbpvv\" (UniqueName: \"kubernetes.io/projected/8402c43d-fc17-451a-adcb-03ec339e1d1c-kube-api-access-sbpvv\") pod \"8402c43d-fc17-451a-adcb-03ec339e1d1c\" (UID: \"8402c43d-fc17-451a-adcb-03ec339e1d1c\") "
Nov 26 07:18:30 crc kubenswrapper[4492]: I1126 07:18:30.628599 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/8402c43d-fc17-451a-adcb-03ec339e1d1c-inventory-0\") pod \"8402c43d-fc17-451a-adcb-03ec339e1d1c\" (UID: \"8402c43d-fc17-451a-adcb-03ec339e1d1c\") "
Nov 26 07:18:30 crc kubenswrapper[4492]: I1126 07:18:30.628933 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/8402c43d-fc17-451a-adcb-03ec339e1d1c-ssh-key-openstack-edpm-ipam\") pod \"8402c43d-fc17-451a-adcb-03ec339e1d1c\" (UID: \"8402c43d-fc17-451a-adcb-03ec339e1d1c\") "
Nov 26 07:18:30 crc kubenswrapper[4492]: I1126 07:18:30.649427 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8402c43d-fc17-451a-adcb-03ec339e1d1c-kube-api-access-sbpvv" (OuterVolumeSpecName: "kube-api-access-sbpvv") pod "8402c43d-fc17-451a-adcb-03ec339e1d1c" (UID: "8402c43d-fc17-451a-adcb-03ec339e1d1c"). InnerVolumeSpecName "kube-api-access-sbpvv". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 07:18:30 crc kubenswrapper[4492]: I1126 07:18:30.655077 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8402c43d-fc17-451a-adcb-03ec339e1d1c-inventory-0" (OuterVolumeSpecName: "inventory-0") pod "8402c43d-fc17-451a-adcb-03ec339e1d1c" (UID: "8402c43d-fc17-451a-adcb-03ec339e1d1c"). InnerVolumeSpecName "inventory-0". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 07:18:30 crc kubenswrapper[4492]: I1126 07:18:30.656525 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8402c43d-fc17-451a-adcb-03ec339e1d1c-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "8402c43d-fc17-451a-adcb-03ec339e1d1c" (UID: "8402c43d-fc17-451a-adcb-03ec339e1d1c"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 07:18:30 crc kubenswrapper[4492]: I1126 07:18:30.732409 4492 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/8402c43d-fc17-451a-adcb-03ec339e1d1c-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\""
Nov 26 07:18:30 crc kubenswrapper[4492]: I1126 07:18:30.732583 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sbpvv\" (UniqueName: \"kubernetes.io/projected/8402c43d-fc17-451a-adcb-03ec339e1d1c-kube-api-access-sbpvv\") on node \"crc\" DevicePath \"\""
Nov 26 07:18:30 crc kubenswrapper[4492]: I1126 07:18:30.732653 4492 reconciler_common.go:293] "Volume detached for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/8402c43d-fc17-451a-adcb-03ec339e1d1c-inventory-0\") on node \"crc\" DevicePath \"\""
Nov 26 07:18:31 crc kubenswrapper[4492]: I1126 07:18:31.265420 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-5gwmh" event={"ID":"8402c43d-fc17-451a-adcb-03ec339e1d1c","Type":"ContainerDied","Data":"b1a2caffa30a6b3e1156fcf051b97ffa4a5a3060564f37f6581285c3a709a497"}
Nov 26 07:18:31 crc kubenswrapper[4492]: I1126 07:18:31.265761 4492 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b1a2caffa30a6b3e1156fcf051b97ffa4a5a3060564f37f6581285c3a709a497"
Nov 26 07:18:31 crc kubenswrapper[4492]: I1126 07:18:31.265506 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-5gwmh"
Nov 26 07:18:31 crc kubenswrapper[4492]: I1126 07:18:31.349256 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-bn646"]
Nov 26 07:18:31 crc kubenswrapper[4492]: E1126 07:18:31.349676 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8402c43d-fc17-451a-adcb-03ec339e1d1c" containerName="ssh-known-hosts-edpm-deployment"
Nov 26 07:18:31 crc kubenswrapper[4492]: I1126 07:18:31.349698 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="8402c43d-fc17-451a-adcb-03ec339e1d1c" containerName="ssh-known-hosts-edpm-deployment"
Nov 26 07:18:31 crc kubenswrapper[4492]: I1126 07:18:31.349930 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="8402c43d-fc17-451a-adcb-03ec339e1d1c" containerName="ssh-known-hosts-edpm-deployment"
Nov 26 07:18:31 crc kubenswrapper[4492]: I1126 07:18:31.350643 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-bn646" Nov 26 07:18:31 crc kubenswrapper[4492]: I1126 07:18:31.360551 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-bn646"] Nov 26 07:18:31 crc kubenswrapper[4492]: I1126 07:18:31.360656 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 26 07:18:31 crc kubenswrapper[4492]: I1126 07:18:31.362995 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 26 07:18:31 crc kubenswrapper[4492]: I1126 07:18:31.363249 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-mxgqb" Nov 26 07:18:31 crc kubenswrapper[4492]: I1126 07:18:31.363669 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 26 07:18:31 crc kubenswrapper[4492]: I1126 07:18:31.443332 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e527a884-c05c-41f0-9a86-e574860e6a77-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-bn646\" (UID: \"e527a884-c05c-41f0-9a86-e574860e6a77\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-bn646" Nov 26 07:18:31 crc kubenswrapper[4492]: I1126 07:18:31.443553 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e527a884-c05c-41f0-9a86-e574860e6a77-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-bn646\" (UID: \"e527a884-c05c-41f0-9a86-e574860e6a77\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-bn646" Nov 26 07:18:31 crc kubenswrapper[4492]: I1126 07:18:31.443647 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dnsr6\" (UniqueName: \"kubernetes.io/projected/e527a884-c05c-41f0-9a86-e574860e6a77-kube-api-access-dnsr6\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-bn646\" (UID: \"e527a884-c05c-41f0-9a86-e574860e6a77\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-bn646" Nov 26 07:18:31 crc kubenswrapper[4492]: I1126 07:18:31.545486 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e527a884-c05c-41f0-9a86-e574860e6a77-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-bn646\" (UID: \"e527a884-c05c-41f0-9a86-e574860e6a77\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-bn646" Nov 26 07:18:31 crc kubenswrapper[4492]: I1126 07:18:31.545552 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dnsr6\" (UniqueName: \"kubernetes.io/projected/e527a884-c05c-41f0-9a86-e574860e6a77-kube-api-access-dnsr6\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-bn646\" (UID: \"e527a884-c05c-41f0-9a86-e574860e6a77\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-bn646" Nov 26 07:18:31 crc kubenswrapper[4492]: I1126 07:18:31.545627 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e527a884-c05c-41f0-9a86-e574860e6a77-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-bn646\" (UID: \"e527a884-c05c-41f0-9a86-e574860e6a77\") " 
pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-bn646" Nov 26 07:18:31 crc kubenswrapper[4492]: I1126 07:18:31.550052 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e527a884-c05c-41f0-9a86-e574860e6a77-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-bn646\" (UID: \"e527a884-c05c-41f0-9a86-e574860e6a77\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-bn646" Nov 26 07:18:31 crc kubenswrapper[4492]: I1126 07:18:31.550103 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e527a884-c05c-41f0-9a86-e574860e6a77-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-bn646\" (UID: \"e527a884-c05c-41f0-9a86-e574860e6a77\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-bn646" Nov 26 07:18:31 crc kubenswrapper[4492]: I1126 07:18:31.560570 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dnsr6\" (UniqueName: \"kubernetes.io/projected/e527a884-c05c-41f0-9a86-e574860e6a77-kube-api-access-dnsr6\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-bn646\" (UID: \"e527a884-c05c-41f0-9a86-e574860e6a77\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-bn646" Nov 26 07:18:31 crc kubenswrapper[4492]: I1126 07:18:31.667537 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-bn646" Nov 26 07:18:32 crc kubenswrapper[4492]: I1126 07:18:32.165620 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-bn646"] Nov 26 07:18:32 crc kubenswrapper[4492]: I1126 07:18:32.273712 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-bn646" event={"ID":"e527a884-c05c-41f0-9a86-e574860e6a77","Type":"ContainerStarted","Data":"caae791be3d8852f62699b6cff7acd690d912f2d0602a90d4d3e6626355bdf8e"} Nov 26 07:18:32 crc kubenswrapper[4492]: I1126 07:18:32.536559 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-n6jt7"] Nov 26 07:18:32 crc kubenswrapper[4492]: I1126 07:18:32.539105 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-n6jt7" Nov 26 07:18:32 crc kubenswrapper[4492]: I1126 07:18:32.549508 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-n6jt7"] Nov 26 07:18:32 crc kubenswrapper[4492]: I1126 07:18:32.575105 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vspnz\" (UniqueName: \"kubernetes.io/projected/663dc283-8e98-43f0-97bc-4527ba1509e8-kube-api-access-vspnz\") pod \"community-operators-n6jt7\" (UID: \"663dc283-8e98-43f0-97bc-4527ba1509e8\") " pod="openshift-marketplace/community-operators-n6jt7" Nov 26 07:18:32 crc kubenswrapper[4492]: I1126 07:18:32.575478 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/663dc283-8e98-43f0-97bc-4527ba1509e8-utilities\") pod \"community-operators-n6jt7\" (UID: \"663dc283-8e98-43f0-97bc-4527ba1509e8\") " pod="openshift-marketplace/community-operators-n6jt7" Nov 26 07:18:32 crc kubenswrapper[4492]: I1126 07:18:32.575634 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/663dc283-8e98-43f0-97bc-4527ba1509e8-catalog-content\") pod \"community-operators-n6jt7\" (UID: \"663dc283-8e98-43f0-97bc-4527ba1509e8\") " pod="openshift-marketplace/community-operators-n6jt7" Nov 26 07:18:32 crc kubenswrapper[4492]: I1126 07:18:32.678555 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vspnz\" (UniqueName: \"kubernetes.io/projected/663dc283-8e98-43f0-97bc-4527ba1509e8-kube-api-access-vspnz\") pod \"community-operators-n6jt7\" (UID: \"663dc283-8e98-43f0-97bc-4527ba1509e8\") " pod="openshift-marketplace/community-operators-n6jt7" Nov 26 07:18:32 crc kubenswrapper[4492]: I1126 07:18:32.678689 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/663dc283-8e98-43f0-97bc-4527ba1509e8-utilities\") pod \"community-operators-n6jt7\" (UID: \"663dc283-8e98-43f0-97bc-4527ba1509e8\") " pod="openshift-marketplace/community-operators-n6jt7" Nov 26 07:18:32 crc kubenswrapper[4492]: I1126 07:18:32.679069 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/663dc283-8e98-43f0-97bc-4527ba1509e8-utilities\") pod \"community-operators-n6jt7\" (UID: \"663dc283-8e98-43f0-97bc-4527ba1509e8\") " pod="openshift-marketplace/community-operators-n6jt7" Nov 26 07:18:32 crc kubenswrapper[4492]: I1126 07:18:32.688218 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/663dc283-8e98-43f0-97bc-4527ba1509e8-catalog-content\") pod \"community-operators-n6jt7\" (UID: \"663dc283-8e98-43f0-97bc-4527ba1509e8\") " pod="openshift-marketplace/community-operators-n6jt7" Nov 26 07:18:32 crc kubenswrapper[4492]: I1126 07:18:32.679334 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/663dc283-8e98-43f0-97bc-4527ba1509e8-catalog-content\") pod \"community-operators-n6jt7\" (UID: \"663dc283-8e98-43f0-97bc-4527ba1509e8\") " pod="openshift-marketplace/community-operators-n6jt7" Nov 26 07:18:32 crc kubenswrapper[4492]: I1126 07:18:32.703758 4492 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-vspnz\" (UniqueName: \"kubernetes.io/projected/663dc283-8e98-43f0-97bc-4527ba1509e8-kube-api-access-vspnz\") pod \"community-operators-n6jt7\" (UID: \"663dc283-8e98-43f0-97bc-4527ba1509e8\") " pod="openshift-marketplace/community-operators-n6jt7" Nov 26 07:18:32 crc kubenswrapper[4492]: I1126 07:18:32.860580 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-n6jt7" Nov 26 07:18:33 crc kubenswrapper[4492]: I1126 07:18:33.283259 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-bn646" event={"ID":"e527a884-c05c-41f0-9a86-e574860e6a77","Type":"ContainerStarted","Data":"366bb8d1f68d57074de858c61af983075cef4c4c3b4cc11483fdc04c17c05756"} Nov 26 07:18:33 crc kubenswrapper[4492]: I1126 07:18:33.370057 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-bn646" podStartSLOduration=1.8908893390000001 podStartE2EDuration="2.370031716s" podCreationTimestamp="2025-11-26 07:18:31 +0000 UTC" firstStartedPulling="2025-11-26 07:18:32.179996181 +0000 UTC m=+1808.063884480" lastFinishedPulling="2025-11-26 07:18:32.659138559 +0000 UTC m=+1808.543026857" observedRunningTime="2025-11-26 07:18:33.306211636 +0000 UTC m=+1809.190099935" watchObservedRunningTime="2025-11-26 07:18:33.370031716 +0000 UTC m=+1809.253920014" Nov 26 07:18:33 crc kubenswrapper[4492]: I1126 07:18:33.376255 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-n6jt7"] Nov 26 07:18:33 crc kubenswrapper[4492]: W1126 07:18:33.379078 4492 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod663dc283_8e98_43f0_97bc_4527ba1509e8.slice/crio-ed66d97351915eddb5ca72a0012fb4e81f2b811888e9a40f458e0556cd846285 WatchSource:0}: Error finding container ed66d97351915eddb5ca72a0012fb4e81f2b811888e9a40f458e0556cd846285: Status 404 returned error can't find the container with id ed66d97351915eddb5ca72a0012fb4e81f2b811888e9a40f458e0556cd846285 Nov 26 07:18:34 crc kubenswrapper[4492]: I1126 07:18:34.294382 4492 generic.go:334] "Generic (PLEG): container finished" podID="663dc283-8e98-43f0-97bc-4527ba1509e8" containerID="bbc1e6d7f1f71f005db27dd6e13843dd6df77f5b75e34ab9b203a199fa030181" exitCode=0 Nov 26 07:18:34 crc kubenswrapper[4492]: I1126 07:18:34.294450 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-n6jt7" event={"ID":"663dc283-8e98-43f0-97bc-4527ba1509e8","Type":"ContainerDied","Data":"bbc1e6d7f1f71f005db27dd6e13843dd6df77f5b75e34ab9b203a199fa030181"} Nov 26 07:18:34 crc kubenswrapper[4492]: I1126 07:18:34.295731 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-n6jt7" event={"ID":"663dc283-8e98-43f0-97bc-4527ba1509e8","Type":"ContainerStarted","Data":"ed66d97351915eddb5ca72a0012fb4e81f2b811888e9a40f458e0556cd846285"} Nov 26 07:18:35 crc kubenswrapper[4492]: I1126 07:18:35.746021 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-ss5gn"] Nov 26 07:18:35 crc kubenswrapper[4492]: I1126 07:18:35.756218 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-ss5gn" Nov 26 07:18:35 crc kubenswrapper[4492]: I1126 07:18:35.758862 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-ss5gn"] Nov 26 07:18:35 crc kubenswrapper[4492]: I1126 07:18:35.865005 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0e99d4d4-fe00-43d0-bf42-d49a2cea8bcd-catalog-content\") pod \"redhat-operators-ss5gn\" (UID: \"0e99d4d4-fe00-43d0-bf42-d49a2cea8bcd\") " pod="openshift-marketplace/redhat-operators-ss5gn" Nov 26 07:18:35 crc kubenswrapper[4492]: I1126 07:18:35.865314 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0e99d4d4-fe00-43d0-bf42-d49a2cea8bcd-utilities\") pod \"redhat-operators-ss5gn\" (UID: \"0e99d4d4-fe00-43d0-bf42-d49a2cea8bcd\") " pod="openshift-marketplace/redhat-operators-ss5gn" Nov 26 07:18:35 crc kubenswrapper[4492]: I1126 07:18:35.865360 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-48lbq\" (UniqueName: \"kubernetes.io/projected/0e99d4d4-fe00-43d0-bf42-d49a2cea8bcd-kube-api-access-48lbq\") pod \"redhat-operators-ss5gn\" (UID: \"0e99d4d4-fe00-43d0-bf42-d49a2cea8bcd\") " pod="openshift-marketplace/redhat-operators-ss5gn" Nov 26 07:18:35 crc kubenswrapper[4492]: I1126 07:18:35.966956 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0e99d4d4-fe00-43d0-bf42-d49a2cea8bcd-utilities\") pod \"redhat-operators-ss5gn\" (UID: \"0e99d4d4-fe00-43d0-bf42-d49a2cea8bcd\") " pod="openshift-marketplace/redhat-operators-ss5gn" Nov 26 07:18:35 crc kubenswrapper[4492]: I1126 07:18:35.967012 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-48lbq\" (UniqueName: \"kubernetes.io/projected/0e99d4d4-fe00-43d0-bf42-d49a2cea8bcd-kube-api-access-48lbq\") pod \"redhat-operators-ss5gn\" (UID: \"0e99d4d4-fe00-43d0-bf42-d49a2cea8bcd\") " pod="openshift-marketplace/redhat-operators-ss5gn" Nov 26 07:18:35 crc kubenswrapper[4492]: I1126 07:18:35.967081 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0e99d4d4-fe00-43d0-bf42-d49a2cea8bcd-catalog-content\") pod \"redhat-operators-ss5gn\" (UID: \"0e99d4d4-fe00-43d0-bf42-d49a2cea8bcd\") " pod="openshift-marketplace/redhat-operators-ss5gn" Nov 26 07:18:35 crc kubenswrapper[4492]: I1126 07:18:35.967600 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0e99d4d4-fe00-43d0-bf42-d49a2cea8bcd-catalog-content\") pod \"redhat-operators-ss5gn\" (UID: \"0e99d4d4-fe00-43d0-bf42-d49a2cea8bcd\") " pod="openshift-marketplace/redhat-operators-ss5gn" Nov 26 07:18:35 crc kubenswrapper[4492]: I1126 07:18:35.968107 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0e99d4d4-fe00-43d0-bf42-d49a2cea8bcd-utilities\") pod \"redhat-operators-ss5gn\" (UID: \"0e99d4d4-fe00-43d0-bf42-d49a2cea8bcd\") " pod="openshift-marketplace/redhat-operators-ss5gn" Nov 26 07:18:35 crc kubenswrapper[4492]: I1126 07:18:35.995389 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-48lbq\" (UniqueName: \"kubernetes.io/projected/0e99d4d4-fe00-43d0-bf42-d49a2cea8bcd-kube-api-access-48lbq\") pod \"redhat-operators-ss5gn\" (UID: \"0e99d4d4-fe00-43d0-bf42-d49a2cea8bcd\") " pod="openshift-marketplace/redhat-operators-ss5gn" Nov 26 07:18:36 crc kubenswrapper[4492]: I1126 07:18:36.077266 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-ss5gn" Nov 26 07:18:36 crc kubenswrapper[4492]: I1126 07:18:36.351761 4492 generic.go:334] "Generic (PLEG): container finished" podID="663dc283-8e98-43f0-97bc-4527ba1509e8" containerID="abeb2be0c7094b412c3f037777035f235eef500ba2cb0ec82f03a8273f58bd63" exitCode=0 Nov 26 07:18:36 crc kubenswrapper[4492]: I1126 07:18:36.352064 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-n6jt7" event={"ID":"663dc283-8e98-43f0-97bc-4527ba1509e8","Type":"ContainerDied","Data":"abeb2be0c7094b412c3f037777035f235eef500ba2cb0ec82f03a8273f58bd63"} Nov 26 07:18:36 crc kubenswrapper[4492]: W1126 07:18:36.533365 4492 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0e99d4d4_fe00_43d0_bf42_d49a2cea8bcd.slice/crio-192099d8bb4b8b486ce8a16732c60c7f8fc8b501b36931e10efaad81d81b2e86 WatchSource:0}: Error finding container 192099d8bb4b8b486ce8a16732c60c7f8fc8b501b36931e10efaad81d81b2e86: Status 404 returned error can't find the container with id 192099d8bb4b8b486ce8a16732c60c7f8fc8b501b36931e10efaad81d81b2e86 Nov 26 07:18:36 crc kubenswrapper[4492]: I1126 07:18:36.541063 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-ss5gn"] Nov 26 07:18:37 crc kubenswrapper[4492]: I1126 07:18:37.365023 4492 generic.go:334] "Generic (PLEG): container finished" podID="0e99d4d4-fe00-43d0-bf42-d49a2cea8bcd" containerID="edff7787912f61aceabf8e437a544fdee66fccac8ff5d0a45035aa7c0cfda1c5" exitCode=0 Nov 26 07:18:37 crc kubenswrapper[4492]: I1126 07:18:37.365105 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ss5gn" event={"ID":"0e99d4d4-fe00-43d0-bf42-d49a2cea8bcd","Type":"ContainerDied","Data":"edff7787912f61aceabf8e437a544fdee66fccac8ff5d0a45035aa7c0cfda1c5"} Nov 26 07:18:37 crc kubenswrapper[4492]: I1126 07:18:37.365940 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ss5gn" event={"ID":"0e99d4d4-fe00-43d0-bf42-d49a2cea8bcd","Type":"ContainerStarted","Data":"192099d8bb4b8b486ce8a16732c60c7f8fc8b501b36931e10efaad81d81b2e86"} Nov 26 07:18:37 crc kubenswrapper[4492]: I1126 07:18:37.371035 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-n6jt7" event={"ID":"663dc283-8e98-43f0-97bc-4527ba1509e8","Type":"ContainerStarted","Data":"da53df7bc19cc3ab24e8642d67e170c35aec340e5abb462ffd86fad6fff38b10"} Nov 26 07:18:37 crc kubenswrapper[4492]: I1126 07:18:37.403500 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-n6jt7" podStartSLOduration=2.853078929 podStartE2EDuration="5.403488255s" podCreationTimestamp="2025-11-26 07:18:32 +0000 UTC" firstStartedPulling="2025-11-26 07:18:34.298431402 +0000 UTC m=+1810.182319690" lastFinishedPulling="2025-11-26 07:18:36.848840719 +0000 UTC m=+1812.732729016" observedRunningTime="2025-11-26 07:18:37.397659997 +0000 UTC m=+1813.281548305" watchObservedRunningTime="2025-11-26 
07:18:37.403488255 +0000 UTC m=+1813.287376553" Nov 26 07:18:38 crc kubenswrapper[4492]: I1126 07:18:38.381408 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ss5gn" event={"ID":"0e99d4d4-fe00-43d0-bf42-d49a2cea8bcd","Type":"ContainerStarted","Data":"448d28b555110e5a32a99f58444e4f2904fec363e974e785b90f7cba2f69be0c"} Nov 26 07:18:38 crc kubenswrapper[4492]: I1126 07:18:38.438219 4492 scope.go:117] "RemoveContainer" containerID="e66a01ef992e57d5211153b60046f2247dc264cdaa804a19ffc29e563f14e227" Nov 26 07:18:38 crc kubenswrapper[4492]: E1126 07:18:38.438500 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 07:18:40 crc kubenswrapper[4492]: I1126 07:18:40.403751 4492 generic.go:334] "Generic (PLEG): container finished" podID="0e99d4d4-fe00-43d0-bf42-d49a2cea8bcd" containerID="448d28b555110e5a32a99f58444e4f2904fec363e974e785b90f7cba2f69be0c" exitCode=0 Nov 26 07:18:40 crc kubenswrapper[4492]: I1126 07:18:40.403857 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ss5gn" event={"ID":"0e99d4d4-fe00-43d0-bf42-d49a2cea8bcd","Type":"ContainerDied","Data":"448d28b555110e5a32a99f58444e4f2904fec363e974e785b90f7cba2f69be0c"} Nov 26 07:18:40 crc kubenswrapper[4492]: I1126 07:18:40.406938 4492 generic.go:334] "Generic (PLEG): container finished" podID="e527a884-c05c-41f0-9a86-e574860e6a77" containerID="366bb8d1f68d57074de858c61af983075cef4c4c3b4cc11483fdc04c17c05756" exitCode=0 Nov 26 07:18:40 crc kubenswrapper[4492]: I1126 07:18:40.406989 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-bn646" event={"ID":"e527a884-c05c-41f0-9a86-e574860e6a77","Type":"ContainerDied","Data":"366bb8d1f68d57074de858c61af983075cef4c4c3b4cc11483fdc04c17c05756"} Nov 26 07:18:41 crc kubenswrapper[4492]: I1126 07:18:41.419018 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ss5gn" event={"ID":"0e99d4d4-fe00-43d0-bf42-d49a2cea8bcd","Type":"ContainerStarted","Data":"6a33674701d78ffb2a957ade1944e898c959f6f0a9cf7ab47d43dce8b149b4ab"} Nov 26 07:18:41 crc kubenswrapper[4492]: I1126 07:18:41.443947 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-ss5gn" podStartSLOduration=2.88218241 podStartE2EDuration="6.443933102s" podCreationTimestamp="2025-11-26 07:18:35 +0000 UTC" firstStartedPulling="2025-11-26 07:18:37.367474094 +0000 UTC m=+1813.251362392" lastFinishedPulling="2025-11-26 07:18:40.929224797 +0000 UTC m=+1816.813113084" observedRunningTime="2025-11-26 07:18:41.441630302 +0000 UTC m=+1817.325518601" watchObservedRunningTime="2025-11-26 07:18:41.443933102 +0000 UTC m=+1817.327821400" Nov 26 07:18:41 crc kubenswrapper[4492]: I1126 07:18:41.810919 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-bn646" Nov 26 07:18:42 crc kubenswrapper[4492]: I1126 07:18:42.006964 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e527a884-c05c-41f0-9a86-e574860e6a77-ssh-key\") pod \"e527a884-c05c-41f0-9a86-e574860e6a77\" (UID: \"e527a884-c05c-41f0-9a86-e574860e6a77\") " Nov 26 07:18:42 crc kubenswrapper[4492]: I1126 07:18:42.007110 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e527a884-c05c-41f0-9a86-e574860e6a77-inventory\") pod \"e527a884-c05c-41f0-9a86-e574860e6a77\" (UID: \"e527a884-c05c-41f0-9a86-e574860e6a77\") " Nov 26 07:18:42 crc kubenswrapper[4492]: I1126 07:18:42.007358 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dnsr6\" (UniqueName: \"kubernetes.io/projected/e527a884-c05c-41f0-9a86-e574860e6a77-kube-api-access-dnsr6\") pod \"e527a884-c05c-41f0-9a86-e574860e6a77\" (UID: \"e527a884-c05c-41f0-9a86-e574860e6a77\") " Nov 26 07:18:42 crc kubenswrapper[4492]: I1126 07:18:42.013159 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e527a884-c05c-41f0-9a86-e574860e6a77-kube-api-access-dnsr6" (OuterVolumeSpecName: "kube-api-access-dnsr6") pod "e527a884-c05c-41f0-9a86-e574860e6a77" (UID: "e527a884-c05c-41f0-9a86-e574860e6a77"). InnerVolumeSpecName "kube-api-access-dnsr6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:18:42 crc kubenswrapper[4492]: I1126 07:18:42.034389 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e527a884-c05c-41f0-9a86-e574860e6a77-inventory" (OuterVolumeSpecName: "inventory") pod "e527a884-c05c-41f0-9a86-e574860e6a77" (UID: "e527a884-c05c-41f0-9a86-e574860e6a77"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:18:42 crc kubenswrapper[4492]: I1126 07:18:42.035107 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e527a884-c05c-41f0-9a86-e574860e6a77-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "e527a884-c05c-41f0-9a86-e574860e6a77" (UID: "e527a884-c05c-41f0-9a86-e574860e6a77"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:18:42 crc kubenswrapper[4492]: I1126 07:18:42.110656 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dnsr6\" (UniqueName: \"kubernetes.io/projected/e527a884-c05c-41f0-9a86-e574860e6a77-kube-api-access-dnsr6\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:42 crc kubenswrapper[4492]: I1126 07:18:42.110690 4492 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e527a884-c05c-41f0-9a86-e574860e6a77-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:42 crc kubenswrapper[4492]: I1126 07:18:42.110701 4492 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e527a884-c05c-41f0-9a86-e574860e6a77-inventory\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:42 crc kubenswrapper[4492]: I1126 07:18:42.429813 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-bn646" event={"ID":"e527a884-c05c-41f0-9a86-e574860e6a77","Type":"ContainerDied","Data":"caae791be3d8852f62699b6cff7acd690d912f2d0602a90d4d3e6626355bdf8e"} Nov 26 07:18:42 crc kubenswrapper[4492]: I1126 07:18:42.430194 4492 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="caae791be3d8852f62699b6cff7acd690d912f2d0602a90d4d3e6626355bdf8e" Nov 26 07:18:42 crc kubenswrapper[4492]: I1126 07:18:42.429907 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-bn646" Nov 26 07:18:42 crc kubenswrapper[4492]: I1126 07:18:42.517751 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-jqtfc"] Nov 26 07:18:42 crc kubenswrapper[4492]: E1126 07:18:42.518429 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e527a884-c05c-41f0-9a86-e574860e6a77" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Nov 26 07:18:42 crc kubenswrapper[4492]: I1126 07:18:42.518455 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="e527a884-c05c-41f0-9a86-e574860e6a77" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Nov 26 07:18:42 crc kubenswrapper[4492]: I1126 07:18:42.518739 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="e527a884-c05c-41f0-9a86-e574860e6a77" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Nov 26 07:18:42 crc kubenswrapper[4492]: I1126 07:18:42.519810 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-jqtfc" Nov 26 07:18:42 crc kubenswrapper[4492]: I1126 07:18:42.522459 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-mxgqb" Nov 26 07:18:42 crc kubenswrapper[4492]: I1126 07:18:42.522787 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 26 07:18:42 crc kubenswrapper[4492]: I1126 07:18:42.528275 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 26 07:18:42 crc kubenswrapper[4492]: I1126 07:18:42.529884 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 26 07:18:42 crc kubenswrapper[4492]: I1126 07:18:42.539063 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-jqtfc"] Nov 26 07:18:42 crc kubenswrapper[4492]: I1126 07:18:42.623999 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4svt8\" (UniqueName: \"kubernetes.io/projected/ab55e0ec-7fd5-48d6-ae7e-9c879951aef6-kube-api-access-4svt8\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-jqtfc\" (UID: \"ab55e0ec-7fd5-48d6-ae7e-9c879951aef6\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-jqtfc" Nov 26 07:18:42 crc kubenswrapper[4492]: I1126 07:18:42.624291 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ab55e0ec-7fd5-48d6-ae7e-9c879951aef6-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-jqtfc\" (UID: \"ab55e0ec-7fd5-48d6-ae7e-9c879951aef6\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-jqtfc" Nov 26 07:18:42 crc kubenswrapper[4492]: I1126 07:18:42.624690 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ab55e0ec-7fd5-48d6-ae7e-9c879951aef6-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-jqtfc\" (UID: \"ab55e0ec-7fd5-48d6-ae7e-9c879951aef6\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-jqtfc" Nov 26 07:18:42 crc kubenswrapper[4492]: I1126 07:18:42.726559 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ab55e0ec-7fd5-48d6-ae7e-9c879951aef6-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-jqtfc\" (UID: \"ab55e0ec-7fd5-48d6-ae7e-9c879951aef6\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-jqtfc" Nov 26 07:18:42 crc kubenswrapper[4492]: I1126 07:18:42.726679 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4svt8\" (UniqueName: \"kubernetes.io/projected/ab55e0ec-7fd5-48d6-ae7e-9c879951aef6-kube-api-access-4svt8\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-jqtfc\" (UID: \"ab55e0ec-7fd5-48d6-ae7e-9c879951aef6\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-jqtfc" Nov 26 07:18:42 crc kubenswrapper[4492]: I1126 07:18:42.726736 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ab55e0ec-7fd5-48d6-ae7e-9c879951aef6-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-jqtfc\" (UID: 
\"ab55e0ec-7fd5-48d6-ae7e-9c879951aef6\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-jqtfc" Nov 26 07:18:42 crc kubenswrapper[4492]: I1126 07:18:42.730396 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ab55e0ec-7fd5-48d6-ae7e-9c879951aef6-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-jqtfc\" (UID: \"ab55e0ec-7fd5-48d6-ae7e-9c879951aef6\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-jqtfc" Nov 26 07:18:42 crc kubenswrapper[4492]: I1126 07:18:42.731706 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ab55e0ec-7fd5-48d6-ae7e-9c879951aef6-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-jqtfc\" (UID: \"ab55e0ec-7fd5-48d6-ae7e-9c879951aef6\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-jqtfc" Nov 26 07:18:42 crc kubenswrapper[4492]: I1126 07:18:42.744717 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4svt8\" (UniqueName: \"kubernetes.io/projected/ab55e0ec-7fd5-48d6-ae7e-9c879951aef6-kube-api-access-4svt8\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-jqtfc\" (UID: \"ab55e0ec-7fd5-48d6-ae7e-9c879951aef6\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-jqtfc" Nov 26 07:18:42 crc kubenswrapper[4492]: I1126 07:18:42.846373 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-jqtfc" Nov 26 07:18:42 crc kubenswrapper[4492]: I1126 07:18:42.860779 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-n6jt7" Nov 26 07:18:42 crc kubenswrapper[4492]: I1126 07:18:42.861074 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-n6jt7" Nov 26 07:18:42 crc kubenswrapper[4492]: I1126 07:18:42.905782 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-n6jt7" Nov 26 07:18:43 crc kubenswrapper[4492]: I1126 07:18:43.340383 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-jqtfc"] Nov 26 07:18:43 crc kubenswrapper[4492]: I1126 07:18:43.439670 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-jqtfc" event={"ID":"ab55e0ec-7fd5-48d6-ae7e-9c879951aef6","Type":"ContainerStarted","Data":"29a2f598f55648047142c45778bcc26eeb7374c5911a5f73b4cb8aab95fba83f"} Nov 26 07:18:43 crc kubenswrapper[4492]: I1126 07:18:43.485848 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-n6jt7" Nov 26 07:18:44 crc kubenswrapper[4492]: I1126 07:18:44.455248 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-jqtfc" event={"ID":"ab55e0ec-7fd5-48d6-ae7e-9c879951aef6","Type":"ContainerStarted","Data":"6c172df1aee729ec03428528dea94df7ef4972a2ce3ead6c58efcbf5addca2b4"} Nov 26 07:18:44 crc kubenswrapper[4492]: I1126 07:18:44.483385 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-jqtfc" podStartSLOduration=1.964145696 podStartE2EDuration="2.483367617s" podCreationTimestamp="2025-11-26 07:18:42 +0000 UTC" 
firstStartedPulling="2025-11-26 07:18:43.348583381 +0000 UTC m=+1819.232471680" lastFinishedPulling="2025-11-26 07:18:43.867805302 +0000 UTC m=+1819.751693601" observedRunningTime="2025-11-26 07:18:44.481894166 +0000 UTC m=+1820.365782454" watchObservedRunningTime="2025-11-26 07:18:44.483367617 +0000 UTC m=+1820.367255914" Nov 26 07:18:46 crc kubenswrapper[4492]: I1126 07:18:46.046066 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-cell-mapping-dhdz4"] Nov 26 07:18:46 crc kubenswrapper[4492]: I1126 07:18:46.051779 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-cell-mapping-dhdz4"] Nov 26 07:18:46 crc kubenswrapper[4492]: I1126 07:18:46.078958 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-ss5gn" Nov 26 07:18:46 crc kubenswrapper[4492]: I1126 07:18:46.078992 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-ss5gn" Nov 26 07:18:46 crc kubenswrapper[4492]: I1126 07:18:46.456479 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="74c85bf8-cdcf-4e64-83cf-5f62d3c90b09" path="/var/lib/kubelet/pods/74c85bf8-cdcf-4e64-83cf-5f62d3c90b09/volumes" Nov 26 07:18:47 crc kubenswrapper[4492]: I1126 07:18:47.115357 4492 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-ss5gn" podUID="0e99d4d4-fe00-43d0-bf42-d49a2cea8bcd" containerName="registry-server" probeResult="failure" output=< Nov 26 07:18:47 crc kubenswrapper[4492]: timeout: failed to connect service ":50051" within 1s Nov 26 07:18:47 crc kubenswrapper[4492]: > Nov 26 07:18:47 crc kubenswrapper[4492]: I1126 07:18:47.531934 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-n6jt7"] Nov 26 07:18:47 crc kubenswrapper[4492]: I1126 07:18:47.532621 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-n6jt7" podUID="663dc283-8e98-43f0-97bc-4527ba1509e8" containerName="registry-server" containerID="cri-o://da53df7bc19cc3ab24e8642d67e170c35aec340e5abb462ffd86fad6fff38b10" gracePeriod=2 Nov 26 07:18:47 crc kubenswrapper[4492]: I1126 07:18:47.942863 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-n6jt7" Nov 26 07:18:48 crc kubenswrapper[4492]: I1126 07:18:48.059696 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vspnz\" (UniqueName: \"kubernetes.io/projected/663dc283-8e98-43f0-97bc-4527ba1509e8-kube-api-access-vspnz\") pod \"663dc283-8e98-43f0-97bc-4527ba1509e8\" (UID: \"663dc283-8e98-43f0-97bc-4527ba1509e8\") " Nov 26 07:18:48 crc kubenswrapper[4492]: I1126 07:18:48.059889 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/663dc283-8e98-43f0-97bc-4527ba1509e8-utilities\") pod \"663dc283-8e98-43f0-97bc-4527ba1509e8\" (UID: \"663dc283-8e98-43f0-97bc-4527ba1509e8\") " Nov 26 07:18:48 crc kubenswrapper[4492]: I1126 07:18:48.061074 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/663dc283-8e98-43f0-97bc-4527ba1509e8-utilities" (OuterVolumeSpecName: "utilities") pod "663dc283-8e98-43f0-97bc-4527ba1509e8" (UID: "663dc283-8e98-43f0-97bc-4527ba1509e8"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:18:48 crc kubenswrapper[4492]: I1126 07:18:48.061366 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/663dc283-8e98-43f0-97bc-4527ba1509e8-catalog-content\") pod \"663dc283-8e98-43f0-97bc-4527ba1509e8\" (UID: \"663dc283-8e98-43f0-97bc-4527ba1509e8\") " Nov 26 07:18:48 crc kubenswrapper[4492]: I1126 07:18:48.063133 4492 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/663dc283-8e98-43f0-97bc-4527ba1509e8-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:48 crc kubenswrapper[4492]: I1126 07:18:48.066437 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/663dc283-8e98-43f0-97bc-4527ba1509e8-kube-api-access-vspnz" (OuterVolumeSpecName: "kube-api-access-vspnz") pod "663dc283-8e98-43f0-97bc-4527ba1509e8" (UID: "663dc283-8e98-43f0-97bc-4527ba1509e8"). InnerVolumeSpecName "kube-api-access-vspnz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:18:48 crc kubenswrapper[4492]: I1126 07:18:48.103336 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/663dc283-8e98-43f0-97bc-4527ba1509e8-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "663dc283-8e98-43f0-97bc-4527ba1509e8" (UID: "663dc283-8e98-43f0-97bc-4527ba1509e8"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:18:48 crc kubenswrapper[4492]: I1126 07:18:48.165844 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vspnz\" (UniqueName: \"kubernetes.io/projected/663dc283-8e98-43f0-97bc-4527ba1509e8-kube-api-access-vspnz\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:48 crc kubenswrapper[4492]: I1126 07:18:48.165899 4492 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/663dc283-8e98-43f0-97bc-4527ba1509e8-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:48 crc kubenswrapper[4492]: I1126 07:18:48.494058 4492 generic.go:334] "Generic (PLEG): container finished" podID="663dc283-8e98-43f0-97bc-4527ba1509e8" containerID="da53df7bc19cc3ab24e8642d67e170c35aec340e5abb462ffd86fad6fff38b10" exitCode=0 Nov 26 07:18:48 crc kubenswrapper[4492]: I1126 07:18:48.494137 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-n6jt7" Nov 26 07:18:48 crc kubenswrapper[4492]: I1126 07:18:48.494142 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-n6jt7" event={"ID":"663dc283-8e98-43f0-97bc-4527ba1509e8","Type":"ContainerDied","Data":"da53df7bc19cc3ab24e8642d67e170c35aec340e5abb462ffd86fad6fff38b10"} Nov 26 07:18:48 crc kubenswrapper[4492]: I1126 07:18:48.494243 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-n6jt7" event={"ID":"663dc283-8e98-43f0-97bc-4527ba1509e8","Type":"ContainerDied","Data":"ed66d97351915eddb5ca72a0012fb4e81f2b811888e9a40f458e0556cd846285"} Nov 26 07:18:48 crc kubenswrapper[4492]: I1126 07:18:48.494270 4492 scope.go:117] "RemoveContainer" containerID="da53df7bc19cc3ab24e8642d67e170c35aec340e5abb462ffd86fad6fff38b10" Nov 26 07:18:48 crc kubenswrapper[4492]: I1126 07:18:48.527022 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-n6jt7"] Nov 26 07:18:48 crc kubenswrapper[4492]: I1126 07:18:48.531105 4492 scope.go:117] "RemoveContainer" containerID="abeb2be0c7094b412c3f037777035f235eef500ba2cb0ec82f03a8273f58bd63" Nov 26 07:18:48 crc kubenswrapper[4492]: I1126 07:18:48.531813 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-n6jt7"] Nov 26 07:18:48 crc kubenswrapper[4492]: I1126 07:18:48.562923 4492 scope.go:117] "RemoveContainer" containerID="bbc1e6d7f1f71f005db27dd6e13843dd6df77f5b75e34ab9b203a199fa030181" Nov 26 07:18:48 crc kubenswrapper[4492]: I1126 07:18:48.587491 4492 scope.go:117] "RemoveContainer" containerID="da53df7bc19cc3ab24e8642d67e170c35aec340e5abb462ffd86fad6fff38b10" Nov 26 07:18:48 crc kubenswrapper[4492]: E1126 07:18:48.587944 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"da53df7bc19cc3ab24e8642d67e170c35aec340e5abb462ffd86fad6fff38b10\": container with ID starting with da53df7bc19cc3ab24e8642d67e170c35aec340e5abb462ffd86fad6fff38b10 not found: ID does not exist" containerID="da53df7bc19cc3ab24e8642d67e170c35aec340e5abb462ffd86fad6fff38b10" Nov 26 07:18:48 crc kubenswrapper[4492]: I1126 07:18:48.587983 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"da53df7bc19cc3ab24e8642d67e170c35aec340e5abb462ffd86fad6fff38b10"} err="failed to get container status \"da53df7bc19cc3ab24e8642d67e170c35aec340e5abb462ffd86fad6fff38b10\": rpc error: code = NotFound desc = could not find container \"da53df7bc19cc3ab24e8642d67e170c35aec340e5abb462ffd86fad6fff38b10\": container with ID starting with da53df7bc19cc3ab24e8642d67e170c35aec340e5abb462ffd86fad6fff38b10 not found: ID does not exist" Nov 26 07:18:48 crc kubenswrapper[4492]: I1126 07:18:48.588021 4492 scope.go:117] "RemoveContainer" containerID="abeb2be0c7094b412c3f037777035f235eef500ba2cb0ec82f03a8273f58bd63" Nov 26 07:18:48 crc kubenswrapper[4492]: E1126 07:18:48.588346 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"abeb2be0c7094b412c3f037777035f235eef500ba2cb0ec82f03a8273f58bd63\": container with ID starting with abeb2be0c7094b412c3f037777035f235eef500ba2cb0ec82f03a8273f58bd63 not found: ID does not exist" containerID="abeb2be0c7094b412c3f037777035f235eef500ba2cb0ec82f03a8273f58bd63" Nov 26 07:18:48 crc kubenswrapper[4492]: I1126 07:18:48.588372 4492 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"abeb2be0c7094b412c3f037777035f235eef500ba2cb0ec82f03a8273f58bd63"} err="failed to get container status \"abeb2be0c7094b412c3f037777035f235eef500ba2cb0ec82f03a8273f58bd63\": rpc error: code = NotFound desc = could not find container \"abeb2be0c7094b412c3f037777035f235eef500ba2cb0ec82f03a8273f58bd63\": container with ID starting with abeb2be0c7094b412c3f037777035f235eef500ba2cb0ec82f03a8273f58bd63 not found: ID does not exist" Nov 26 07:18:48 crc kubenswrapper[4492]: I1126 07:18:48.588390 4492 scope.go:117] "RemoveContainer" containerID="bbc1e6d7f1f71f005db27dd6e13843dd6df77f5b75e34ab9b203a199fa030181" Nov 26 07:18:48 crc kubenswrapper[4492]: E1126 07:18:48.588671 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bbc1e6d7f1f71f005db27dd6e13843dd6df77f5b75e34ab9b203a199fa030181\": container with ID starting with bbc1e6d7f1f71f005db27dd6e13843dd6df77f5b75e34ab9b203a199fa030181 not found: ID does not exist" containerID="bbc1e6d7f1f71f005db27dd6e13843dd6df77f5b75e34ab9b203a199fa030181" Nov 26 07:18:48 crc kubenswrapper[4492]: I1126 07:18:48.588690 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bbc1e6d7f1f71f005db27dd6e13843dd6df77f5b75e34ab9b203a199fa030181"} err="failed to get container status \"bbc1e6d7f1f71f005db27dd6e13843dd6df77f5b75e34ab9b203a199fa030181\": rpc error: code = NotFound desc = could not find container \"bbc1e6d7f1f71f005db27dd6e13843dd6df77f5b75e34ab9b203a199fa030181\": container with ID starting with bbc1e6d7f1f71f005db27dd6e13843dd6df77f5b75e34ab9b203a199fa030181 not found: ID does not exist" Nov 26 07:18:50 crc kubenswrapper[4492]: I1126 07:18:50.438987 4492 scope.go:117] "RemoveContainer" containerID="e66a01ef992e57d5211153b60046f2247dc264cdaa804a19ffc29e563f14e227" Nov 26 07:18:50 crc kubenswrapper[4492]: I1126 07:18:50.452347 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="663dc283-8e98-43f0-97bc-4527ba1509e8" path="/var/lib/kubelet/pods/663dc283-8e98-43f0-97bc-4527ba1509e8/volumes" Nov 26 07:18:51 crc kubenswrapper[4492]: I1126 07:18:51.521513 4492 generic.go:334] "Generic (PLEG): container finished" podID="ab55e0ec-7fd5-48d6-ae7e-9c879951aef6" containerID="6c172df1aee729ec03428528dea94df7ef4972a2ce3ead6c58efcbf5addca2b4" exitCode=0 Nov 26 07:18:51 crc kubenswrapper[4492]: I1126 07:18:51.522036 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-jqtfc" event={"ID":"ab55e0ec-7fd5-48d6-ae7e-9c879951aef6","Type":"ContainerDied","Data":"6c172df1aee729ec03428528dea94df7ef4972a2ce3ead6c58efcbf5addca2b4"} Nov 26 07:18:51 crc kubenswrapper[4492]: I1126 07:18:51.524442 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" event={"ID":"04bf18ad-d2a1-4b30-a3fa-2b6247363c82","Type":"ContainerStarted","Data":"334da1a59c09d707f16e7e3825ec5855d6020e57d3066cd28e967266790a8121"} Nov 26 07:18:52 crc kubenswrapper[4492]: I1126 07:18:52.924986 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-jqtfc" Nov 26 07:18:52 crc kubenswrapper[4492]: I1126 07:18:52.971266 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ab55e0ec-7fd5-48d6-ae7e-9c879951aef6-inventory\") pod \"ab55e0ec-7fd5-48d6-ae7e-9c879951aef6\" (UID: \"ab55e0ec-7fd5-48d6-ae7e-9c879951aef6\") " Nov 26 07:18:52 crc kubenswrapper[4492]: I1126 07:18:52.971392 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4svt8\" (UniqueName: \"kubernetes.io/projected/ab55e0ec-7fd5-48d6-ae7e-9c879951aef6-kube-api-access-4svt8\") pod \"ab55e0ec-7fd5-48d6-ae7e-9c879951aef6\" (UID: \"ab55e0ec-7fd5-48d6-ae7e-9c879951aef6\") " Nov 26 07:18:52 crc kubenswrapper[4492]: I1126 07:18:52.972131 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ab55e0ec-7fd5-48d6-ae7e-9c879951aef6-ssh-key\") pod \"ab55e0ec-7fd5-48d6-ae7e-9c879951aef6\" (UID: \"ab55e0ec-7fd5-48d6-ae7e-9c879951aef6\") " Nov 26 07:18:52 crc kubenswrapper[4492]: I1126 07:18:52.980993 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ab55e0ec-7fd5-48d6-ae7e-9c879951aef6-kube-api-access-4svt8" (OuterVolumeSpecName: "kube-api-access-4svt8") pod "ab55e0ec-7fd5-48d6-ae7e-9c879951aef6" (UID: "ab55e0ec-7fd5-48d6-ae7e-9c879951aef6"). InnerVolumeSpecName "kube-api-access-4svt8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:18:52 crc kubenswrapper[4492]: I1126 07:18:52.998082 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ab55e0ec-7fd5-48d6-ae7e-9c879951aef6-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "ab55e0ec-7fd5-48d6-ae7e-9c879951aef6" (UID: "ab55e0ec-7fd5-48d6-ae7e-9c879951aef6"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:18:52 crc kubenswrapper[4492]: I1126 07:18:52.998331 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ab55e0ec-7fd5-48d6-ae7e-9c879951aef6-inventory" (OuterVolumeSpecName: "inventory") pod "ab55e0ec-7fd5-48d6-ae7e-9c879951aef6" (UID: "ab55e0ec-7fd5-48d6-ae7e-9c879951aef6"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:18:53 crc kubenswrapper[4492]: I1126 07:18:53.073596 4492 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ab55e0ec-7fd5-48d6-ae7e-9c879951aef6-inventory\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:53 crc kubenswrapper[4492]: I1126 07:18:53.073631 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4svt8\" (UniqueName: \"kubernetes.io/projected/ab55e0ec-7fd5-48d6-ae7e-9c879951aef6-kube-api-access-4svt8\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:53 crc kubenswrapper[4492]: I1126 07:18:53.073646 4492 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ab55e0ec-7fd5-48d6-ae7e-9c879951aef6-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:53 crc kubenswrapper[4492]: I1126 07:18:53.545381 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-jqtfc" event={"ID":"ab55e0ec-7fd5-48d6-ae7e-9c879951aef6","Type":"ContainerDied","Data":"29a2f598f55648047142c45778bcc26eeb7374c5911a5f73b4cb8aab95fba83f"} Nov 26 07:18:53 crc kubenswrapper[4492]: I1126 07:18:53.545853 4492 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="29a2f598f55648047142c45778bcc26eeb7374c5911a5f73b4cb8aab95fba83f" Nov 26 07:18:53 crc kubenswrapper[4492]: I1126 07:18:53.545451 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-jqtfc" Nov 26 07:18:53 crc kubenswrapper[4492]: I1126 07:18:53.636447 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-8vw6j"] Nov 26 07:18:53 crc kubenswrapper[4492]: E1126 07:18:53.637021 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="663dc283-8e98-43f0-97bc-4527ba1509e8" containerName="registry-server" Nov 26 07:18:53 crc kubenswrapper[4492]: I1126 07:18:53.637043 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="663dc283-8e98-43f0-97bc-4527ba1509e8" containerName="registry-server" Nov 26 07:18:53 crc kubenswrapper[4492]: E1126 07:18:53.637068 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ab55e0ec-7fd5-48d6-ae7e-9c879951aef6" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Nov 26 07:18:53 crc kubenswrapper[4492]: I1126 07:18:53.637077 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="ab55e0ec-7fd5-48d6-ae7e-9c879951aef6" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Nov 26 07:18:53 crc kubenswrapper[4492]: E1126 07:18:53.637090 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="663dc283-8e98-43f0-97bc-4527ba1509e8" containerName="extract-content" Nov 26 07:18:53 crc kubenswrapper[4492]: I1126 07:18:53.637098 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="663dc283-8e98-43f0-97bc-4527ba1509e8" containerName="extract-content" Nov 26 07:18:53 crc kubenswrapper[4492]: E1126 07:18:53.637119 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="663dc283-8e98-43f0-97bc-4527ba1509e8" containerName="extract-utilities" Nov 26 07:18:53 crc kubenswrapper[4492]: I1126 07:18:53.637124 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="663dc283-8e98-43f0-97bc-4527ba1509e8" containerName="extract-utilities" Nov 26 07:18:53 crc kubenswrapper[4492]: I1126 07:18:53.637375 4492 memory_manager.go:354] 
"RemoveStaleState removing state" podUID="ab55e0ec-7fd5-48d6-ae7e-9c879951aef6" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Nov 26 07:18:53 crc kubenswrapper[4492]: I1126 07:18:53.637421 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="663dc283-8e98-43f0-97bc-4527ba1509e8" containerName="registry-server" Nov 26 07:18:53 crc kubenswrapper[4492]: I1126 07:18:53.638274 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-8vw6j" Nov 26 07:18:53 crc kubenswrapper[4492]: I1126 07:18:53.641071 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-ovn-default-certs-0" Nov 26 07:18:53 crc kubenswrapper[4492]: I1126 07:18:53.641580 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 26 07:18:53 crc kubenswrapper[4492]: I1126 07:18:53.641743 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-libvirt-default-certs-0" Nov 26 07:18:53 crc kubenswrapper[4492]: I1126 07:18:53.641823 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-mxgqb" Nov 26 07:18:53 crc kubenswrapper[4492]: I1126 07:18:53.641998 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 26 07:18:53 crc kubenswrapper[4492]: I1126 07:18:53.642213 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-telemetry-default-certs-0" Nov 26 07:18:53 crc kubenswrapper[4492]: I1126 07:18:53.642764 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-neutron-metadata-default-certs-0" Nov 26 07:18:53 crc kubenswrapper[4492]: I1126 07:18:53.643811 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 26 07:18:53 crc kubenswrapper[4492]: I1126 07:18:53.663619 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-8vw6j"] Nov 26 07:18:53 crc kubenswrapper[4492]: I1126 07:18:53.687310 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/036ed939-8979-4d38-bf6e-5bc35556315d-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-8vw6j\" (UID: \"036ed939-8979-4d38-bf6e-5bc35556315d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-8vw6j" Nov 26 07:18:53 crc kubenswrapper[4492]: I1126 07:18:53.687443 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/036ed939-8979-4d38-bf6e-5bc35556315d-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-8vw6j\" (UID: \"036ed939-8979-4d38-bf6e-5bc35556315d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-8vw6j" Nov 26 07:18:53 crc kubenswrapper[4492]: I1126 07:18:53.687567 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ncxg5\" (UniqueName: \"kubernetes.io/projected/036ed939-8979-4d38-bf6e-5bc35556315d-kube-api-access-ncxg5\") pod 
\"install-certs-edpm-deployment-openstack-edpm-ipam-8vw6j\" (UID: \"036ed939-8979-4d38-bf6e-5bc35556315d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-8vw6j" Nov 26 07:18:53 crc kubenswrapper[4492]: I1126 07:18:53.687664 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/036ed939-8979-4d38-bf6e-5bc35556315d-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-8vw6j\" (UID: \"036ed939-8979-4d38-bf6e-5bc35556315d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-8vw6j" Nov 26 07:18:53 crc kubenswrapper[4492]: I1126 07:18:53.687745 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/036ed939-8979-4d38-bf6e-5bc35556315d-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-8vw6j\" (UID: \"036ed939-8979-4d38-bf6e-5bc35556315d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-8vw6j" Nov 26 07:18:53 crc kubenswrapper[4492]: I1126 07:18:53.687875 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/036ed939-8979-4d38-bf6e-5bc35556315d-ssh-key\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-8vw6j\" (UID: \"036ed939-8979-4d38-bf6e-5bc35556315d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-8vw6j" Nov 26 07:18:53 crc kubenswrapper[4492]: I1126 07:18:53.687963 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/036ed939-8979-4d38-bf6e-5bc35556315d-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-8vw6j\" (UID: \"036ed939-8979-4d38-bf6e-5bc35556315d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-8vw6j" Nov 26 07:18:53 crc kubenswrapper[4492]: I1126 07:18:53.688151 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/036ed939-8979-4d38-bf6e-5bc35556315d-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-8vw6j\" (UID: \"036ed939-8979-4d38-bf6e-5bc35556315d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-8vw6j" Nov 26 07:18:53 crc kubenswrapper[4492]: I1126 07:18:53.688254 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/036ed939-8979-4d38-bf6e-5bc35556315d-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-8vw6j\" (UID: \"036ed939-8979-4d38-bf6e-5bc35556315d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-8vw6j" Nov 26 07:18:53 crc kubenswrapper[4492]: I1126 07:18:53.688376 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/036ed939-8979-4d38-bf6e-5bc35556315d-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-8vw6j\" (UID: \"036ed939-8979-4d38-bf6e-5bc35556315d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-8vw6j" Nov 26 07:18:53 crc 
kubenswrapper[4492]: I1126 07:18:53.688509 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/036ed939-8979-4d38-bf6e-5bc35556315d-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-8vw6j\" (UID: \"036ed939-8979-4d38-bf6e-5bc35556315d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-8vw6j" Nov 26 07:18:53 crc kubenswrapper[4492]: I1126 07:18:53.688589 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/036ed939-8979-4d38-bf6e-5bc35556315d-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-8vw6j\" (UID: \"036ed939-8979-4d38-bf6e-5bc35556315d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-8vw6j" Nov 26 07:18:53 crc kubenswrapper[4492]: I1126 07:18:53.688686 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/036ed939-8979-4d38-bf6e-5bc35556315d-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-8vw6j\" (UID: \"036ed939-8979-4d38-bf6e-5bc35556315d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-8vw6j" Nov 26 07:18:53 crc kubenswrapper[4492]: I1126 07:18:53.688798 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/036ed939-8979-4d38-bf6e-5bc35556315d-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-8vw6j\" (UID: \"036ed939-8979-4d38-bf6e-5bc35556315d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-8vw6j" Nov 26 07:18:53 crc kubenswrapper[4492]: I1126 07:18:53.789827 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/036ed939-8979-4d38-bf6e-5bc35556315d-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-8vw6j\" (UID: \"036ed939-8979-4d38-bf6e-5bc35556315d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-8vw6j" Nov 26 07:18:53 crc kubenswrapper[4492]: I1126 07:18:53.789884 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ncxg5\" (UniqueName: \"kubernetes.io/projected/036ed939-8979-4d38-bf6e-5bc35556315d-kube-api-access-ncxg5\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-8vw6j\" (UID: \"036ed939-8979-4d38-bf6e-5bc35556315d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-8vw6j" Nov 26 07:18:53 crc kubenswrapper[4492]: I1126 07:18:53.789913 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/036ed939-8979-4d38-bf6e-5bc35556315d-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-8vw6j\" (UID: \"036ed939-8979-4d38-bf6e-5bc35556315d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-8vw6j" Nov 26 07:18:53 crc kubenswrapper[4492]: I1126 07:18:53.789934 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/036ed939-8979-4d38-bf6e-5bc35556315d-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-8vw6j\" (UID: \"036ed939-8979-4d38-bf6e-5bc35556315d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-8vw6j" Nov 26 07:18:53 crc kubenswrapper[4492]: I1126 07:18:53.789988 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/036ed939-8979-4d38-bf6e-5bc35556315d-ssh-key\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-8vw6j\" (UID: \"036ed939-8979-4d38-bf6e-5bc35556315d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-8vw6j" Nov 26 07:18:53 crc kubenswrapper[4492]: I1126 07:18:53.790025 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/036ed939-8979-4d38-bf6e-5bc35556315d-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-8vw6j\" (UID: \"036ed939-8979-4d38-bf6e-5bc35556315d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-8vw6j" Nov 26 07:18:53 crc kubenswrapper[4492]: I1126 07:18:53.790068 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/036ed939-8979-4d38-bf6e-5bc35556315d-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-8vw6j\" (UID: \"036ed939-8979-4d38-bf6e-5bc35556315d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-8vw6j" Nov 26 07:18:53 crc kubenswrapper[4492]: I1126 07:18:53.790099 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/036ed939-8979-4d38-bf6e-5bc35556315d-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-8vw6j\" (UID: \"036ed939-8979-4d38-bf6e-5bc35556315d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-8vw6j" Nov 26 07:18:53 crc kubenswrapper[4492]: I1126 07:18:53.790140 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/036ed939-8979-4d38-bf6e-5bc35556315d-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-8vw6j\" (UID: \"036ed939-8979-4d38-bf6e-5bc35556315d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-8vw6j" Nov 26 07:18:53 crc kubenswrapper[4492]: I1126 07:18:53.790197 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/036ed939-8979-4d38-bf6e-5bc35556315d-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-8vw6j\" (UID: \"036ed939-8979-4d38-bf6e-5bc35556315d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-8vw6j" Nov 26 07:18:53 crc kubenswrapper[4492]: I1126 07:18:53.790226 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/036ed939-8979-4d38-bf6e-5bc35556315d-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-8vw6j\" (UID: \"036ed939-8979-4d38-bf6e-5bc35556315d\") " 
pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-8vw6j" Nov 26 07:18:53 crc kubenswrapper[4492]: I1126 07:18:53.790251 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/036ed939-8979-4d38-bf6e-5bc35556315d-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-8vw6j\" (UID: \"036ed939-8979-4d38-bf6e-5bc35556315d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-8vw6j" Nov 26 07:18:53 crc kubenswrapper[4492]: I1126 07:18:53.790282 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/036ed939-8979-4d38-bf6e-5bc35556315d-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-8vw6j\" (UID: \"036ed939-8979-4d38-bf6e-5bc35556315d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-8vw6j" Nov 26 07:18:53 crc kubenswrapper[4492]: I1126 07:18:53.790337 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/036ed939-8979-4d38-bf6e-5bc35556315d-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-8vw6j\" (UID: \"036ed939-8979-4d38-bf6e-5bc35556315d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-8vw6j" Nov 26 07:18:53 crc kubenswrapper[4492]: I1126 07:18:53.797212 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/036ed939-8979-4d38-bf6e-5bc35556315d-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-8vw6j\" (UID: \"036ed939-8979-4d38-bf6e-5bc35556315d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-8vw6j" Nov 26 07:18:53 crc kubenswrapper[4492]: I1126 07:18:53.797211 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/036ed939-8979-4d38-bf6e-5bc35556315d-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-8vw6j\" (UID: \"036ed939-8979-4d38-bf6e-5bc35556315d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-8vw6j" Nov 26 07:18:53 crc kubenswrapper[4492]: I1126 07:18:53.797560 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/036ed939-8979-4d38-bf6e-5bc35556315d-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-8vw6j\" (UID: \"036ed939-8979-4d38-bf6e-5bc35556315d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-8vw6j" Nov 26 07:18:53 crc kubenswrapper[4492]: I1126 07:18:53.798101 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/036ed939-8979-4d38-bf6e-5bc35556315d-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-8vw6j\" (UID: \"036ed939-8979-4d38-bf6e-5bc35556315d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-8vw6j" Nov 26 07:18:53 crc kubenswrapper[4492]: I1126 07:18:53.798345 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/036ed939-8979-4d38-bf6e-5bc35556315d-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-8vw6j\" (UID: \"036ed939-8979-4d38-bf6e-5bc35556315d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-8vw6j" Nov 26 07:18:53 crc kubenswrapper[4492]: I1126 07:18:53.798495 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/036ed939-8979-4d38-bf6e-5bc35556315d-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-8vw6j\" (UID: \"036ed939-8979-4d38-bf6e-5bc35556315d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-8vw6j" Nov 26 07:18:53 crc kubenswrapper[4492]: I1126 07:18:53.798687 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/036ed939-8979-4d38-bf6e-5bc35556315d-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-8vw6j\" (UID: \"036ed939-8979-4d38-bf6e-5bc35556315d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-8vw6j" Nov 26 07:18:53 crc kubenswrapper[4492]: I1126 07:18:53.798722 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/036ed939-8979-4d38-bf6e-5bc35556315d-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-8vw6j\" (UID: \"036ed939-8979-4d38-bf6e-5bc35556315d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-8vw6j" Nov 26 07:18:53 crc kubenswrapper[4492]: I1126 07:18:53.799100 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/036ed939-8979-4d38-bf6e-5bc35556315d-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-8vw6j\" (UID: \"036ed939-8979-4d38-bf6e-5bc35556315d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-8vw6j" Nov 26 07:18:53 crc kubenswrapper[4492]: I1126 07:18:53.799474 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/036ed939-8979-4d38-bf6e-5bc35556315d-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-8vw6j\" (UID: \"036ed939-8979-4d38-bf6e-5bc35556315d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-8vw6j" Nov 26 07:18:53 crc kubenswrapper[4492]: I1126 07:18:53.800163 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/036ed939-8979-4d38-bf6e-5bc35556315d-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-8vw6j\" (UID: \"036ed939-8979-4d38-bf6e-5bc35556315d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-8vw6j" Nov 26 07:18:53 crc kubenswrapper[4492]: I1126 07:18:53.799441 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/036ed939-8979-4d38-bf6e-5bc35556315d-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-8vw6j\" (UID: 
\"036ed939-8979-4d38-bf6e-5bc35556315d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-8vw6j" Nov 26 07:18:53 crc kubenswrapper[4492]: I1126 07:18:53.801073 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/036ed939-8979-4d38-bf6e-5bc35556315d-ssh-key\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-8vw6j\" (UID: \"036ed939-8979-4d38-bf6e-5bc35556315d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-8vw6j" Nov 26 07:18:53 crc kubenswrapper[4492]: I1126 07:18:53.808047 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ncxg5\" (UniqueName: \"kubernetes.io/projected/036ed939-8979-4d38-bf6e-5bc35556315d-kube-api-access-ncxg5\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-8vw6j\" (UID: \"036ed939-8979-4d38-bf6e-5bc35556315d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-8vw6j" Nov 26 07:18:53 crc kubenswrapper[4492]: I1126 07:18:53.958256 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-8vw6j" Nov 26 07:18:54 crc kubenswrapper[4492]: I1126 07:18:54.469643 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-8vw6j"] Nov 26 07:18:54 crc kubenswrapper[4492]: I1126 07:18:54.553358 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-8vw6j" event={"ID":"036ed939-8979-4d38-bf6e-5bc35556315d","Type":"ContainerStarted","Data":"dec8a9c57698878209affb4cb3175bc1241a47acd9379c4385ed0410c2bd3b95"} Nov 26 07:18:55 crc kubenswrapper[4492]: I1126 07:18:55.564360 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-8vw6j" event={"ID":"036ed939-8979-4d38-bf6e-5bc35556315d","Type":"ContainerStarted","Data":"b26ded4a269e82822fc7f358738a42043e0aa8b5b64fcc8f58d16b795423ca90"} Nov 26 07:18:55 crc kubenswrapper[4492]: I1126 07:18:55.597951 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-8vw6j" podStartSLOduration=2.093465627 podStartE2EDuration="2.597935963s" podCreationTimestamp="2025-11-26 07:18:53 +0000 UTC" firstStartedPulling="2025-11-26 07:18:54.473547564 +0000 UTC m=+1830.357435862" lastFinishedPulling="2025-11-26 07:18:54.978017901 +0000 UTC m=+1830.861906198" observedRunningTime="2025-11-26 07:18:55.585199287 +0000 UTC m=+1831.469087585" watchObservedRunningTime="2025-11-26 07:18:55.597935963 +0000 UTC m=+1831.481824261" Nov 26 07:18:56 crc kubenswrapper[4492]: I1126 07:18:56.118292 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-ss5gn" Nov 26 07:18:56 crc kubenswrapper[4492]: I1126 07:18:56.162566 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-ss5gn" Nov 26 07:18:56 crc kubenswrapper[4492]: I1126 07:18:56.355594 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-ss5gn"] Nov 26 07:18:57 crc kubenswrapper[4492]: I1126 07:18:57.578885 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-ss5gn" podUID="0e99d4d4-fe00-43d0-bf42-d49a2cea8bcd" containerName="registry-server" 
containerID="cri-o://6a33674701d78ffb2a957ade1944e898c959f6f0a9cf7ab47d43dce8b149b4ab" gracePeriod=2 Nov 26 07:18:57 crc kubenswrapper[4492]: I1126 07:18:57.993600 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-ss5gn" Nov 26 07:18:58 crc kubenswrapper[4492]: I1126 07:18:58.182887 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-48lbq\" (UniqueName: \"kubernetes.io/projected/0e99d4d4-fe00-43d0-bf42-d49a2cea8bcd-kube-api-access-48lbq\") pod \"0e99d4d4-fe00-43d0-bf42-d49a2cea8bcd\" (UID: \"0e99d4d4-fe00-43d0-bf42-d49a2cea8bcd\") " Nov 26 07:18:58 crc kubenswrapper[4492]: I1126 07:18:58.183065 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0e99d4d4-fe00-43d0-bf42-d49a2cea8bcd-utilities\") pod \"0e99d4d4-fe00-43d0-bf42-d49a2cea8bcd\" (UID: \"0e99d4d4-fe00-43d0-bf42-d49a2cea8bcd\") " Nov 26 07:18:58 crc kubenswrapper[4492]: I1126 07:18:58.183190 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0e99d4d4-fe00-43d0-bf42-d49a2cea8bcd-catalog-content\") pod \"0e99d4d4-fe00-43d0-bf42-d49a2cea8bcd\" (UID: \"0e99d4d4-fe00-43d0-bf42-d49a2cea8bcd\") " Nov 26 07:18:58 crc kubenswrapper[4492]: I1126 07:18:58.183820 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0e99d4d4-fe00-43d0-bf42-d49a2cea8bcd-utilities" (OuterVolumeSpecName: "utilities") pod "0e99d4d4-fe00-43d0-bf42-d49a2cea8bcd" (UID: "0e99d4d4-fe00-43d0-bf42-d49a2cea8bcd"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:18:58 crc kubenswrapper[4492]: I1126 07:18:58.188220 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0e99d4d4-fe00-43d0-bf42-d49a2cea8bcd-kube-api-access-48lbq" (OuterVolumeSpecName: "kube-api-access-48lbq") pod "0e99d4d4-fe00-43d0-bf42-d49a2cea8bcd" (UID: "0e99d4d4-fe00-43d0-bf42-d49a2cea8bcd"). InnerVolumeSpecName "kube-api-access-48lbq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:18:58 crc kubenswrapper[4492]: I1126 07:18:58.248678 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0e99d4d4-fe00-43d0-bf42-d49a2cea8bcd-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0e99d4d4-fe00-43d0-bf42-d49a2cea8bcd" (UID: "0e99d4d4-fe00-43d0-bf42-d49a2cea8bcd"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:18:58 crc kubenswrapper[4492]: I1126 07:18:58.286074 4492 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0e99d4d4-fe00-43d0-bf42-d49a2cea8bcd-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:58 crc kubenswrapper[4492]: I1126 07:18:58.286110 4492 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0e99d4d4-fe00-43d0-bf42-d49a2cea8bcd-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:58 crc kubenswrapper[4492]: I1126 07:18:58.286130 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-48lbq\" (UniqueName: \"kubernetes.io/projected/0e99d4d4-fe00-43d0-bf42-d49a2cea8bcd-kube-api-access-48lbq\") on node \"crc\" DevicePath \"\"" Nov 26 07:18:58 crc kubenswrapper[4492]: I1126 07:18:58.549315 4492 scope.go:117] "RemoveContainer" containerID="967bab7794e098a626be43b5e37a02c3e091858de521fd6111b078bfa5a66c07" Nov 26 07:18:58 crc kubenswrapper[4492]: I1126 07:18:58.582898 4492 scope.go:117] "RemoveContainer" containerID="36aaf449a28c128ddd286a6f461f0647680fb7b4437971dfabdfb4417ce8432a" Nov 26 07:18:58 crc kubenswrapper[4492]: I1126 07:18:58.590609 4492 generic.go:334] "Generic (PLEG): container finished" podID="0e99d4d4-fe00-43d0-bf42-d49a2cea8bcd" containerID="6a33674701d78ffb2a957ade1944e898c959f6f0a9cf7ab47d43dce8b149b4ab" exitCode=0 Nov 26 07:18:58 crc kubenswrapper[4492]: I1126 07:18:58.590692 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-ss5gn" Nov 26 07:18:58 crc kubenswrapper[4492]: I1126 07:18:58.590691 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ss5gn" event={"ID":"0e99d4d4-fe00-43d0-bf42-d49a2cea8bcd","Type":"ContainerDied","Data":"6a33674701d78ffb2a957ade1944e898c959f6f0a9cf7ab47d43dce8b149b4ab"} Nov 26 07:18:58 crc kubenswrapper[4492]: I1126 07:18:58.590910 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ss5gn" event={"ID":"0e99d4d4-fe00-43d0-bf42-d49a2cea8bcd","Type":"ContainerDied","Data":"192099d8bb4b8b486ce8a16732c60c7f8fc8b501b36931e10efaad81d81b2e86"} Nov 26 07:18:58 crc kubenswrapper[4492]: I1126 07:18:58.590934 4492 scope.go:117] "RemoveContainer" containerID="6a33674701d78ffb2a957ade1944e898c959f6f0a9cf7ab47d43dce8b149b4ab" Nov 26 07:18:58 crc kubenswrapper[4492]: I1126 07:18:58.616428 4492 scope.go:117] "RemoveContainer" containerID="c85b3534b450b9c74a8b2d7534da2321ddb780d2e2d451888bc5f927c5b4a6ba" Nov 26 07:18:58 crc kubenswrapper[4492]: I1126 07:18:58.627801 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-ss5gn"] Nov 26 07:18:58 crc kubenswrapper[4492]: I1126 07:18:58.630889 4492 scope.go:117] "RemoveContainer" containerID="448d28b555110e5a32a99f58444e4f2904fec363e974e785b90f7cba2f69be0c" Nov 26 07:18:58 crc kubenswrapper[4492]: I1126 07:18:58.634869 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-ss5gn"] Nov 26 07:18:58 crc kubenswrapper[4492]: I1126 07:18:58.658563 4492 scope.go:117] "RemoveContainer" containerID="edff7787912f61aceabf8e437a544fdee66fccac8ff5d0a45035aa7c0cfda1c5" Nov 26 07:18:58 crc kubenswrapper[4492]: I1126 07:18:58.687225 4492 scope.go:117] "RemoveContainer" containerID="6a33674701d78ffb2a957ade1944e898c959f6f0a9cf7ab47d43dce8b149b4ab" Nov 26 
07:18:58 crc kubenswrapper[4492]: E1126 07:18:58.687730 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6a33674701d78ffb2a957ade1944e898c959f6f0a9cf7ab47d43dce8b149b4ab\": container with ID starting with 6a33674701d78ffb2a957ade1944e898c959f6f0a9cf7ab47d43dce8b149b4ab not found: ID does not exist" containerID="6a33674701d78ffb2a957ade1944e898c959f6f0a9cf7ab47d43dce8b149b4ab" Nov 26 07:18:58 crc kubenswrapper[4492]: I1126 07:18:58.687779 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6a33674701d78ffb2a957ade1944e898c959f6f0a9cf7ab47d43dce8b149b4ab"} err="failed to get container status \"6a33674701d78ffb2a957ade1944e898c959f6f0a9cf7ab47d43dce8b149b4ab\": rpc error: code = NotFound desc = could not find container \"6a33674701d78ffb2a957ade1944e898c959f6f0a9cf7ab47d43dce8b149b4ab\": container with ID starting with 6a33674701d78ffb2a957ade1944e898c959f6f0a9cf7ab47d43dce8b149b4ab not found: ID does not exist" Nov 26 07:18:58 crc kubenswrapper[4492]: I1126 07:18:58.687813 4492 scope.go:117] "RemoveContainer" containerID="448d28b555110e5a32a99f58444e4f2904fec363e974e785b90f7cba2f69be0c" Nov 26 07:18:58 crc kubenswrapper[4492]: E1126 07:18:58.688196 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"448d28b555110e5a32a99f58444e4f2904fec363e974e785b90f7cba2f69be0c\": container with ID starting with 448d28b555110e5a32a99f58444e4f2904fec363e974e785b90f7cba2f69be0c not found: ID does not exist" containerID="448d28b555110e5a32a99f58444e4f2904fec363e974e785b90f7cba2f69be0c" Nov 26 07:18:58 crc kubenswrapper[4492]: I1126 07:18:58.688229 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"448d28b555110e5a32a99f58444e4f2904fec363e974e785b90f7cba2f69be0c"} err="failed to get container status \"448d28b555110e5a32a99f58444e4f2904fec363e974e785b90f7cba2f69be0c\": rpc error: code = NotFound desc = could not find container \"448d28b555110e5a32a99f58444e4f2904fec363e974e785b90f7cba2f69be0c\": container with ID starting with 448d28b555110e5a32a99f58444e4f2904fec363e974e785b90f7cba2f69be0c not found: ID does not exist" Nov 26 07:18:58 crc kubenswrapper[4492]: I1126 07:18:58.688253 4492 scope.go:117] "RemoveContainer" containerID="edff7787912f61aceabf8e437a544fdee66fccac8ff5d0a45035aa7c0cfda1c5" Nov 26 07:18:58 crc kubenswrapper[4492]: E1126 07:18:58.688582 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"edff7787912f61aceabf8e437a544fdee66fccac8ff5d0a45035aa7c0cfda1c5\": container with ID starting with edff7787912f61aceabf8e437a544fdee66fccac8ff5d0a45035aa7c0cfda1c5 not found: ID does not exist" containerID="edff7787912f61aceabf8e437a544fdee66fccac8ff5d0a45035aa7c0cfda1c5" Nov 26 07:18:58 crc kubenswrapper[4492]: I1126 07:18:58.688609 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"edff7787912f61aceabf8e437a544fdee66fccac8ff5d0a45035aa7c0cfda1c5"} err="failed to get container status \"edff7787912f61aceabf8e437a544fdee66fccac8ff5d0a45035aa7c0cfda1c5\": rpc error: code = NotFound desc = could not find container \"edff7787912f61aceabf8e437a544fdee66fccac8ff5d0a45035aa7c0cfda1c5\": container with ID starting with edff7787912f61aceabf8e437a544fdee66fccac8ff5d0a45035aa7c0cfda1c5 not found: ID does not exist" Nov 26 07:19:00 crc 
kubenswrapper[4492]: I1126 07:19:00.449162 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0e99d4d4-fe00-43d0-bf42-d49a2cea8bcd" path="/var/lib/kubelet/pods/0e99d4d4-fe00-43d0-bf42-d49a2cea8bcd/volumes" Nov 26 07:19:22 crc kubenswrapper[4492]: I1126 07:19:22.831339 4492 generic.go:334] "Generic (PLEG): container finished" podID="036ed939-8979-4d38-bf6e-5bc35556315d" containerID="b26ded4a269e82822fc7f358738a42043e0aa8b5b64fcc8f58d16b795423ca90" exitCode=0 Nov 26 07:19:22 crc kubenswrapper[4492]: I1126 07:19:22.831431 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-8vw6j" event={"ID":"036ed939-8979-4d38-bf6e-5bc35556315d","Type":"ContainerDied","Data":"b26ded4a269e82822fc7f358738a42043e0aa8b5b64fcc8f58d16b795423ca90"} Nov 26 07:19:24 crc kubenswrapper[4492]: I1126 07:19:24.162633 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-8vw6j" Nov 26 07:19:24 crc kubenswrapper[4492]: I1126 07:19:24.345315 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/036ed939-8979-4d38-bf6e-5bc35556315d-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"036ed939-8979-4d38-bf6e-5bc35556315d\" (UID: \"036ed939-8979-4d38-bf6e-5bc35556315d\") " Nov 26 07:19:24 crc kubenswrapper[4492]: I1126 07:19:24.345411 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/036ed939-8979-4d38-bf6e-5bc35556315d-repo-setup-combined-ca-bundle\") pod \"036ed939-8979-4d38-bf6e-5bc35556315d\" (UID: \"036ed939-8979-4d38-bf6e-5bc35556315d\") " Nov 26 07:19:24 crc kubenswrapper[4492]: I1126 07:19:24.345521 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/036ed939-8979-4d38-bf6e-5bc35556315d-ovn-combined-ca-bundle\") pod \"036ed939-8979-4d38-bf6e-5bc35556315d\" (UID: \"036ed939-8979-4d38-bf6e-5bc35556315d\") " Nov 26 07:19:24 crc kubenswrapper[4492]: I1126 07:19:24.345588 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/036ed939-8979-4d38-bf6e-5bc35556315d-inventory\") pod \"036ed939-8979-4d38-bf6e-5bc35556315d\" (UID: \"036ed939-8979-4d38-bf6e-5bc35556315d\") " Nov 26 07:19:24 crc kubenswrapper[4492]: I1126 07:19:24.345626 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/036ed939-8979-4d38-bf6e-5bc35556315d-telemetry-combined-ca-bundle\") pod \"036ed939-8979-4d38-bf6e-5bc35556315d\" (UID: \"036ed939-8979-4d38-bf6e-5bc35556315d\") " Nov 26 07:19:24 crc kubenswrapper[4492]: I1126 07:19:24.345700 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/036ed939-8979-4d38-bf6e-5bc35556315d-nova-combined-ca-bundle\") pod \"036ed939-8979-4d38-bf6e-5bc35556315d\" (UID: \"036ed939-8979-4d38-bf6e-5bc35556315d\") " Nov 26 07:19:24 crc kubenswrapper[4492]: I1126 07:19:24.345727 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/036ed939-8979-4d38-bf6e-5bc35556315d-bootstrap-combined-ca-bundle\") pod \"036ed939-8979-4d38-bf6e-5bc35556315d\" (UID: \"036ed939-8979-4d38-bf6e-5bc35556315d\") " Nov 26 07:19:24 crc kubenswrapper[4492]: I1126 07:19:24.345754 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ncxg5\" (UniqueName: \"kubernetes.io/projected/036ed939-8979-4d38-bf6e-5bc35556315d-kube-api-access-ncxg5\") pod \"036ed939-8979-4d38-bf6e-5bc35556315d\" (UID: \"036ed939-8979-4d38-bf6e-5bc35556315d\") " Nov 26 07:19:24 crc kubenswrapper[4492]: I1126 07:19:24.345852 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/036ed939-8979-4d38-bf6e-5bc35556315d-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"036ed939-8979-4d38-bf6e-5bc35556315d\" (UID: \"036ed939-8979-4d38-bf6e-5bc35556315d\") " Nov 26 07:19:24 crc kubenswrapper[4492]: I1126 07:19:24.345875 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/036ed939-8979-4d38-bf6e-5bc35556315d-libvirt-combined-ca-bundle\") pod \"036ed939-8979-4d38-bf6e-5bc35556315d\" (UID: \"036ed939-8979-4d38-bf6e-5bc35556315d\") " Nov 26 07:19:24 crc kubenswrapper[4492]: I1126 07:19:24.345952 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/036ed939-8979-4d38-bf6e-5bc35556315d-openstack-edpm-ipam-ovn-default-certs-0\") pod \"036ed939-8979-4d38-bf6e-5bc35556315d\" (UID: \"036ed939-8979-4d38-bf6e-5bc35556315d\") " Nov 26 07:19:24 crc kubenswrapper[4492]: I1126 07:19:24.345971 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/036ed939-8979-4d38-bf6e-5bc35556315d-ssh-key\") pod \"036ed939-8979-4d38-bf6e-5bc35556315d\" (UID: \"036ed939-8979-4d38-bf6e-5bc35556315d\") " Nov 26 07:19:24 crc kubenswrapper[4492]: I1126 07:19:24.345999 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/036ed939-8979-4d38-bf6e-5bc35556315d-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"036ed939-8979-4d38-bf6e-5bc35556315d\" (UID: \"036ed939-8979-4d38-bf6e-5bc35556315d\") " Nov 26 07:19:24 crc kubenswrapper[4492]: I1126 07:19:24.346093 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/036ed939-8979-4d38-bf6e-5bc35556315d-neutron-metadata-combined-ca-bundle\") pod \"036ed939-8979-4d38-bf6e-5bc35556315d\" (UID: \"036ed939-8979-4d38-bf6e-5bc35556315d\") " Nov 26 07:19:24 crc kubenswrapper[4492]: I1126 07:19:24.352601 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/036ed939-8979-4d38-bf6e-5bc35556315d-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "036ed939-8979-4d38-bf6e-5bc35556315d" (UID: "036ed939-8979-4d38-bf6e-5bc35556315d"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:19:24 crc kubenswrapper[4492]: I1126 07:19:24.354666 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/036ed939-8979-4d38-bf6e-5bc35556315d-openstack-edpm-ipam-libvirt-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-libvirt-default-certs-0") pod "036ed939-8979-4d38-bf6e-5bc35556315d" (UID: "036ed939-8979-4d38-bf6e-5bc35556315d"). InnerVolumeSpecName "openstack-edpm-ipam-libvirt-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:19:24 crc kubenswrapper[4492]: I1126 07:19:24.354702 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/036ed939-8979-4d38-bf6e-5bc35556315d-telemetry-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-combined-ca-bundle") pod "036ed939-8979-4d38-bf6e-5bc35556315d" (UID: "036ed939-8979-4d38-bf6e-5bc35556315d"). InnerVolumeSpecName "telemetry-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:19:24 crc kubenswrapper[4492]: I1126 07:19:24.354819 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/036ed939-8979-4d38-bf6e-5bc35556315d-openstack-edpm-ipam-telemetry-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-telemetry-default-certs-0") pod "036ed939-8979-4d38-bf6e-5bc35556315d" (UID: "036ed939-8979-4d38-bf6e-5bc35556315d"). InnerVolumeSpecName "openstack-edpm-ipam-telemetry-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:19:24 crc kubenswrapper[4492]: I1126 07:19:24.354822 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/036ed939-8979-4d38-bf6e-5bc35556315d-kube-api-access-ncxg5" (OuterVolumeSpecName: "kube-api-access-ncxg5") pod "036ed939-8979-4d38-bf6e-5bc35556315d" (UID: "036ed939-8979-4d38-bf6e-5bc35556315d"). InnerVolumeSpecName "kube-api-access-ncxg5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:19:24 crc kubenswrapper[4492]: I1126 07:19:24.354986 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/036ed939-8979-4d38-bf6e-5bc35556315d-repo-setup-combined-ca-bundle" (OuterVolumeSpecName: "repo-setup-combined-ca-bundle") pod "036ed939-8979-4d38-bf6e-5bc35556315d" (UID: "036ed939-8979-4d38-bf6e-5bc35556315d"). InnerVolumeSpecName "repo-setup-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:19:24 crc kubenswrapper[4492]: I1126 07:19:24.355017 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/036ed939-8979-4d38-bf6e-5bc35556315d-nova-combined-ca-bundle" (OuterVolumeSpecName: "nova-combined-ca-bundle") pod "036ed939-8979-4d38-bf6e-5bc35556315d" (UID: "036ed939-8979-4d38-bf6e-5bc35556315d"). InnerVolumeSpecName "nova-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:19:24 crc kubenswrapper[4492]: I1126 07:19:24.355189 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/036ed939-8979-4d38-bf6e-5bc35556315d-openstack-edpm-ipam-ovn-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-ovn-default-certs-0") pod "036ed939-8979-4d38-bf6e-5bc35556315d" (UID: "036ed939-8979-4d38-bf6e-5bc35556315d"). InnerVolumeSpecName "openstack-edpm-ipam-ovn-default-certs-0". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:19:24 crc kubenswrapper[4492]: I1126 07:19:24.355059 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/036ed939-8979-4d38-bf6e-5bc35556315d-neutron-metadata-combined-ca-bundle" (OuterVolumeSpecName: "neutron-metadata-combined-ca-bundle") pod "036ed939-8979-4d38-bf6e-5bc35556315d" (UID: "036ed939-8979-4d38-bf6e-5bc35556315d"). InnerVolumeSpecName "neutron-metadata-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:19:24 crc kubenswrapper[4492]: I1126 07:19:24.357668 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/036ed939-8979-4d38-bf6e-5bc35556315d-openstack-edpm-ipam-neutron-metadata-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-neutron-metadata-default-certs-0") pod "036ed939-8979-4d38-bf6e-5bc35556315d" (UID: "036ed939-8979-4d38-bf6e-5bc35556315d"). InnerVolumeSpecName "openstack-edpm-ipam-neutron-metadata-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:19:24 crc kubenswrapper[4492]: I1126 07:19:24.357955 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/036ed939-8979-4d38-bf6e-5bc35556315d-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "036ed939-8979-4d38-bf6e-5bc35556315d" (UID: "036ed939-8979-4d38-bf6e-5bc35556315d"). InnerVolumeSpecName "ovn-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:19:24 crc kubenswrapper[4492]: I1126 07:19:24.358269 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/036ed939-8979-4d38-bf6e-5bc35556315d-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "036ed939-8979-4d38-bf6e-5bc35556315d" (UID: "036ed939-8979-4d38-bf6e-5bc35556315d"). InnerVolumeSpecName "libvirt-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:19:24 crc kubenswrapper[4492]: I1126 07:19:24.374588 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/036ed939-8979-4d38-bf6e-5bc35556315d-inventory" (OuterVolumeSpecName: "inventory") pod "036ed939-8979-4d38-bf6e-5bc35556315d" (UID: "036ed939-8979-4d38-bf6e-5bc35556315d"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:19:24 crc kubenswrapper[4492]: I1126 07:19:24.376330 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/036ed939-8979-4d38-bf6e-5bc35556315d-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "036ed939-8979-4d38-bf6e-5bc35556315d" (UID: "036ed939-8979-4d38-bf6e-5bc35556315d"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:19:24 crc kubenswrapper[4492]: I1126 07:19:24.448871 4492 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/036ed939-8979-4d38-bf6e-5bc35556315d-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:19:24 crc kubenswrapper[4492]: I1126 07:19:24.448901 4492 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/036ed939-8979-4d38-bf6e-5bc35556315d-inventory\") on node \"crc\" DevicePath \"\"" Nov 26 07:19:24 crc kubenswrapper[4492]: I1126 07:19:24.448913 4492 reconciler_common.go:293] "Volume detached for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/036ed939-8979-4d38-bf6e-5bc35556315d-telemetry-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:19:24 crc kubenswrapper[4492]: I1126 07:19:24.448926 4492 reconciler_common.go:293] "Volume detached for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/036ed939-8979-4d38-bf6e-5bc35556315d-nova-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:19:24 crc kubenswrapper[4492]: I1126 07:19:24.448939 4492 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/036ed939-8979-4d38-bf6e-5bc35556315d-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:19:24 crc kubenswrapper[4492]: I1126 07:19:24.448949 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ncxg5\" (UniqueName: \"kubernetes.io/projected/036ed939-8979-4d38-bf6e-5bc35556315d-kube-api-access-ncxg5\") on node \"crc\" DevicePath \"\"" Nov 26 07:19:24 crc kubenswrapper[4492]: I1126 07:19:24.448961 4492 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/036ed939-8979-4d38-bf6e-5bc35556315d-openstack-edpm-ipam-libvirt-default-certs-0\") on node \"crc\" DevicePath \"\"" Nov 26 07:19:24 crc kubenswrapper[4492]: I1126 07:19:24.448974 4492 reconciler_common.go:293] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/036ed939-8979-4d38-bf6e-5bc35556315d-libvirt-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:19:24 crc kubenswrapper[4492]: I1126 07:19:24.448985 4492 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/036ed939-8979-4d38-bf6e-5bc35556315d-openstack-edpm-ipam-ovn-default-certs-0\") on node \"crc\" DevicePath \"\"" Nov 26 07:19:24 crc kubenswrapper[4492]: I1126 07:19:24.448994 4492 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/036ed939-8979-4d38-bf6e-5bc35556315d-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 26 07:19:24 crc kubenswrapper[4492]: I1126 07:19:24.449004 4492 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/036ed939-8979-4d38-bf6e-5bc35556315d-openstack-edpm-ipam-neutron-metadata-default-certs-0\") on node \"crc\" DevicePath \"\"" Nov 26 07:19:24 crc kubenswrapper[4492]: I1126 07:19:24.449026 4492 reconciler_common.go:293] "Volume detached for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/036ed939-8979-4d38-bf6e-5bc35556315d-neutron-metadata-combined-ca-bundle\") on node 
\"crc\" DevicePath \"\"" Nov 26 07:19:24 crc kubenswrapper[4492]: I1126 07:19:24.449040 4492 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/036ed939-8979-4d38-bf6e-5bc35556315d-openstack-edpm-ipam-telemetry-default-certs-0\") on node \"crc\" DevicePath \"\"" Nov 26 07:19:24 crc kubenswrapper[4492]: I1126 07:19:24.449051 4492 reconciler_common.go:293] "Volume detached for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/036ed939-8979-4d38-bf6e-5bc35556315d-repo-setup-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:19:24 crc kubenswrapper[4492]: I1126 07:19:24.848975 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-8vw6j" event={"ID":"036ed939-8979-4d38-bf6e-5bc35556315d","Type":"ContainerDied","Data":"dec8a9c57698878209affb4cb3175bc1241a47acd9379c4385ed0410c2bd3b95"} Nov 26 07:19:24 crc kubenswrapper[4492]: I1126 07:19:24.849314 4492 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="dec8a9c57698878209affb4cb3175bc1241a47acd9379c4385ed0410c2bd3b95" Nov 26 07:19:24 crc kubenswrapper[4492]: I1126 07:19:24.849030 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-8vw6j" Nov 26 07:19:24 crc kubenswrapper[4492]: I1126 07:19:24.941757 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-qp5bd"] Nov 26 07:19:24 crc kubenswrapper[4492]: E1126 07:19:24.942307 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0e99d4d4-fe00-43d0-bf42-d49a2cea8bcd" containerName="registry-server" Nov 26 07:19:24 crc kubenswrapper[4492]: I1126 07:19:24.942379 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="0e99d4d4-fe00-43d0-bf42-d49a2cea8bcd" containerName="registry-server" Nov 26 07:19:24 crc kubenswrapper[4492]: E1126 07:19:24.942444 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0e99d4d4-fe00-43d0-bf42-d49a2cea8bcd" containerName="extract-content" Nov 26 07:19:24 crc kubenswrapper[4492]: I1126 07:19:24.942487 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="0e99d4d4-fe00-43d0-bf42-d49a2cea8bcd" containerName="extract-content" Nov 26 07:19:24 crc kubenswrapper[4492]: E1126 07:19:24.942537 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="036ed939-8979-4d38-bf6e-5bc35556315d" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Nov 26 07:19:24 crc kubenswrapper[4492]: I1126 07:19:24.942585 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="036ed939-8979-4d38-bf6e-5bc35556315d" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Nov 26 07:19:24 crc kubenswrapper[4492]: E1126 07:19:24.942652 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0e99d4d4-fe00-43d0-bf42-d49a2cea8bcd" containerName="extract-utilities" Nov 26 07:19:24 crc kubenswrapper[4492]: I1126 07:19:24.942700 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="0e99d4d4-fe00-43d0-bf42-d49a2cea8bcd" containerName="extract-utilities" Nov 26 07:19:24 crc kubenswrapper[4492]: I1126 07:19:24.943111 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="036ed939-8979-4d38-bf6e-5bc35556315d" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Nov 26 07:19:24 crc kubenswrapper[4492]: I1126 
07:19:24.943193 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="0e99d4d4-fe00-43d0-bf42-d49a2cea8bcd" containerName="registry-server" Nov 26 07:19:24 crc kubenswrapper[4492]: I1126 07:19:24.943758 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-qp5bd" Nov 26 07:19:24 crc kubenswrapper[4492]: I1126 07:19:24.945803 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 26 07:19:24 crc kubenswrapper[4492]: I1126 07:19:24.946036 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-mxgqb" Nov 26 07:19:24 crc kubenswrapper[4492]: I1126 07:19:24.946262 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-config" Nov 26 07:19:24 crc kubenswrapper[4492]: I1126 07:19:24.946300 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 26 07:19:24 crc kubenswrapper[4492]: I1126 07:19:24.946306 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 26 07:19:24 crc kubenswrapper[4492]: I1126 07:19:24.956249 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jdjxh\" (UniqueName: \"kubernetes.io/projected/635ac185-0d83-440e-a771-7743a7c69d8d-kube-api-access-jdjxh\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-qp5bd\" (UID: \"635ac185-0d83-440e-a771-7743a7c69d8d\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-qp5bd" Nov 26 07:19:24 crc kubenswrapper[4492]: I1126 07:19:24.956406 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/635ac185-0d83-440e-a771-7743a7c69d8d-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-qp5bd\" (UID: \"635ac185-0d83-440e-a771-7743a7c69d8d\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-qp5bd" Nov 26 07:19:24 crc kubenswrapper[4492]: I1126 07:19:24.956721 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/635ac185-0d83-440e-a771-7743a7c69d8d-ssh-key\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-qp5bd\" (UID: \"635ac185-0d83-440e-a771-7743a7c69d8d\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-qp5bd" Nov 26 07:19:24 crc kubenswrapper[4492]: I1126 07:19:24.956778 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/635ac185-0d83-440e-a771-7743a7c69d8d-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-qp5bd\" (UID: \"635ac185-0d83-440e-a771-7743a7c69d8d\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-qp5bd" Nov 26 07:19:24 crc kubenswrapper[4492]: I1126 07:19:24.956901 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/635ac185-0d83-440e-a771-7743a7c69d8d-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-qp5bd\" (UID: \"635ac185-0d83-440e-a771-7743a7c69d8d\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-qp5bd" Nov 26 07:19:24 crc kubenswrapper[4492]: I1126 07:19:24.958619 4492 kubelet.go:2428] 
"SyncLoop UPDATE" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-qp5bd"] Nov 26 07:19:25 crc kubenswrapper[4492]: I1126 07:19:25.060507 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jdjxh\" (UniqueName: \"kubernetes.io/projected/635ac185-0d83-440e-a771-7743a7c69d8d-kube-api-access-jdjxh\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-qp5bd\" (UID: \"635ac185-0d83-440e-a771-7743a7c69d8d\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-qp5bd" Nov 26 07:19:25 crc kubenswrapper[4492]: I1126 07:19:25.060696 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/635ac185-0d83-440e-a771-7743a7c69d8d-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-qp5bd\" (UID: \"635ac185-0d83-440e-a771-7743a7c69d8d\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-qp5bd" Nov 26 07:19:25 crc kubenswrapper[4492]: I1126 07:19:25.061152 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/635ac185-0d83-440e-a771-7743a7c69d8d-ssh-key\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-qp5bd\" (UID: \"635ac185-0d83-440e-a771-7743a7c69d8d\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-qp5bd" Nov 26 07:19:25 crc kubenswrapper[4492]: I1126 07:19:25.061234 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/635ac185-0d83-440e-a771-7743a7c69d8d-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-qp5bd\" (UID: \"635ac185-0d83-440e-a771-7743a7c69d8d\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-qp5bd" Nov 26 07:19:25 crc kubenswrapper[4492]: I1126 07:19:25.061324 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/635ac185-0d83-440e-a771-7743a7c69d8d-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-qp5bd\" (UID: \"635ac185-0d83-440e-a771-7743a7c69d8d\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-qp5bd" Nov 26 07:19:25 crc kubenswrapper[4492]: I1126 07:19:25.062166 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/635ac185-0d83-440e-a771-7743a7c69d8d-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-qp5bd\" (UID: \"635ac185-0d83-440e-a771-7743a7c69d8d\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-qp5bd" Nov 26 07:19:25 crc kubenswrapper[4492]: I1126 07:19:25.065455 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/635ac185-0d83-440e-a771-7743a7c69d8d-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-qp5bd\" (UID: \"635ac185-0d83-440e-a771-7743a7c69d8d\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-qp5bd" Nov 26 07:19:25 crc kubenswrapper[4492]: I1126 07:19:25.066823 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/635ac185-0d83-440e-a771-7743a7c69d8d-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-qp5bd\" (UID: \"635ac185-0d83-440e-a771-7743a7c69d8d\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-qp5bd" Nov 26 07:19:25 crc kubenswrapper[4492]: I1126 07:19:25.068826 4492 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/635ac185-0d83-440e-a771-7743a7c69d8d-ssh-key\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-qp5bd\" (UID: \"635ac185-0d83-440e-a771-7743a7c69d8d\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-qp5bd"
Nov 26 07:19:25 crc kubenswrapper[4492]: I1126 07:19:25.075648 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jdjxh\" (UniqueName: \"kubernetes.io/projected/635ac185-0d83-440e-a771-7743a7c69d8d-kube-api-access-jdjxh\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-qp5bd\" (UID: \"635ac185-0d83-440e-a771-7743a7c69d8d\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-qp5bd"
Nov 26 07:19:25 crc kubenswrapper[4492]: I1126 07:19:25.287116 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-qp5bd"
Nov 26 07:19:25 crc kubenswrapper[4492]: I1126 07:19:25.828354 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-qp5bd"]
Nov 26 07:19:26 crc kubenswrapper[4492]: I1126 07:19:26.886209 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-qp5bd" event={"ID":"635ac185-0d83-440e-a771-7743a7c69d8d","Type":"ContainerStarted","Data":"6b87c9e2d3c2a4f5c601ba480619d2e16ba8ae7d22718d8c02f3bc6ea1f53eb3"}
Nov 26 07:19:26 crc kubenswrapper[4492]: I1126 07:19:26.886908 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-qp5bd" event={"ID":"635ac185-0d83-440e-a771-7743a7c69d8d","Type":"ContainerStarted","Data":"804beabe89b64125221be22d7005d3bc61bb10c534f7c4c4b119ee8e7dd663fd"}
Nov 26 07:19:26 crc kubenswrapper[4492]: I1126 07:19:26.907974 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-qp5bd" podStartSLOduration=2.403358648 podStartE2EDuration="2.907950684s" podCreationTimestamp="2025-11-26 07:19:24 +0000 UTC" firstStartedPulling="2025-11-26 07:19:25.877303419 +0000 UTC m=+1861.761191718" lastFinishedPulling="2025-11-26 07:19:26.381895446 +0000 UTC m=+1862.265783754" observedRunningTime="2025-11-26 07:19:26.900996029 +0000 UTC m=+1862.784884327" watchObservedRunningTime="2025-11-26 07:19:26.907950684 +0000 UTC m=+1862.791838983"
Nov 26 07:20:18 crc kubenswrapper[4492]: I1126 07:20:18.389149 4492 generic.go:334] "Generic (PLEG): container finished" podID="635ac185-0d83-440e-a771-7743a7c69d8d" containerID="6b87c9e2d3c2a4f5c601ba480619d2e16ba8ae7d22718d8c02f3bc6ea1f53eb3" exitCode=0
Nov 26 07:20:18 crc kubenswrapper[4492]: I1126 07:20:18.389248 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-qp5bd" event={"ID":"635ac185-0d83-440e-a771-7743a7c69d8d","Type":"ContainerDied","Data":"6b87c9e2d3c2a4f5c601ba480619d2e16ba8ae7d22718d8c02f3bc6ea1f53eb3"}
Nov 26 07:20:19 crc kubenswrapper[4492]: I1126 07:20:19.771904 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-qp5bd"
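The pod_startup_latency_tracker entry above reports two durations for the same pod, and the logged fields are enough to reproduce both. A minimal sketch of the arithmetic, assuming the SLO figure is simply the end-to-end startup time minus the image-pull window; subtracting the m=+... monotonic offsets (rather than the wall-clock values) reproduces the logged number exactly:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Wall-clock values copied from the log entry for ovn-edpm-deployment-...-qp5bd.
	created := time.Date(2025, time.November, 26, 7, 19, 24, 0, time.UTC)         // podCreationTimestamp
	running := time.Date(2025, time.November, 26, 7, 19, 26, 907950684, time.UTC) // watchObservedRunningTime

	// Image-pull window, taken from the monotonic m=+... offsets so the
	// subtraction is on the kubelet's own clock.
	pull := 1862.265783754 - 1861.761191718 // lastFinishedPulling - firstStartedPulling

	e2e := running.Sub(created).Seconds()
	fmt.Printf("podStartE2EDuration=%.9fs\n", e2e)     // 2.907950684s, as logged
	fmt.Printf("podStartSLOduration=%.9f\n", e2e-pull) // 2.403358648, as logged
}

The same identity holds for the later latency entries in this capture (e.g. the neutron-metadata pod: 2.452257610 - 0.615085500 = 1.837172110), which supports the assumed semantics.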
Nov 26 07:20:19 crc kubenswrapper[4492]: I1126 07:20:19.848056 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/635ac185-0d83-440e-a771-7743a7c69d8d-ssh-key\") pod \"635ac185-0d83-440e-a771-7743a7c69d8d\" (UID: \"635ac185-0d83-440e-a771-7743a7c69d8d\") "
Nov 26 07:20:19 crc kubenswrapper[4492]: I1126 07:20:19.848421 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jdjxh\" (UniqueName: \"kubernetes.io/projected/635ac185-0d83-440e-a771-7743a7c69d8d-kube-api-access-jdjxh\") pod \"635ac185-0d83-440e-a771-7743a7c69d8d\" (UID: \"635ac185-0d83-440e-a771-7743a7c69d8d\") "
Nov 26 07:20:19 crc kubenswrapper[4492]: I1126 07:20:19.848619 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/635ac185-0d83-440e-a771-7743a7c69d8d-ovncontroller-config-0\") pod \"635ac185-0d83-440e-a771-7743a7c69d8d\" (UID: \"635ac185-0d83-440e-a771-7743a7c69d8d\") "
Nov 26 07:20:19 crc kubenswrapper[4492]: I1126 07:20:19.848835 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/635ac185-0d83-440e-a771-7743a7c69d8d-ovn-combined-ca-bundle\") pod \"635ac185-0d83-440e-a771-7743a7c69d8d\" (UID: \"635ac185-0d83-440e-a771-7743a7c69d8d\") "
Nov 26 07:20:19 crc kubenswrapper[4492]: I1126 07:20:19.848986 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/635ac185-0d83-440e-a771-7743a7c69d8d-inventory\") pod \"635ac185-0d83-440e-a771-7743a7c69d8d\" (UID: \"635ac185-0d83-440e-a771-7743a7c69d8d\") "
Nov 26 07:20:19 crc kubenswrapper[4492]: I1126 07:20:19.857799 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/635ac185-0d83-440e-a771-7743a7c69d8d-kube-api-access-jdjxh" (OuterVolumeSpecName: "kube-api-access-jdjxh") pod "635ac185-0d83-440e-a771-7743a7c69d8d" (UID: "635ac185-0d83-440e-a771-7743a7c69d8d"). InnerVolumeSpecName "kube-api-access-jdjxh". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 07:20:19 crc kubenswrapper[4492]: I1126 07:20:19.863803 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/635ac185-0d83-440e-a771-7743a7c69d8d-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "635ac185-0d83-440e-a771-7743a7c69d8d" (UID: "635ac185-0d83-440e-a771-7743a7c69d8d"). InnerVolumeSpecName "ovn-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 07:20:19 crc kubenswrapper[4492]: I1126 07:20:19.872119 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/635ac185-0d83-440e-a771-7743a7c69d8d-ovncontroller-config-0" (OuterVolumeSpecName: "ovncontroller-config-0") pod "635ac185-0d83-440e-a771-7743a7c69d8d" (UID: "635ac185-0d83-440e-a771-7743a7c69d8d"). InnerVolumeSpecName "ovncontroller-config-0".
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:20:19 crc kubenswrapper[4492]: I1126 07:20:19.875548 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/635ac185-0d83-440e-a771-7743a7c69d8d-inventory" (OuterVolumeSpecName: "inventory") pod "635ac185-0d83-440e-a771-7743a7c69d8d" (UID: "635ac185-0d83-440e-a771-7743a7c69d8d"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:20:19 crc kubenswrapper[4492]: I1126 07:20:19.877845 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/635ac185-0d83-440e-a771-7743a7c69d8d-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "635ac185-0d83-440e-a771-7743a7c69d8d" (UID: "635ac185-0d83-440e-a771-7743a7c69d8d"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:20:19 crc kubenswrapper[4492]: I1126 07:20:19.952111 4492 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/635ac185-0d83-440e-a771-7743a7c69d8d-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:20:19 crc kubenswrapper[4492]: I1126 07:20:19.952140 4492 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/635ac185-0d83-440e-a771-7743a7c69d8d-inventory\") on node \"crc\" DevicePath \"\"" Nov 26 07:20:19 crc kubenswrapper[4492]: I1126 07:20:19.952150 4492 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/635ac185-0d83-440e-a771-7743a7c69d8d-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 26 07:20:19 crc kubenswrapper[4492]: I1126 07:20:19.952159 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jdjxh\" (UniqueName: \"kubernetes.io/projected/635ac185-0d83-440e-a771-7743a7c69d8d-kube-api-access-jdjxh\") on node \"crc\" DevicePath \"\"" Nov 26 07:20:19 crc kubenswrapper[4492]: I1126 07:20:19.952206 4492 reconciler_common.go:293] "Volume detached for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/635ac185-0d83-440e-a771-7743a7c69d8d-ovncontroller-config-0\") on node \"crc\" DevicePath \"\"" Nov 26 07:20:20 crc kubenswrapper[4492]: I1126 07:20:20.407899 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-qp5bd" event={"ID":"635ac185-0d83-440e-a771-7743a7c69d8d","Type":"ContainerDied","Data":"804beabe89b64125221be22d7005d3bc61bb10c534f7c4c4b119ee8e7dd663fd"} Nov 26 07:20:20 crc kubenswrapper[4492]: I1126 07:20:20.407966 4492 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="804beabe89b64125221be22d7005d3bc61bb10c534f7c4c4b119ee8e7dd663fd" Nov 26 07:20:20 crc kubenswrapper[4492]: I1126 07:20:20.408059 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-qp5bd" Nov 26 07:20:20 crc kubenswrapper[4492]: I1126 07:20:20.513010 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-m9jbv"] Nov 26 07:20:20 crc kubenswrapper[4492]: E1126 07:20:20.514041 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="635ac185-0d83-440e-a771-7743a7c69d8d" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Nov 26 07:20:20 crc kubenswrapper[4492]: I1126 07:20:20.514192 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="635ac185-0d83-440e-a771-7743a7c69d8d" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Nov 26 07:20:20 crc kubenswrapper[4492]: I1126 07:20:20.522997 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="635ac185-0d83-440e-a771-7743a7c69d8d" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Nov 26 07:20:20 crc kubenswrapper[4492]: I1126 07:20:20.523752 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-m9jbv"] Nov 26 07:20:20 crc kubenswrapper[4492]: I1126 07:20:20.523926 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-m9jbv" Nov 26 07:20:20 crc kubenswrapper[4492]: I1126 07:20:20.527247 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-mxgqb" Nov 26 07:20:20 crc kubenswrapper[4492]: I1126 07:20:20.528423 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 26 07:20:20 crc kubenswrapper[4492]: I1126 07:20:20.528544 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-neutron-config" Nov 26 07:20:20 crc kubenswrapper[4492]: I1126 07:20:20.528693 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 26 07:20:20 crc kubenswrapper[4492]: I1126 07:20:20.528976 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-ovn-metadata-agent-neutron-config" Nov 26 07:20:20 crc kubenswrapper[4492]: I1126 07:20:20.529221 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 26 07:20:20 crc kubenswrapper[4492]: I1126 07:20:20.666250 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zc2pn\" (UniqueName: \"kubernetes.io/projected/d46298b0-6667-4e4f-984d-0117c5633726-kube-api-access-zc2pn\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-m9jbv\" (UID: \"d46298b0-6667-4e4f-984d-0117c5633726\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-m9jbv" Nov 26 07:20:20 crc kubenswrapper[4492]: I1126 07:20:20.666365 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d46298b0-6667-4e4f-984d-0117c5633726-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-m9jbv\" (UID: \"d46298b0-6667-4e4f-984d-0117c5633726\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-m9jbv" Nov 26 07:20:20 crc kubenswrapper[4492]: I1126 07:20:20.666475 4492 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/d46298b0-6667-4e4f-984d-0117c5633726-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-m9jbv\" (UID: \"d46298b0-6667-4e4f-984d-0117c5633726\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-m9jbv" Nov 26 07:20:20 crc kubenswrapper[4492]: I1126 07:20:20.666521 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d46298b0-6667-4e4f-984d-0117c5633726-ssh-key\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-m9jbv\" (UID: \"d46298b0-6667-4e4f-984d-0117c5633726\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-m9jbv" Nov 26 07:20:20 crc kubenswrapper[4492]: I1126 07:20:20.666745 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d46298b0-6667-4e4f-984d-0117c5633726-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-m9jbv\" (UID: \"d46298b0-6667-4e4f-984d-0117c5633726\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-m9jbv" Nov 26 07:20:20 crc kubenswrapper[4492]: I1126 07:20:20.666843 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/d46298b0-6667-4e4f-984d-0117c5633726-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-m9jbv\" (UID: \"d46298b0-6667-4e4f-984d-0117c5633726\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-m9jbv" Nov 26 07:20:20 crc kubenswrapper[4492]: I1126 07:20:20.770764 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d46298b0-6667-4e4f-984d-0117c5633726-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-m9jbv\" (UID: \"d46298b0-6667-4e4f-984d-0117c5633726\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-m9jbv" Nov 26 07:20:20 crc kubenswrapper[4492]: I1126 07:20:20.771804 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/d46298b0-6667-4e4f-984d-0117c5633726-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-m9jbv\" (UID: \"d46298b0-6667-4e4f-984d-0117c5633726\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-m9jbv" Nov 26 07:20:20 crc kubenswrapper[4492]: I1126 07:20:20.771981 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zc2pn\" (UniqueName: \"kubernetes.io/projected/d46298b0-6667-4e4f-984d-0117c5633726-kube-api-access-zc2pn\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-m9jbv\" (UID: \"d46298b0-6667-4e4f-984d-0117c5633726\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-m9jbv" Nov 26 07:20:20 crc kubenswrapper[4492]: I1126 07:20:20.772197 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d46298b0-6667-4e4f-984d-0117c5633726-neutron-metadata-combined-ca-bundle\") pod 
\"neutron-metadata-edpm-deployment-openstack-edpm-ipam-m9jbv\" (UID: \"d46298b0-6667-4e4f-984d-0117c5633726\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-m9jbv" Nov 26 07:20:20 crc kubenswrapper[4492]: I1126 07:20:20.772319 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/d46298b0-6667-4e4f-984d-0117c5633726-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-m9jbv\" (UID: \"d46298b0-6667-4e4f-984d-0117c5633726\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-m9jbv" Nov 26 07:20:20 crc kubenswrapper[4492]: I1126 07:20:20.772372 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d46298b0-6667-4e4f-984d-0117c5633726-ssh-key\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-m9jbv\" (UID: \"d46298b0-6667-4e4f-984d-0117c5633726\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-m9jbv" Nov 26 07:20:20 crc kubenswrapper[4492]: I1126 07:20:20.778359 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/d46298b0-6667-4e4f-984d-0117c5633726-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-m9jbv\" (UID: \"d46298b0-6667-4e4f-984d-0117c5633726\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-m9jbv" Nov 26 07:20:20 crc kubenswrapper[4492]: I1126 07:20:20.778450 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/d46298b0-6667-4e4f-984d-0117c5633726-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-m9jbv\" (UID: \"d46298b0-6667-4e4f-984d-0117c5633726\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-m9jbv" Nov 26 07:20:20 crc kubenswrapper[4492]: I1126 07:20:20.781006 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d46298b0-6667-4e4f-984d-0117c5633726-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-m9jbv\" (UID: \"d46298b0-6667-4e4f-984d-0117c5633726\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-m9jbv" Nov 26 07:20:20 crc kubenswrapper[4492]: I1126 07:20:20.787404 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d46298b0-6667-4e4f-984d-0117c5633726-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-m9jbv\" (UID: \"d46298b0-6667-4e4f-984d-0117c5633726\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-m9jbv" Nov 26 07:20:20 crc kubenswrapper[4492]: I1126 07:20:20.788142 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d46298b0-6667-4e4f-984d-0117c5633726-ssh-key\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-m9jbv\" (UID: \"d46298b0-6667-4e4f-984d-0117c5633726\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-m9jbv" Nov 26 07:20:20 crc kubenswrapper[4492]: I1126 07:20:20.793668 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"kube-api-access-zc2pn\" (UniqueName: \"kubernetes.io/projected/d46298b0-6667-4e4f-984d-0117c5633726-kube-api-access-zc2pn\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-m9jbv\" (UID: \"d46298b0-6667-4e4f-984d-0117c5633726\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-m9jbv" Nov 26 07:20:20 crc kubenswrapper[4492]: I1126 07:20:20.841560 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-m9jbv" Nov 26 07:20:21 crc kubenswrapper[4492]: I1126 07:20:21.327842 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-m9jbv"] Nov 26 07:20:21 crc kubenswrapper[4492]: I1126 07:20:21.418234 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-m9jbv" event={"ID":"d46298b0-6667-4e4f-984d-0117c5633726","Type":"ContainerStarted","Data":"15dab81bf6efd564d641e61c9343bdaadead8fc8ca06e4fec7a28de1645291f8"} Nov 26 07:20:22 crc kubenswrapper[4492]: I1126 07:20:22.427626 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-m9jbv" event={"ID":"d46298b0-6667-4e4f-984d-0117c5633726","Type":"ContainerStarted","Data":"6926b7094026c1dfd7bfd7b044995eb66bab91df6cc0915a5f66d3a0121577f1"} Nov 26 07:20:22 crc kubenswrapper[4492]: I1126 07:20:22.452273 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-m9jbv" podStartSLOduration=1.837172111 podStartE2EDuration="2.45225761s" podCreationTimestamp="2025-11-26 07:20:20 +0000 UTC" firstStartedPulling="2025-11-26 07:20:21.338899257 +0000 UTC m=+1917.222787556" lastFinishedPulling="2025-11-26 07:20:21.953984757 +0000 UTC m=+1917.837873055" observedRunningTime="2025-11-26 07:20:22.445559545 +0000 UTC m=+1918.329447844" watchObservedRunningTime="2025-11-26 07:20:22.45225761 +0000 UTC m=+1918.336145908" Nov 26 07:20:59 crc kubenswrapper[4492]: I1126 07:20:59.795631 4492 generic.go:334] "Generic (PLEG): container finished" podID="d46298b0-6667-4e4f-984d-0117c5633726" containerID="6926b7094026c1dfd7bfd7b044995eb66bab91df6cc0915a5f66d3a0121577f1" exitCode=0 Nov 26 07:20:59 crc kubenswrapper[4492]: I1126 07:20:59.795756 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-m9jbv" event={"ID":"d46298b0-6667-4e4f-984d-0117c5633726","Type":"ContainerDied","Data":"6926b7094026c1dfd7bfd7b044995eb66bab91df6cc0915a5f66d3a0121577f1"} Nov 26 07:21:01 crc kubenswrapper[4492]: I1126 07:21:01.147626 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-m9jbv" Nov 26 07:21:01 crc kubenswrapper[4492]: I1126 07:21:01.338728 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zc2pn\" (UniqueName: \"kubernetes.io/projected/d46298b0-6667-4e4f-984d-0117c5633726-kube-api-access-zc2pn\") pod \"d46298b0-6667-4e4f-984d-0117c5633726\" (UID: \"d46298b0-6667-4e4f-984d-0117c5633726\") " Nov 26 07:21:01 crc kubenswrapper[4492]: I1126 07:21:01.338785 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d46298b0-6667-4e4f-984d-0117c5633726-inventory\") pod \"d46298b0-6667-4e4f-984d-0117c5633726\" (UID: \"d46298b0-6667-4e4f-984d-0117c5633726\") " Nov 26 07:21:01 crc kubenswrapper[4492]: I1126 07:21:01.338814 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d46298b0-6667-4e4f-984d-0117c5633726-ssh-key\") pod \"d46298b0-6667-4e4f-984d-0117c5633726\" (UID: \"d46298b0-6667-4e4f-984d-0117c5633726\") " Nov 26 07:21:01 crc kubenswrapper[4492]: I1126 07:21:01.338887 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d46298b0-6667-4e4f-984d-0117c5633726-neutron-metadata-combined-ca-bundle\") pod \"d46298b0-6667-4e4f-984d-0117c5633726\" (UID: \"d46298b0-6667-4e4f-984d-0117c5633726\") " Nov 26 07:21:01 crc kubenswrapper[4492]: I1126 07:21:01.338995 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/d46298b0-6667-4e4f-984d-0117c5633726-nova-metadata-neutron-config-0\") pod \"d46298b0-6667-4e4f-984d-0117c5633726\" (UID: \"d46298b0-6667-4e4f-984d-0117c5633726\") " Nov 26 07:21:01 crc kubenswrapper[4492]: I1126 07:21:01.339084 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/d46298b0-6667-4e4f-984d-0117c5633726-neutron-ovn-metadata-agent-neutron-config-0\") pod \"d46298b0-6667-4e4f-984d-0117c5633726\" (UID: \"d46298b0-6667-4e4f-984d-0117c5633726\") " Nov 26 07:21:01 crc kubenswrapper[4492]: I1126 07:21:01.346337 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d46298b0-6667-4e4f-984d-0117c5633726-neutron-metadata-combined-ca-bundle" (OuterVolumeSpecName: "neutron-metadata-combined-ca-bundle") pod "d46298b0-6667-4e4f-984d-0117c5633726" (UID: "d46298b0-6667-4e4f-984d-0117c5633726"). InnerVolumeSpecName "neutron-metadata-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:21:01 crc kubenswrapper[4492]: I1126 07:21:01.346970 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d46298b0-6667-4e4f-984d-0117c5633726-kube-api-access-zc2pn" (OuterVolumeSpecName: "kube-api-access-zc2pn") pod "d46298b0-6667-4e4f-984d-0117c5633726" (UID: "d46298b0-6667-4e4f-984d-0117c5633726"). InnerVolumeSpecName "kube-api-access-zc2pn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:21:01 crc kubenswrapper[4492]: I1126 07:21:01.363907 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d46298b0-6667-4e4f-984d-0117c5633726-inventory" (OuterVolumeSpecName: "inventory") pod "d46298b0-6667-4e4f-984d-0117c5633726" (UID: "d46298b0-6667-4e4f-984d-0117c5633726"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:21:01 crc kubenswrapper[4492]: I1126 07:21:01.366519 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d46298b0-6667-4e4f-984d-0117c5633726-neutron-ovn-metadata-agent-neutron-config-0" (OuterVolumeSpecName: "neutron-ovn-metadata-agent-neutron-config-0") pod "d46298b0-6667-4e4f-984d-0117c5633726" (UID: "d46298b0-6667-4e4f-984d-0117c5633726"). InnerVolumeSpecName "neutron-ovn-metadata-agent-neutron-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:21:01 crc kubenswrapper[4492]: I1126 07:21:01.368406 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d46298b0-6667-4e4f-984d-0117c5633726-nova-metadata-neutron-config-0" (OuterVolumeSpecName: "nova-metadata-neutron-config-0") pod "d46298b0-6667-4e4f-984d-0117c5633726" (UID: "d46298b0-6667-4e4f-984d-0117c5633726"). InnerVolumeSpecName "nova-metadata-neutron-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:21:01 crc kubenswrapper[4492]: I1126 07:21:01.368737 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d46298b0-6667-4e4f-984d-0117c5633726-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "d46298b0-6667-4e4f-984d-0117c5633726" (UID: "d46298b0-6667-4e4f-984d-0117c5633726"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:21:01 crc kubenswrapper[4492]: I1126 07:21:01.441248 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zc2pn\" (UniqueName: \"kubernetes.io/projected/d46298b0-6667-4e4f-984d-0117c5633726-kube-api-access-zc2pn\") on node \"crc\" DevicePath \"\"" Nov 26 07:21:01 crc kubenswrapper[4492]: I1126 07:21:01.441286 4492 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d46298b0-6667-4e4f-984d-0117c5633726-inventory\") on node \"crc\" DevicePath \"\"" Nov 26 07:21:01 crc kubenswrapper[4492]: I1126 07:21:01.441307 4492 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d46298b0-6667-4e4f-984d-0117c5633726-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 26 07:21:01 crc kubenswrapper[4492]: I1126 07:21:01.441368 4492 reconciler_common.go:293] "Volume detached for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d46298b0-6667-4e4f-984d-0117c5633726-neutron-metadata-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:21:01 crc kubenswrapper[4492]: I1126 07:21:01.441415 4492 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/d46298b0-6667-4e4f-984d-0117c5633726-nova-metadata-neutron-config-0\") on node \"crc\" DevicePath \"\"" Nov 26 07:21:01 crc kubenswrapper[4492]: I1126 07:21:01.441436 4492 reconciler_common.go:293] "Volume detached for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/d46298b0-6667-4e4f-984d-0117c5633726-neutron-ovn-metadata-agent-neutron-config-0\") on node \"crc\" DevicePath \"\"" Nov 26 07:21:01 crc kubenswrapper[4492]: I1126 07:21:01.815547 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-m9jbv" event={"ID":"d46298b0-6667-4e4f-984d-0117c5633726","Type":"ContainerDied","Data":"15dab81bf6efd564d641e61c9343bdaadead8fc8ca06e4fec7a28de1645291f8"} Nov 26 07:21:01 crc kubenswrapper[4492]: I1126 07:21:01.815593 4492 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="15dab81bf6efd564d641e61c9343bdaadead8fc8ca06e4fec7a28de1645291f8" Nov 26 07:21:01 crc kubenswrapper[4492]: I1126 07:21:01.815633 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-m9jbv" Nov 26 07:21:01 crc kubenswrapper[4492]: I1126 07:21:01.938499 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-ns7zg"] Nov 26 07:21:01 crc kubenswrapper[4492]: E1126 07:21:01.938843 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d46298b0-6667-4e4f-984d-0117c5633726" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Nov 26 07:21:01 crc kubenswrapper[4492]: I1126 07:21:01.938864 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="d46298b0-6667-4e4f-984d-0117c5633726" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Nov 26 07:21:01 crc kubenswrapper[4492]: I1126 07:21:01.939047 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="d46298b0-6667-4e4f-984d-0117c5633726" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Nov 26 07:21:01 crc kubenswrapper[4492]: I1126 07:21:01.939641 4492 util.go:30] "No sandbox for pod can be found. 
Nov 26 07:21:01 crc kubenswrapper[4492]: I1126 07:21:01.941821 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-mxgqb"
Nov 26 07:21:01 crc kubenswrapper[4492]: I1126 07:21:01.942314 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Nov 26 07:21:01 crc kubenswrapper[4492]: I1126 07:21:01.942600 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Nov 26 07:21:01 crc kubenswrapper[4492]: I1126 07:21:01.942841 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Nov 26 07:21:01 crc kubenswrapper[4492]: I1126 07:21:01.945298 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"libvirt-secret"
Nov 26 07:21:01 crc kubenswrapper[4492]: I1126 07:21:01.949557 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-ns7zg"]
Nov 26 07:21:02 crc kubenswrapper[4492]: I1126 07:21:02.055196 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d0563b3c-c009-4d1b-b921-9a2c0ec1a93b-ssh-key\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-ns7zg\" (UID: \"d0563b3c-c009-4d1b-b921-9a2c0ec1a93b\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-ns7zg"
Nov 26 07:21:02 crc kubenswrapper[4492]: I1126 07:21:02.056258 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d0563b3c-c009-4d1b-b921-9a2c0ec1a93b-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-ns7zg\" (UID: \"d0563b3c-c009-4d1b-b921-9a2c0ec1a93b\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-ns7zg"
Nov 26 07:21:02 crc kubenswrapper[4492]: I1126 07:21:02.056357 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/d0563b3c-c009-4d1b-b921-9a2c0ec1a93b-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-ns7zg\" (UID: \"d0563b3c-c009-4d1b-b921-9a2c0ec1a93b\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-ns7zg"
Nov 26 07:21:02 crc kubenswrapper[4492]: I1126 07:21:02.056551 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sstvd\" (UniqueName: \"kubernetes.io/projected/d0563b3c-c009-4d1b-b921-9a2c0ec1a93b-kube-api-access-sstvd\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-ns7zg\" (UID: \"d0563b3c-c009-4d1b-b921-9a2c0ec1a93b\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-ns7zg"
Nov 26 07:21:02 crc kubenswrapper[4492]: I1126 07:21:02.056586 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d0563b3c-c009-4d1b-b921-9a2c0ec1a93b-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-ns7zg\" (UID: \"d0563b3c-c009-4d1b-b921-9a2c0ec1a93b\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-ns7zg"
Nov 26 07:21:02 crc kubenswrapper[4492]: I1126 07:21:02.158233 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume
\"ssh-key\" (UniqueName: \"kubernetes.io/secret/d0563b3c-c009-4d1b-b921-9a2c0ec1a93b-ssh-key\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-ns7zg\" (UID: \"d0563b3c-c009-4d1b-b921-9a2c0ec1a93b\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-ns7zg" Nov 26 07:21:02 crc kubenswrapper[4492]: I1126 07:21:02.158355 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d0563b3c-c009-4d1b-b921-9a2c0ec1a93b-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-ns7zg\" (UID: \"d0563b3c-c009-4d1b-b921-9a2c0ec1a93b\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-ns7zg" Nov 26 07:21:02 crc kubenswrapper[4492]: I1126 07:21:02.158407 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/d0563b3c-c009-4d1b-b921-9a2c0ec1a93b-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-ns7zg\" (UID: \"d0563b3c-c009-4d1b-b921-9a2c0ec1a93b\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-ns7zg" Nov 26 07:21:02 crc kubenswrapper[4492]: I1126 07:21:02.158512 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sstvd\" (UniqueName: \"kubernetes.io/projected/d0563b3c-c009-4d1b-b921-9a2c0ec1a93b-kube-api-access-sstvd\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-ns7zg\" (UID: \"d0563b3c-c009-4d1b-b921-9a2c0ec1a93b\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-ns7zg" Nov 26 07:21:02 crc kubenswrapper[4492]: I1126 07:21:02.158536 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d0563b3c-c009-4d1b-b921-9a2c0ec1a93b-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-ns7zg\" (UID: \"d0563b3c-c009-4d1b-b921-9a2c0ec1a93b\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-ns7zg" Nov 26 07:21:02 crc kubenswrapper[4492]: I1126 07:21:02.162929 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d0563b3c-c009-4d1b-b921-9a2c0ec1a93b-ssh-key\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-ns7zg\" (UID: \"d0563b3c-c009-4d1b-b921-9a2c0ec1a93b\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-ns7zg" Nov 26 07:21:02 crc kubenswrapper[4492]: I1126 07:21:02.162979 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d0563b3c-c009-4d1b-b921-9a2c0ec1a93b-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-ns7zg\" (UID: \"d0563b3c-c009-4d1b-b921-9a2c0ec1a93b\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-ns7zg" Nov 26 07:21:02 crc kubenswrapper[4492]: I1126 07:21:02.163247 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/d0563b3c-c009-4d1b-b921-9a2c0ec1a93b-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-ns7zg\" (UID: \"d0563b3c-c009-4d1b-b921-9a2c0ec1a93b\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-ns7zg" Nov 26 07:21:02 crc kubenswrapper[4492]: I1126 07:21:02.164607 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d0563b3c-c009-4d1b-b921-9a2c0ec1a93b-inventory\") pod 
\"libvirt-edpm-deployment-openstack-edpm-ipam-ns7zg\" (UID: \"d0563b3c-c009-4d1b-b921-9a2c0ec1a93b\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-ns7zg" Nov 26 07:21:02 crc kubenswrapper[4492]: I1126 07:21:02.175515 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sstvd\" (UniqueName: \"kubernetes.io/projected/d0563b3c-c009-4d1b-b921-9a2c0ec1a93b-kube-api-access-sstvd\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-ns7zg\" (UID: \"d0563b3c-c009-4d1b-b921-9a2c0ec1a93b\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-ns7zg" Nov 26 07:21:02 crc kubenswrapper[4492]: I1126 07:21:02.254329 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-ns7zg" Nov 26 07:21:02 crc kubenswrapper[4492]: I1126 07:21:02.757671 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-ns7zg"] Nov 26 07:21:02 crc kubenswrapper[4492]: I1126 07:21:02.764201 4492 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 26 07:21:02 crc kubenswrapper[4492]: I1126 07:21:02.825022 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-ns7zg" event={"ID":"d0563b3c-c009-4d1b-b921-9a2c0ec1a93b","Type":"ContainerStarted","Data":"b72d61b644f68ccdbb5ff7d32b8eae2cc285cbbaaca248faa1feb93a30ade57a"} Nov 26 07:21:03 crc kubenswrapper[4492]: I1126 07:21:03.838422 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-ns7zg" event={"ID":"d0563b3c-c009-4d1b-b921-9a2c0ec1a93b","Type":"ContainerStarted","Data":"df5b2d5c579150d95283edae6ef68eef8a731fe76f85263c9939e6f520c0ecf5"} Nov 26 07:21:03 crc kubenswrapper[4492]: I1126 07:21:03.853974 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-ns7zg" podStartSLOduration=2.3502956680000002 podStartE2EDuration="2.853953505s" podCreationTimestamp="2025-11-26 07:21:01 +0000 UTC" firstStartedPulling="2025-11-26 07:21:02.763950286 +0000 UTC m=+1958.647838584" lastFinishedPulling="2025-11-26 07:21:03.267608123 +0000 UTC m=+1959.151496421" observedRunningTime="2025-11-26 07:21:03.853273565 +0000 UTC m=+1959.737161864" watchObservedRunningTime="2025-11-26 07:21:03.853953505 +0000 UTC m=+1959.737841802" Nov 26 07:21:19 crc kubenswrapper[4492]: I1126 07:21:19.442048 4492 patch_prober.go:28] interesting pod/machine-config-daemon-6blv7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 07:21:19 crc kubenswrapper[4492]: I1126 07:21:19.442724 4492 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 07:21:49 crc kubenswrapper[4492]: I1126 07:21:49.441157 4492 patch_prober.go:28] interesting pod/machine-config-daemon-6blv7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: 
connection refused" start-of-body= Nov 26 07:21:49 crc kubenswrapper[4492]: I1126 07:21:49.443024 4492 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 07:22:19 crc kubenswrapper[4492]: I1126 07:22:19.441932 4492 patch_prober.go:28] interesting pod/machine-config-daemon-6blv7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 07:22:19 crc kubenswrapper[4492]: I1126 07:22:19.442482 4492 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 07:22:19 crc kubenswrapper[4492]: I1126 07:22:19.442532 4492 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" Nov 26 07:22:19 crc kubenswrapper[4492]: I1126 07:22:19.442938 4492 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"334da1a59c09d707f16e7e3825ec5855d6020e57d3066cd28e967266790a8121"} pod="openshift-machine-config-operator/machine-config-daemon-6blv7" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 07:22:19 crc kubenswrapper[4492]: I1126 07:22:19.442993 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" containerID="cri-o://334da1a59c09d707f16e7e3825ec5855d6020e57d3066cd28e967266790a8121" gracePeriod=600 Nov 26 07:22:20 crc kubenswrapper[4492]: I1126 07:22:20.554005 4492 generic.go:334] "Generic (PLEG): container finished" podID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerID="334da1a59c09d707f16e7e3825ec5855d6020e57d3066cd28e967266790a8121" exitCode=0 Nov 26 07:22:20 crc kubenswrapper[4492]: I1126 07:22:20.554080 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" event={"ID":"04bf18ad-d2a1-4b30-a3fa-2b6247363c82","Type":"ContainerDied","Data":"334da1a59c09d707f16e7e3825ec5855d6020e57d3066cd28e967266790a8121"} Nov 26 07:22:20 crc kubenswrapper[4492]: I1126 07:22:20.554568 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" event={"ID":"04bf18ad-d2a1-4b30-a3fa-2b6247363c82","Type":"ContainerStarted","Data":"bbb2c9d89253c0550a885310fa6b6be530f4d28143ea5439ce2e8887d31b7abb"} Nov 26 07:22:20 crc kubenswrapper[4492]: I1126 07:22:20.554588 4492 scope.go:117] "RemoveContainer" containerID="e66a01ef992e57d5211153b60046f2247dc264cdaa804a19ffc29e563f14e227" Nov 26 07:24:19 crc kubenswrapper[4492]: I1126 07:24:19.442046 4492 patch_prober.go:28] interesting pod/machine-config-daemon-6blv7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe 
Nov 26 07:24:19 crc kubenswrapper[4492]: I1126 07:24:19.442778 4492 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 26 07:24:22 crc kubenswrapper[4492]: I1126 07:24:22.776394 4492 generic.go:334] "Generic (PLEG): container finished" podID="d0563b3c-c009-4d1b-b921-9a2c0ec1a93b" containerID="df5b2d5c579150d95283edae6ef68eef8a731fe76f85263c9939e6f520c0ecf5" exitCode=0
Nov 26 07:24:22 crc kubenswrapper[4492]: I1126 07:24:22.776477 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-ns7zg" event={"ID":"d0563b3c-c009-4d1b-b921-9a2c0ec1a93b","Type":"ContainerDied","Data":"df5b2d5c579150d95283edae6ef68eef8a731fe76f85263c9939e6f520c0ecf5"}
Nov 26 07:24:24 crc kubenswrapper[4492]: I1126 07:24:24.172874 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-ns7zg"
Nov 26 07:24:24 crc kubenswrapper[4492]: I1126 07:24:24.177925 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/d0563b3c-c009-4d1b-b921-9a2c0ec1a93b-libvirt-secret-0\") pod \"d0563b3c-c009-4d1b-b921-9a2c0ec1a93b\" (UID: \"d0563b3c-c009-4d1b-b921-9a2c0ec1a93b\") "
Nov 26 07:24:24 crc kubenswrapper[4492]: I1126 07:24:24.177985 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d0563b3c-c009-4d1b-b921-9a2c0ec1a93b-libvirt-combined-ca-bundle\") pod \"d0563b3c-c009-4d1b-b921-9a2c0ec1a93b\" (UID: \"d0563b3c-c009-4d1b-b921-9a2c0ec1a93b\") "
Nov 26 07:24:24 crc kubenswrapper[4492]: I1126 07:24:24.178065 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d0563b3c-c009-4d1b-b921-9a2c0ec1a93b-ssh-key\") pod \"d0563b3c-c009-4d1b-b921-9a2c0ec1a93b\" (UID: \"d0563b3c-c009-4d1b-b921-9a2c0ec1a93b\") "
Nov 26 07:24:24 crc kubenswrapper[4492]: I1126 07:24:24.178108 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sstvd\" (UniqueName: \"kubernetes.io/projected/d0563b3c-c009-4d1b-b921-9a2c0ec1a93b-kube-api-access-sstvd\") pod \"d0563b3c-c009-4d1b-b921-9a2c0ec1a93b\" (UID: \"d0563b3c-c009-4d1b-b921-9a2c0ec1a93b\") "
Nov 26 07:24:24 crc kubenswrapper[4492]: I1126 07:24:24.185391 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d0563b3c-c009-4d1b-b921-9a2c0ec1a93b-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "d0563b3c-c009-4d1b-b921-9a2c0ec1a93b" (UID: "d0563b3c-c009-4d1b-b921-9a2c0ec1a93b"). InnerVolumeSpecName "libvirt-combined-ca-bundle".
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:24:24 crc kubenswrapper[4492]: I1126 07:24:24.185410 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d0563b3c-c009-4d1b-b921-9a2c0ec1a93b-kube-api-access-sstvd" (OuterVolumeSpecName: "kube-api-access-sstvd") pod "d0563b3c-c009-4d1b-b921-9a2c0ec1a93b" (UID: "d0563b3c-c009-4d1b-b921-9a2c0ec1a93b"). InnerVolumeSpecName "kube-api-access-sstvd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:24:24 crc kubenswrapper[4492]: I1126 07:24:24.221889 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d0563b3c-c009-4d1b-b921-9a2c0ec1a93b-libvirt-secret-0" (OuterVolumeSpecName: "libvirt-secret-0") pod "d0563b3c-c009-4d1b-b921-9a2c0ec1a93b" (UID: "d0563b3c-c009-4d1b-b921-9a2c0ec1a93b"). InnerVolumeSpecName "libvirt-secret-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:24:24 crc kubenswrapper[4492]: I1126 07:24:24.247789 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d0563b3c-c009-4d1b-b921-9a2c0ec1a93b-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "d0563b3c-c009-4d1b-b921-9a2c0ec1a93b" (UID: "d0563b3c-c009-4d1b-b921-9a2c0ec1a93b"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:24:24 crc kubenswrapper[4492]: I1126 07:24:24.280588 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d0563b3c-c009-4d1b-b921-9a2c0ec1a93b-inventory\") pod \"d0563b3c-c009-4d1b-b921-9a2c0ec1a93b\" (UID: \"d0563b3c-c009-4d1b-b921-9a2c0ec1a93b\") " Nov 26 07:24:24 crc kubenswrapper[4492]: I1126 07:24:24.281325 4492 reconciler_common.go:293] "Volume detached for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/d0563b3c-c009-4d1b-b921-9a2c0ec1a93b-libvirt-secret-0\") on node \"crc\" DevicePath \"\"" Nov 26 07:24:24 crc kubenswrapper[4492]: I1126 07:24:24.281627 4492 reconciler_common.go:293] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d0563b3c-c009-4d1b-b921-9a2c0ec1a93b-libvirt-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:24:24 crc kubenswrapper[4492]: I1126 07:24:24.281642 4492 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d0563b3c-c009-4d1b-b921-9a2c0ec1a93b-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 26 07:24:24 crc kubenswrapper[4492]: I1126 07:24:24.281653 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sstvd\" (UniqueName: \"kubernetes.io/projected/d0563b3c-c009-4d1b-b921-9a2c0ec1a93b-kube-api-access-sstvd\") on node \"crc\" DevicePath \"\"" Nov 26 07:24:24 crc kubenswrapper[4492]: I1126 07:24:24.304259 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d0563b3c-c009-4d1b-b921-9a2c0ec1a93b-inventory" (OuterVolumeSpecName: "inventory") pod "d0563b3c-c009-4d1b-b921-9a2c0ec1a93b" (UID: "d0563b3c-c009-4d1b-b921-9a2c0ec1a93b"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:24:24 crc kubenswrapper[4492]: I1126 07:24:24.383095 4492 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d0563b3c-c009-4d1b-b921-9a2c0ec1a93b-inventory\") on node \"crc\" DevicePath \"\"" Nov 26 07:24:24 crc kubenswrapper[4492]: I1126 07:24:24.799315 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-ns7zg" event={"ID":"d0563b3c-c009-4d1b-b921-9a2c0ec1a93b","Type":"ContainerDied","Data":"b72d61b644f68ccdbb5ff7d32b8eae2cc285cbbaaca248faa1feb93a30ade57a"} Nov 26 07:24:24 crc kubenswrapper[4492]: I1126 07:24:24.799670 4492 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b72d61b644f68ccdbb5ff7d32b8eae2cc285cbbaaca248faa1feb93a30ade57a" Nov 26 07:24:24 crc kubenswrapper[4492]: I1126 07:24:24.799410 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-ns7zg" Nov 26 07:24:24 crc kubenswrapper[4492]: I1126 07:24:24.878436 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-b5glg"] Nov 26 07:24:24 crc kubenswrapper[4492]: E1126 07:24:24.879123 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d0563b3c-c009-4d1b-b921-9a2c0ec1a93b" containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Nov 26 07:24:24 crc kubenswrapper[4492]: I1126 07:24:24.879241 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="d0563b3c-c009-4d1b-b921-9a2c0ec1a93b" containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Nov 26 07:24:24 crc kubenswrapper[4492]: I1126 07:24:24.879525 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="d0563b3c-c009-4d1b-b921-9a2c0ec1a93b" containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Nov 26 07:24:24 crc kubenswrapper[4492]: I1126 07:24:24.880478 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-b5glg" Nov 26 07:24:24 crc kubenswrapper[4492]: I1126 07:24:24.886510 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 26 07:24:24 crc kubenswrapper[4492]: I1126 07:24:24.886689 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"nova-extra-config" Nov 26 07:24:24 crc kubenswrapper[4492]: I1126 07:24:24.886716 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 26 07:24:24 crc kubenswrapper[4492]: I1126 07:24:24.886782 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-mxgqb" Nov 26 07:24:24 crc kubenswrapper[4492]: I1126 07:24:24.886995 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 26 07:24:24 crc kubenswrapper[4492]: I1126 07:24:24.887469 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-migration-ssh-key" Nov 26 07:24:24 crc kubenswrapper[4492]: I1126 07:24:24.887858 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-compute-config" Nov 26 07:24:24 crc kubenswrapper[4492]: I1126 07:24:24.890663 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-b5glg"] Nov 26 07:24:24 crc kubenswrapper[4492]: I1126 07:24:24.997056 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/b7e9897d-b833-4ccd-b937-28c5e7a7d542-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-b5glg\" (UID: \"b7e9897d-b833-4ccd-b937-28c5e7a7d542\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-b5glg" Nov 26 07:24:24 crc kubenswrapper[4492]: I1126 07:24:24.997901 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b7e9897d-b833-4ccd-b937-28c5e7a7d542-ssh-key\") pod \"nova-edpm-deployment-openstack-edpm-ipam-b5glg\" (UID: \"b7e9897d-b833-4ccd-b937-28c5e7a7d542\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-b5glg" Nov 26 07:24:24 crc kubenswrapper[4492]: I1126 07:24:24.997980 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/b7e9897d-b833-4ccd-b937-28c5e7a7d542-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-b5glg\" (UID: \"b7e9897d-b833-4ccd-b937-28c5e7a7d542\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-b5glg" Nov 26 07:24:24 crc kubenswrapper[4492]: I1126 07:24:24.998102 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-982pf\" (UniqueName: \"kubernetes.io/projected/b7e9897d-b833-4ccd-b937-28c5e7a7d542-kube-api-access-982pf\") pod \"nova-edpm-deployment-openstack-edpm-ipam-b5glg\" (UID: \"b7e9897d-b833-4ccd-b937-28c5e7a7d542\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-b5glg" Nov 26 07:24:24 crc kubenswrapper[4492]: I1126 07:24:24.998215 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: 
\"kubernetes.io/secret/b7e9897d-b833-4ccd-b937-28c5e7a7d542-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-b5glg\" (UID: \"b7e9897d-b833-4ccd-b937-28c5e7a7d542\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-b5glg" Nov 26 07:24:24 crc kubenswrapper[4492]: I1126 07:24:24.998314 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/b7e9897d-b833-4ccd-b937-28c5e7a7d542-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-b5glg\" (UID: \"b7e9897d-b833-4ccd-b937-28c5e7a7d542\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-b5glg" Nov 26 07:24:24 crc kubenswrapper[4492]: I1126 07:24:24.998380 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b7e9897d-b833-4ccd-b937-28c5e7a7d542-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-b5glg\" (UID: \"b7e9897d-b833-4ccd-b937-28c5e7a7d542\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-b5glg" Nov 26 07:24:24 crc kubenswrapper[4492]: I1126 07:24:24.998416 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/b7e9897d-b833-4ccd-b937-28c5e7a7d542-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-b5glg\" (UID: \"b7e9897d-b833-4ccd-b937-28c5e7a7d542\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-b5glg" Nov 26 07:24:24 crc kubenswrapper[4492]: I1126 07:24:24.998522 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b7e9897d-b833-4ccd-b937-28c5e7a7d542-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-b5glg\" (UID: \"b7e9897d-b833-4ccd-b937-28c5e7a7d542\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-b5glg" Nov 26 07:24:25 crc kubenswrapper[4492]: I1126 07:24:25.100141 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/b7e9897d-b833-4ccd-b937-28c5e7a7d542-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-b5glg\" (UID: \"b7e9897d-b833-4ccd-b937-28c5e7a7d542\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-b5glg" Nov 26 07:24:25 crc kubenswrapper[4492]: I1126 07:24:25.100262 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b7e9897d-b833-4ccd-b937-28c5e7a7d542-ssh-key\") pod \"nova-edpm-deployment-openstack-edpm-ipam-b5glg\" (UID: \"b7e9897d-b833-4ccd-b937-28c5e7a7d542\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-b5glg" Nov 26 07:24:25 crc kubenswrapper[4492]: I1126 07:24:25.100309 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/b7e9897d-b833-4ccd-b937-28c5e7a7d542-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-b5glg\" (UID: \"b7e9897d-b833-4ccd-b937-28c5e7a7d542\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-b5glg" Nov 26 07:24:25 crc kubenswrapper[4492]: I1126 07:24:25.100379 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-982pf\" (UniqueName: 
\"kubernetes.io/projected/b7e9897d-b833-4ccd-b937-28c5e7a7d542-kube-api-access-982pf\") pod \"nova-edpm-deployment-openstack-edpm-ipam-b5glg\" (UID: \"b7e9897d-b833-4ccd-b937-28c5e7a7d542\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-b5glg" Nov 26 07:24:25 crc kubenswrapper[4492]: I1126 07:24:25.100407 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/b7e9897d-b833-4ccd-b937-28c5e7a7d542-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-b5glg\" (UID: \"b7e9897d-b833-4ccd-b937-28c5e7a7d542\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-b5glg" Nov 26 07:24:25 crc kubenswrapper[4492]: I1126 07:24:25.100443 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/b7e9897d-b833-4ccd-b937-28c5e7a7d542-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-b5glg\" (UID: \"b7e9897d-b833-4ccd-b937-28c5e7a7d542\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-b5glg" Nov 26 07:24:25 crc kubenswrapper[4492]: I1126 07:24:25.100479 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b7e9897d-b833-4ccd-b937-28c5e7a7d542-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-b5glg\" (UID: \"b7e9897d-b833-4ccd-b937-28c5e7a7d542\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-b5glg" Nov 26 07:24:25 crc kubenswrapper[4492]: I1126 07:24:25.100505 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/b7e9897d-b833-4ccd-b937-28c5e7a7d542-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-b5glg\" (UID: \"b7e9897d-b833-4ccd-b937-28c5e7a7d542\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-b5glg" Nov 26 07:24:25 crc kubenswrapper[4492]: I1126 07:24:25.100581 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b7e9897d-b833-4ccd-b937-28c5e7a7d542-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-b5glg\" (UID: \"b7e9897d-b833-4ccd-b937-28c5e7a7d542\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-b5glg" Nov 26 07:24:25 crc kubenswrapper[4492]: I1126 07:24:25.102149 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/b7e9897d-b833-4ccd-b937-28c5e7a7d542-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-b5glg\" (UID: \"b7e9897d-b833-4ccd-b937-28c5e7a7d542\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-b5glg" Nov 26 07:24:25 crc kubenswrapper[4492]: I1126 07:24:25.107265 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/b7e9897d-b833-4ccd-b937-28c5e7a7d542-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-b5glg\" (UID: \"b7e9897d-b833-4ccd-b937-28c5e7a7d542\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-b5glg" Nov 26 07:24:25 crc kubenswrapper[4492]: I1126 07:24:25.117692 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b7e9897d-b833-4ccd-b937-28c5e7a7d542-inventory\") pod 
\"nova-edpm-deployment-openstack-edpm-ipam-b5glg\" (UID: \"b7e9897d-b833-4ccd-b937-28c5e7a7d542\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-b5glg" Nov 26 07:24:25 crc kubenswrapper[4492]: I1126 07:24:25.117900 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/b7e9897d-b833-4ccd-b937-28c5e7a7d542-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-b5glg\" (UID: \"b7e9897d-b833-4ccd-b937-28c5e7a7d542\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-b5glg" Nov 26 07:24:25 crc kubenswrapper[4492]: I1126 07:24:25.118412 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/b7e9897d-b833-4ccd-b937-28c5e7a7d542-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-b5glg\" (UID: \"b7e9897d-b833-4ccd-b937-28c5e7a7d542\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-b5glg" Nov 26 07:24:25 crc kubenswrapper[4492]: I1126 07:24:25.118717 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/b7e9897d-b833-4ccd-b937-28c5e7a7d542-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-b5glg\" (UID: \"b7e9897d-b833-4ccd-b937-28c5e7a7d542\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-b5glg" Nov 26 07:24:25 crc kubenswrapper[4492]: I1126 07:24:25.119284 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b7e9897d-b833-4ccd-b937-28c5e7a7d542-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-b5glg\" (UID: \"b7e9897d-b833-4ccd-b937-28c5e7a7d542\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-b5glg" Nov 26 07:24:25 crc kubenswrapper[4492]: I1126 07:24:25.119766 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b7e9897d-b833-4ccd-b937-28c5e7a7d542-ssh-key\") pod \"nova-edpm-deployment-openstack-edpm-ipam-b5glg\" (UID: \"b7e9897d-b833-4ccd-b937-28c5e7a7d542\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-b5glg" Nov 26 07:24:25 crc kubenswrapper[4492]: I1126 07:24:25.123791 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-982pf\" (UniqueName: \"kubernetes.io/projected/b7e9897d-b833-4ccd-b937-28c5e7a7d542-kube-api-access-982pf\") pod \"nova-edpm-deployment-openstack-edpm-ipam-b5glg\" (UID: \"b7e9897d-b833-4ccd-b937-28c5e7a7d542\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-b5glg" Nov 26 07:24:25 crc kubenswrapper[4492]: I1126 07:24:25.198069 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-b5glg" Nov 26 07:24:25 crc kubenswrapper[4492]: E1126 07:24:25.449720 4492 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd0563b3c_c009_4d1b_b921_9a2c0ec1a93b.slice\": RecentStats: unable to find data in memory cache]" Nov 26 07:24:25 crc kubenswrapper[4492]: I1126 07:24:25.709078 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-b5glg"] Nov 26 07:24:25 crc kubenswrapper[4492]: I1126 07:24:25.822349 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-b5glg" event={"ID":"b7e9897d-b833-4ccd-b937-28c5e7a7d542","Type":"ContainerStarted","Data":"1255840a145d898dff8b62ba630f8b2d373c66ba3ed124289bd023fbce9ea152"} Nov 26 07:24:26 crc kubenswrapper[4492]: I1126 07:24:26.834327 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-b5glg" event={"ID":"b7e9897d-b833-4ccd-b937-28c5e7a7d542","Type":"ContainerStarted","Data":"c4f5199b0c39049658d369fdfd9f1d8752736fd3ad9c604f184658089b5a03cc"} Nov 26 07:24:26 crc kubenswrapper[4492]: I1126 07:24:26.858167 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-b5glg" podStartSLOduration=2.171867377 podStartE2EDuration="2.858149796s" podCreationTimestamp="2025-11-26 07:24:24 +0000 UTC" firstStartedPulling="2025-11-26 07:24:25.714265051 +0000 UTC m=+2161.598153350" lastFinishedPulling="2025-11-26 07:24:26.400547481 +0000 UTC m=+2162.284435769" observedRunningTime="2025-11-26 07:24:26.848619516 +0000 UTC m=+2162.732507803" watchObservedRunningTime="2025-11-26 07:24:26.858149796 +0000 UTC m=+2162.742038083" Nov 26 07:24:35 crc kubenswrapper[4492]: E1126 07:24:35.707534 4492 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd0563b3c_c009_4d1b_b921_9a2c0ec1a93b.slice\": RecentStats: unable to find data in memory cache]" Nov 26 07:24:45 crc kubenswrapper[4492]: E1126 07:24:45.991541 4492 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd0563b3c_c009_4d1b_b921_9a2c0ec1a93b.slice\": RecentStats: unable to find data in memory cache]" Nov 26 07:24:49 crc kubenswrapper[4492]: I1126 07:24:49.441116 4492 patch_prober.go:28] interesting pod/machine-config-daemon-6blv7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 07:24:49 crc kubenswrapper[4492]: I1126 07:24:49.441516 4492 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 07:24:56 crc kubenswrapper[4492]: E1126 07:24:56.247026 4492 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: 
[\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd0563b3c_c009_4d1b_b921_9a2c0ec1a93b.slice\": RecentStats: unable to find data in memory cache]" Nov 26 07:25:06 crc kubenswrapper[4492]: E1126 07:25:06.519442 4492 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd0563b3c_c009_4d1b_b921_9a2c0ec1a93b.slice\": RecentStats: unable to find data in memory cache]" Nov 26 07:25:16 crc kubenswrapper[4492]: E1126 07:25:16.766151 4492 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd0563b3c_c009_4d1b_b921_9a2c0ec1a93b.slice\": RecentStats: unable to find data in memory cache]" Nov 26 07:25:19 crc kubenswrapper[4492]: I1126 07:25:19.441463 4492 patch_prober.go:28] interesting pod/machine-config-daemon-6blv7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 07:25:19 crc kubenswrapper[4492]: I1126 07:25:19.441902 4492 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 07:25:19 crc kubenswrapper[4492]: I1126 07:25:19.441979 4492 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" Nov 26 07:25:19 crc kubenswrapper[4492]: I1126 07:25:19.443263 4492 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"bbb2c9d89253c0550a885310fa6b6be530f4d28143ea5439ce2e8887d31b7abb"} pod="openshift-machine-config-operator/machine-config-daemon-6blv7" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 07:25:19 crc kubenswrapper[4492]: I1126 07:25:19.443343 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" containerID="cri-o://bbb2c9d89253c0550a885310fa6b6be530f4d28143ea5439ce2e8887d31b7abb" gracePeriod=600 Nov 26 07:25:19 crc kubenswrapper[4492]: E1126 07:25:19.578821 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 07:25:20 crc kubenswrapper[4492]: I1126 07:25:20.408996 4492 generic.go:334] "Generic (PLEG): container finished" podID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerID="bbb2c9d89253c0550a885310fa6b6be530f4d28143ea5439ce2e8887d31b7abb" exitCode=0 Nov 26 07:25:20 crc kubenswrapper[4492]: I1126 07:25:20.409053 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-6blv7" event={"ID":"04bf18ad-d2a1-4b30-a3fa-2b6247363c82","Type":"ContainerDied","Data":"bbb2c9d89253c0550a885310fa6b6be530f4d28143ea5439ce2e8887d31b7abb"} Nov 26 07:25:20 crc kubenswrapper[4492]: I1126 07:25:20.409118 4492 scope.go:117] "RemoveContainer" containerID="334da1a59c09d707f16e7e3825ec5855d6020e57d3066cd28e967266790a8121" Nov 26 07:25:20 crc kubenswrapper[4492]: I1126 07:25:20.410307 4492 scope.go:117] "RemoveContainer" containerID="bbb2c9d89253c0550a885310fa6b6be530f4d28143ea5439ce2e8887d31b7abb" Nov 26 07:25:20 crc kubenswrapper[4492]: E1126 07:25:20.410663 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 07:25:25 crc kubenswrapper[4492]: I1126 07:25:25.169050 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-wj4fr"] Nov 26 07:25:25 crc kubenswrapper[4492]: I1126 07:25:25.178228 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-wj4fr" Nov 26 07:25:25 crc kubenswrapper[4492]: I1126 07:25:25.202885 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-wj4fr"] Nov 26 07:25:25 crc kubenswrapper[4492]: I1126 07:25:25.264643 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7efa89c6-ca72-4ab6-b840-686ec4ba19a8-utilities\") pod \"redhat-marketplace-wj4fr\" (UID: \"7efa89c6-ca72-4ab6-b840-686ec4ba19a8\") " pod="openshift-marketplace/redhat-marketplace-wj4fr" Nov 26 07:25:25 crc kubenswrapper[4492]: I1126 07:25:25.264694 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7efa89c6-ca72-4ab6-b840-686ec4ba19a8-catalog-content\") pod \"redhat-marketplace-wj4fr\" (UID: \"7efa89c6-ca72-4ab6-b840-686ec4ba19a8\") " pod="openshift-marketplace/redhat-marketplace-wj4fr" Nov 26 07:25:25 crc kubenswrapper[4492]: I1126 07:25:25.264761 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g2wcs\" (UniqueName: \"kubernetes.io/projected/7efa89c6-ca72-4ab6-b840-686ec4ba19a8-kube-api-access-g2wcs\") pod \"redhat-marketplace-wj4fr\" (UID: \"7efa89c6-ca72-4ab6-b840-686ec4ba19a8\") " pod="openshift-marketplace/redhat-marketplace-wj4fr" Nov 26 07:25:25 crc kubenswrapper[4492]: I1126 07:25:25.367405 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7efa89c6-ca72-4ab6-b840-686ec4ba19a8-utilities\") pod \"redhat-marketplace-wj4fr\" (UID: \"7efa89c6-ca72-4ab6-b840-686ec4ba19a8\") " pod="openshift-marketplace/redhat-marketplace-wj4fr" Nov 26 07:25:25 crc kubenswrapper[4492]: I1126 07:25:25.367456 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7efa89c6-ca72-4ab6-b840-686ec4ba19a8-catalog-content\") pod \"redhat-marketplace-wj4fr\" (UID: 
\"7efa89c6-ca72-4ab6-b840-686ec4ba19a8\") " pod="openshift-marketplace/redhat-marketplace-wj4fr" Nov 26 07:25:25 crc kubenswrapper[4492]: I1126 07:25:25.367931 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7efa89c6-ca72-4ab6-b840-686ec4ba19a8-utilities\") pod \"redhat-marketplace-wj4fr\" (UID: \"7efa89c6-ca72-4ab6-b840-686ec4ba19a8\") " pod="openshift-marketplace/redhat-marketplace-wj4fr" Nov 26 07:25:25 crc kubenswrapper[4492]: I1126 07:25:25.367950 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7efa89c6-ca72-4ab6-b840-686ec4ba19a8-catalog-content\") pod \"redhat-marketplace-wj4fr\" (UID: \"7efa89c6-ca72-4ab6-b840-686ec4ba19a8\") " pod="openshift-marketplace/redhat-marketplace-wj4fr" Nov 26 07:25:25 crc kubenswrapper[4492]: I1126 07:25:25.368035 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g2wcs\" (UniqueName: \"kubernetes.io/projected/7efa89c6-ca72-4ab6-b840-686ec4ba19a8-kube-api-access-g2wcs\") pod \"redhat-marketplace-wj4fr\" (UID: \"7efa89c6-ca72-4ab6-b840-686ec4ba19a8\") " pod="openshift-marketplace/redhat-marketplace-wj4fr" Nov 26 07:25:25 crc kubenswrapper[4492]: I1126 07:25:25.389499 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g2wcs\" (UniqueName: \"kubernetes.io/projected/7efa89c6-ca72-4ab6-b840-686ec4ba19a8-kube-api-access-g2wcs\") pod \"redhat-marketplace-wj4fr\" (UID: \"7efa89c6-ca72-4ab6-b840-686ec4ba19a8\") " pod="openshift-marketplace/redhat-marketplace-wj4fr" Nov 26 07:25:25 crc kubenswrapper[4492]: I1126 07:25:25.495463 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-wj4fr" Nov 26 07:25:26 crc kubenswrapper[4492]: I1126 07:25:26.025744 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-wj4fr"] Nov 26 07:25:26 crc kubenswrapper[4492]: W1126 07:25:26.029881 4492 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7efa89c6_ca72_4ab6_b840_686ec4ba19a8.slice/crio-ec145bb8acd9f260d87d6f40cc91b7955cfcea4c3cacc0b0e1a5263fb66a8973 WatchSource:0}: Error finding container ec145bb8acd9f260d87d6f40cc91b7955cfcea4c3cacc0b0e1a5263fb66a8973: Status 404 returned error can't find the container with id ec145bb8acd9f260d87d6f40cc91b7955cfcea4c3cacc0b0e1a5263fb66a8973 Nov 26 07:25:26 crc kubenswrapper[4492]: I1126 07:25:26.475034 4492 generic.go:334] "Generic (PLEG): container finished" podID="7efa89c6-ca72-4ab6-b840-686ec4ba19a8" containerID="5ceead2ea97d397268243f3dee7ab25623ee5961572e5bc1ed7512fe26dd94d3" exitCode=0 Nov 26 07:25:26 crc kubenswrapper[4492]: I1126 07:25:26.475121 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wj4fr" event={"ID":"7efa89c6-ca72-4ab6-b840-686ec4ba19a8","Type":"ContainerDied","Data":"5ceead2ea97d397268243f3dee7ab25623ee5961572e5bc1ed7512fe26dd94d3"} Nov 26 07:25:26 crc kubenswrapper[4492]: I1126 07:25:26.475186 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wj4fr" event={"ID":"7efa89c6-ca72-4ab6-b840-686ec4ba19a8","Type":"ContainerStarted","Data":"ec145bb8acd9f260d87d6f40cc91b7955cfcea4c3cacc0b0e1a5263fb66a8973"} Nov 26 07:25:27 crc kubenswrapper[4492]: I1126 07:25:27.487189 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wj4fr" event={"ID":"7efa89c6-ca72-4ab6-b840-686ec4ba19a8","Type":"ContainerStarted","Data":"726e269f8eba042ea65a9af6eaed6bf0a7b216c01a84b3da6dd4a5bf9129c6b7"} Nov 26 07:25:28 crc kubenswrapper[4492]: I1126 07:25:28.498269 4492 generic.go:334] "Generic (PLEG): container finished" podID="7efa89c6-ca72-4ab6-b840-686ec4ba19a8" containerID="726e269f8eba042ea65a9af6eaed6bf0a7b216c01a84b3da6dd4a5bf9129c6b7" exitCode=0 Nov 26 07:25:28 crc kubenswrapper[4492]: I1126 07:25:28.498313 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wj4fr" event={"ID":"7efa89c6-ca72-4ab6-b840-686ec4ba19a8","Type":"ContainerDied","Data":"726e269f8eba042ea65a9af6eaed6bf0a7b216c01a84b3da6dd4a5bf9129c6b7"} Nov 26 07:25:29 crc kubenswrapper[4492]: I1126 07:25:29.512298 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wj4fr" event={"ID":"7efa89c6-ca72-4ab6-b840-686ec4ba19a8","Type":"ContainerStarted","Data":"2833617d75541c20f07fb3711afc1ed6d13d61c9b15abe8ea8e551ffaf251ec2"} Nov 26 07:25:29 crc kubenswrapper[4492]: I1126 07:25:29.539574 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-wj4fr" podStartSLOduration=2.016290945 podStartE2EDuration="4.539557727s" podCreationTimestamp="2025-11-26 07:25:25 +0000 UTC" firstStartedPulling="2025-11-26 07:25:26.477054021 +0000 UTC m=+2222.360942319" lastFinishedPulling="2025-11-26 07:25:29.000320782 +0000 UTC m=+2224.884209101" observedRunningTime="2025-11-26 07:25:29.532870132 +0000 UTC m=+2225.416758431" watchObservedRunningTime="2025-11-26 07:25:29.539557727 +0000 UTC m=+2225.423446025" Nov 26 
Nov 26 07:25:35 crc kubenswrapper[4492]: I1126 07:25:35.439450 4492 scope.go:117] "RemoveContainer" containerID="bbb2c9d89253c0550a885310fa6b6be530f4d28143ea5439ce2e8887d31b7abb"
Nov 26 07:25:35 crc kubenswrapper[4492]: E1126 07:25:35.440557 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82"
Nov 26 07:25:35 crc kubenswrapper[4492]: I1126 07:25:35.495805 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-wj4fr"
Nov 26 07:25:35 crc kubenswrapper[4492]: I1126 07:25:35.496476 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-wj4fr"
Nov 26 07:25:35 crc kubenswrapper[4492]: I1126 07:25:35.539436 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-wj4fr"
Nov 26 07:25:35 crc kubenswrapper[4492]: I1126 07:25:35.609129 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-wj4fr"
Nov 26 07:25:35 crc kubenswrapper[4492]: I1126 07:25:35.796203 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-wj4fr"]
Nov 26 07:25:37 crc kubenswrapper[4492]: I1126 07:25:37.587612 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-wj4fr" podUID="7efa89c6-ca72-4ab6-b840-686ec4ba19a8" containerName="registry-server" containerID="cri-o://2833617d75541c20f07fb3711afc1ed6d13d61c9b15abe8ea8e551ffaf251ec2" gracePeriod=2
Nov 26 07:25:38 crc kubenswrapper[4492]: I1126 07:25:38.026456 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-wj4fr"
Nov 26 07:25:38 crc kubenswrapper[4492]: I1126 07:25:38.104836 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7efa89c6-ca72-4ab6-b840-686ec4ba19a8-utilities\") pod \"7efa89c6-ca72-4ab6-b840-686ec4ba19a8\" (UID: \"7efa89c6-ca72-4ab6-b840-686ec4ba19a8\") "
Nov 26 07:25:38 crc kubenswrapper[4492]: I1126 07:25:38.105032 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7efa89c6-ca72-4ab6-b840-686ec4ba19a8-catalog-content\") pod \"7efa89c6-ca72-4ab6-b840-686ec4ba19a8\" (UID: \"7efa89c6-ca72-4ab6-b840-686ec4ba19a8\") "
Nov 26 07:25:38 crc kubenswrapper[4492]: I1126 07:25:38.105193 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g2wcs\" (UniqueName: \"kubernetes.io/projected/7efa89c6-ca72-4ab6-b840-686ec4ba19a8-kube-api-access-g2wcs\") pod \"7efa89c6-ca72-4ab6-b840-686ec4ba19a8\" (UID: \"7efa89c6-ca72-4ab6-b840-686ec4ba19a8\") "
Nov 26 07:25:38 crc kubenswrapper[4492]: I1126 07:25:38.105824 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7efa89c6-ca72-4ab6-b840-686ec4ba19a8-utilities" (OuterVolumeSpecName: "utilities") pod "7efa89c6-ca72-4ab6-b840-686ec4ba19a8" (UID: "7efa89c6-ca72-4ab6-b840-686ec4ba19a8"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 07:25:38 crc kubenswrapper[4492]: I1126 07:25:38.112257 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7efa89c6-ca72-4ab6-b840-686ec4ba19a8-kube-api-access-g2wcs" (OuterVolumeSpecName: "kube-api-access-g2wcs") pod "7efa89c6-ca72-4ab6-b840-686ec4ba19a8" (UID: "7efa89c6-ca72-4ab6-b840-686ec4ba19a8"). InnerVolumeSpecName "kube-api-access-g2wcs". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 07:25:38 crc kubenswrapper[4492]: I1126 07:25:38.118904 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7efa89c6-ca72-4ab6-b840-686ec4ba19a8-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "7efa89c6-ca72-4ab6-b840-686ec4ba19a8" (UID: "7efa89c6-ca72-4ab6-b840-686ec4ba19a8"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 07:25:38 crc kubenswrapper[4492]: I1126 07:25:38.208656 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g2wcs\" (UniqueName: \"kubernetes.io/projected/7efa89c6-ca72-4ab6-b840-686ec4ba19a8-kube-api-access-g2wcs\") on node \"crc\" DevicePath \"\""
Nov 26 07:25:38 crc kubenswrapper[4492]: I1126 07:25:38.208700 4492 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7efa89c6-ca72-4ab6-b840-686ec4ba19a8-utilities\") on node \"crc\" DevicePath \"\""
Nov 26 07:25:38 crc kubenswrapper[4492]: I1126 07:25:38.208712 4492 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7efa89c6-ca72-4ab6-b840-686ec4ba19a8-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 26 07:25:38 crc kubenswrapper[4492]: I1126 07:25:38.603600 4492 generic.go:334] "Generic (PLEG): container finished" podID="7efa89c6-ca72-4ab6-b840-686ec4ba19a8" containerID="2833617d75541c20f07fb3711afc1ed6d13d61c9b15abe8ea8e551ffaf251ec2" exitCode=0
Nov 26 07:25:38 crc kubenswrapper[4492]: I1126 07:25:38.604812 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wj4fr" event={"ID":"7efa89c6-ca72-4ab6-b840-686ec4ba19a8","Type":"ContainerDied","Data":"2833617d75541c20f07fb3711afc1ed6d13d61c9b15abe8ea8e551ffaf251ec2"}
Nov 26 07:25:38 crc kubenswrapper[4492]: I1126 07:25:38.605800 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wj4fr" event={"ID":"7efa89c6-ca72-4ab6-b840-686ec4ba19a8","Type":"ContainerDied","Data":"ec145bb8acd9f260d87d6f40cc91b7955cfcea4c3cacc0b0e1a5263fb66a8973"}
Nov 26 07:25:38 crc kubenswrapper[4492]: I1126 07:25:38.605887 4492 scope.go:117] "RemoveContainer" containerID="2833617d75541c20f07fb3711afc1ed6d13d61c9b15abe8ea8e551ffaf251ec2"
Nov 26 07:25:38 crc kubenswrapper[4492]: I1126 07:25:38.604915 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-wj4fr"
Nov 26 07:25:38 crc kubenswrapper[4492]: I1126 07:25:38.636384 4492 scope.go:117] "RemoveContainer" containerID="726e269f8eba042ea65a9af6eaed6bf0a7b216c01a84b3da6dd4a5bf9129c6b7"
Nov 26 07:25:38 crc kubenswrapper[4492]: I1126 07:25:38.651301 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-wj4fr"]
Nov 26 07:25:38 crc kubenswrapper[4492]: I1126 07:25:38.694365 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-wj4fr"]
Nov 26 07:25:38 crc kubenswrapper[4492]: I1126 07:25:38.730974 4492 scope.go:117] "RemoveContainer" containerID="5ceead2ea97d397268243f3dee7ab25623ee5961572e5bc1ed7512fe26dd94d3"
Nov 26 07:25:38 crc kubenswrapper[4492]: I1126 07:25:38.779950 4492 scope.go:117] "RemoveContainer" containerID="2833617d75541c20f07fb3711afc1ed6d13d61c9b15abe8ea8e551ffaf251ec2"
Nov 26 07:25:38 crc kubenswrapper[4492]: E1126 07:25:38.781165 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2833617d75541c20f07fb3711afc1ed6d13d61c9b15abe8ea8e551ffaf251ec2\": container with ID starting with 2833617d75541c20f07fb3711afc1ed6d13d61c9b15abe8ea8e551ffaf251ec2 not found: ID does not exist" containerID="2833617d75541c20f07fb3711afc1ed6d13d61c9b15abe8ea8e551ffaf251ec2"
Nov 26 07:25:38 crc kubenswrapper[4492]: I1126 07:25:38.781296 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2833617d75541c20f07fb3711afc1ed6d13d61c9b15abe8ea8e551ffaf251ec2"} err="failed to get container status \"2833617d75541c20f07fb3711afc1ed6d13d61c9b15abe8ea8e551ffaf251ec2\": rpc error: code = NotFound desc = could not find container \"2833617d75541c20f07fb3711afc1ed6d13d61c9b15abe8ea8e551ffaf251ec2\": container with ID starting with 2833617d75541c20f07fb3711afc1ed6d13d61c9b15abe8ea8e551ffaf251ec2 not found: ID does not exist"
Nov 26 07:25:38 crc kubenswrapper[4492]: I1126 07:25:38.781324 4492 scope.go:117] "RemoveContainer" containerID="726e269f8eba042ea65a9af6eaed6bf0a7b216c01a84b3da6dd4a5bf9129c6b7"
Nov 26 07:25:38 crc kubenswrapper[4492]: E1126 07:25:38.785330 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"726e269f8eba042ea65a9af6eaed6bf0a7b216c01a84b3da6dd4a5bf9129c6b7\": container with ID starting with 726e269f8eba042ea65a9af6eaed6bf0a7b216c01a84b3da6dd4a5bf9129c6b7 not found: ID does not exist" containerID="726e269f8eba042ea65a9af6eaed6bf0a7b216c01a84b3da6dd4a5bf9129c6b7"
Nov 26 07:25:38 crc kubenswrapper[4492]: I1126 07:25:38.785379 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"726e269f8eba042ea65a9af6eaed6bf0a7b216c01a84b3da6dd4a5bf9129c6b7"} err="failed to get container status \"726e269f8eba042ea65a9af6eaed6bf0a7b216c01a84b3da6dd4a5bf9129c6b7\": rpc error: code = NotFound desc = could not find container \"726e269f8eba042ea65a9af6eaed6bf0a7b216c01a84b3da6dd4a5bf9129c6b7\": container with ID starting with 726e269f8eba042ea65a9af6eaed6bf0a7b216c01a84b3da6dd4a5bf9129c6b7 not found: ID does not exist"
Nov 26 07:25:38 crc kubenswrapper[4492]: I1126 07:25:38.785409 4492 scope.go:117] "RemoveContainer" containerID="5ceead2ea97d397268243f3dee7ab25623ee5961572e5bc1ed7512fe26dd94d3"
Nov 26 07:25:38 crc kubenswrapper[4492]: E1126 07:25:38.785718 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5ceead2ea97d397268243f3dee7ab25623ee5961572e5bc1ed7512fe26dd94d3\": container with ID starting with 5ceead2ea97d397268243f3dee7ab25623ee5961572e5bc1ed7512fe26dd94d3 not found: ID does not exist" containerID="5ceead2ea97d397268243f3dee7ab25623ee5961572e5bc1ed7512fe26dd94d3"
Nov 26 07:25:38 crc kubenswrapper[4492]: I1126 07:25:38.785799 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5ceead2ea97d397268243f3dee7ab25623ee5961572e5bc1ed7512fe26dd94d3"} err="failed to get container status \"5ceead2ea97d397268243f3dee7ab25623ee5961572e5bc1ed7512fe26dd94d3\": rpc error: code = NotFound desc = could not find container \"5ceead2ea97d397268243f3dee7ab25623ee5961572e5bc1ed7512fe26dd94d3\": container with ID starting with 5ceead2ea97d397268243f3dee7ab25623ee5961572e5bc1ed7512fe26dd94d3 not found: ID does not exist"
Nov 26 07:25:40 crc kubenswrapper[4492]: I1126 07:25:40.451433 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7efa89c6-ca72-4ab6-b840-686ec4ba19a8" path="/var/lib/kubelet/pods/7efa89c6-ca72-4ab6-b840-686ec4ba19a8/volumes"
Nov 26 07:25:50 crc kubenswrapper[4492]: I1126 07:25:50.438414 4492 scope.go:117] "RemoveContainer" containerID="bbb2c9d89253c0550a885310fa6b6be530f4d28143ea5439ce2e8887d31b7abb"
Nov 26 07:25:50 crc kubenswrapper[4492]: E1126 07:25:50.439505 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82"
Nov 26 07:26:05 crc kubenswrapper[4492]: I1126 07:26:05.438780 4492 scope.go:117] "RemoveContainer" containerID="bbb2c9d89253c0550a885310fa6b6be530f4d28143ea5439ce2e8887d31b7abb"
Nov 26 07:26:05 crc kubenswrapper[4492]: E1126 07:26:05.441329 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82"
Nov 26 07:26:17 crc kubenswrapper[4492]: I1126 07:26:17.439737 4492 scope.go:117] "RemoveContainer" containerID="bbb2c9d89253c0550a885310fa6b6be530f4d28143ea5439ce2e8887d31b7abb"
Nov 26 07:26:17 crc kubenswrapper[4492]: E1126 07:26:17.440896 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82"
Nov 26 07:26:29 crc kubenswrapper[4492]: I1126 07:26:29.439444 4492 scope.go:117] "RemoveContainer" containerID="bbb2c9d89253c0550a885310fa6b6be530f4d28143ea5439ce2e8887d31b7abb"
\"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 07:26:32 crc kubenswrapper[4492]: I1126 07:26:32.447861 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-6nwbd"] Nov 26 07:26:32 crc kubenswrapper[4492]: E1126 07:26:32.448612 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7efa89c6-ca72-4ab6-b840-686ec4ba19a8" containerName="extract-content" Nov 26 07:26:32 crc kubenswrapper[4492]: I1126 07:26:32.448626 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="7efa89c6-ca72-4ab6-b840-686ec4ba19a8" containerName="extract-content" Nov 26 07:26:32 crc kubenswrapper[4492]: E1126 07:26:32.448653 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7efa89c6-ca72-4ab6-b840-686ec4ba19a8" containerName="extract-utilities" Nov 26 07:26:32 crc kubenswrapper[4492]: I1126 07:26:32.448661 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="7efa89c6-ca72-4ab6-b840-686ec4ba19a8" containerName="extract-utilities" Nov 26 07:26:32 crc kubenswrapper[4492]: E1126 07:26:32.448680 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7efa89c6-ca72-4ab6-b840-686ec4ba19a8" containerName="registry-server" Nov 26 07:26:32 crc kubenswrapper[4492]: I1126 07:26:32.448686 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="7efa89c6-ca72-4ab6-b840-686ec4ba19a8" containerName="registry-server" Nov 26 07:26:32 crc kubenswrapper[4492]: I1126 07:26:32.448893 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="7efa89c6-ca72-4ab6-b840-686ec4ba19a8" containerName="registry-server" Nov 26 07:26:32 crc kubenswrapper[4492]: I1126 07:26:32.450261 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-6nwbd" Nov 26 07:26:32 crc kubenswrapper[4492]: I1126 07:26:32.468264 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-6nwbd"] Nov 26 07:26:32 crc kubenswrapper[4492]: I1126 07:26:32.554200 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tqgv7\" (UniqueName: \"kubernetes.io/projected/32ae2814-91a3-471e-916a-2c1ca0cfe0bb-kube-api-access-tqgv7\") pod \"certified-operators-6nwbd\" (UID: \"32ae2814-91a3-471e-916a-2c1ca0cfe0bb\") " pod="openshift-marketplace/certified-operators-6nwbd" Nov 26 07:26:32 crc kubenswrapper[4492]: I1126 07:26:32.554270 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/32ae2814-91a3-471e-916a-2c1ca0cfe0bb-catalog-content\") pod \"certified-operators-6nwbd\" (UID: \"32ae2814-91a3-471e-916a-2c1ca0cfe0bb\") " pod="openshift-marketplace/certified-operators-6nwbd" Nov 26 07:26:32 crc kubenswrapper[4492]: I1126 07:26:32.554391 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/32ae2814-91a3-471e-916a-2c1ca0cfe0bb-utilities\") pod \"certified-operators-6nwbd\" (UID: \"32ae2814-91a3-471e-916a-2c1ca0cfe0bb\") " pod="openshift-marketplace/certified-operators-6nwbd" Nov 26 07:26:32 crc kubenswrapper[4492]: I1126 07:26:32.655852 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/32ae2814-91a3-471e-916a-2c1ca0cfe0bb-utilities\") pod \"certified-operators-6nwbd\" (UID: \"32ae2814-91a3-471e-916a-2c1ca0cfe0bb\") " pod="openshift-marketplace/certified-operators-6nwbd" Nov 26 07:26:32 crc kubenswrapper[4492]: I1126 07:26:32.656023 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tqgv7\" (UniqueName: \"kubernetes.io/projected/32ae2814-91a3-471e-916a-2c1ca0cfe0bb-kube-api-access-tqgv7\") pod \"certified-operators-6nwbd\" (UID: \"32ae2814-91a3-471e-916a-2c1ca0cfe0bb\") " pod="openshift-marketplace/certified-operators-6nwbd" Nov 26 07:26:32 crc kubenswrapper[4492]: I1126 07:26:32.656102 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/32ae2814-91a3-471e-916a-2c1ca0cfe0bb-catalog-content\") pod \"certified-operators-6nwbd\" (UID: \"32ae2814-91a3-471e-916a-2c1ca0cfe0bb\") " pod="openshift-marketplace/certified-operators-6nwbd" Nov 26 07:26:32 crc kubenswrapper[4492]: I1126 07:26:32.656556 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/32ae2814-91a3-471e-916a-2c1ca0cfe0bb-catalog-content\") pod \"certified-operators-6nwbd\" (UID: \"32ae2814-91a3-471e-916a-2c1ca0cfe0bb\") " pod="openshift-marketplace/certified-operators-6nwbd" Nov 26 07:26:32 crc kubenswrapper[4492]: I1126 07:26:32.656656 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/32ae2814-91a3-471e-916a-2c1ca0cfe0bb-utilities\") pod \"certified-operators-6nwbd\" (UID: \"32ae2814-91a3-471e-916a-2c1ca0cfe0bb\") " pod="openshift-marketplace/certified-operators-6nwbd" Nov 26 07:26:32 crc kubenswrapper[4492]: I1126 07:26:32.678986 4492 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-tqgv7\" (UniqueName: \"kubernetes.io/projected/32ae2814-91a3-471e-916a-2c1ca0cfe0bb-kube-api-access-tqgv7\") pod \"certified-operators-6nwbd\" (UID: \"32ae2814-91a3-471e-916a-2c1ca0cfe0bb\") " pod="openshift-marketplace/certified-operators-6nwbd" Nov 26 07:26:32 crc kubenswrapper[4492]: I1126 07:26:32.770592 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-6nwbd" Nov 26 07:26:33 crc kubenswrapper[4492]: I1126 07:26:33.341443 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-6nwbd"] Nov 26 07:26:34 crc kubenswrapper[4492]: I1126 07:26:34.211071 4492 generic.go:334] "Generic (PLEG): container finished" podID="32ae2814-91a3-471e-916a-2c1ca0cfe0bb" containerID="bdcbbfcbc48680105bc356d919d12aef3d4e228e86657d370f3337e212eeaa9a" exitCode=0 Nov 26 07:26:34 crc kubenswrapper[4492]: I1126 07:26:34.211192 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6nwbd" event={"ID":"32ae2814-91a3-471e-916a-2c1ca0cfe0bb","Type":"ContainerDied","Data":"bdcbbfcbc48680105bc356d919d12aef3d4e228e86657d370f3337e212eeaa9a"} Nov 26 07:26:34 crc kubenswrapper[4492]: I1126 07:26:34.211514 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6nwbd" event={"ID":"32ae2814-91a3-471e-916a-2c1ca0cfe0bb","Type":"ContainerStarted","Data":"50cc9a77accdc0df97c0021371e306bfebc3bca1aef8673bc2e898d09e5d7988"} Nov 26 07:26:34 crc kubenswrapper[4492]: I1126 07:26:34.213208 4492 generic.go:334] "Generic (PLEG): container finished" podID="b7e9897d-b833-4ccd-b937-28c5e7a7d542" containerID="c4f5199b0c39049658d369fdfd9f1d8752736fd3ad9c604f184658089b5a03cc" exitCode=0 Nov 26 07:26:34 crc kubenswrapper[4492]: I1126 07:26:34.213231 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-b5glg" event={"ID":"b7e9897d-b833-4ccd-b937-28c5e7a7d542","Type":"ContainerDied","Data":"c4f5199b0c39049658d369fdfd9f1d8752736fd3ad9c604f184658089b5a03cc"} Nov 26 07:26:34 crc kubenswrapper[4492]: I1126 07:26:34.213685 4492 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 26 07:26:35 crc kubenswrapper[4492]: I1126 07:26:35.231291 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6nwbd" event={"ID":"32ae2814-91a3-471e-916a-2c1ca0cfe0bb","Type":"ContainerStarted","Data":"cc84c2bb4ef9c84b153eb5c4c14b1841297a3ebd473fdc70aec5211bcd22f52f"} Nov 26 07:26:35 crc kubenswrapper[4492]: I1126 07:26:35.594122 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-b5glg"
Nov 26 07:26:35 crc kubenswrapper[4492]: I1126 07:26:35.701423 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b7e9897d-b833-4ccd-b937-28c5e7a7d542-ssh-key\") pod \"b7e9897d-b833-4ccd-b937-28c5e7a7d542\" (UID: \"b7e9897d-b833-4ccd-b937-28c5e7a7d542\") "
Nov 26 07:26:35 crc kubenswrapper[4492]: I1126 07:26:35.701927 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/b7e9897d-b833-4ccd-b937-28c5e7a7d542-nova-cell1-compute-config-1\") pod \"b7e9897d-b833-4ccd-b937-28c5e7a7d542\" (UID: \"b7e9897d-b833-4ccd-b937-28c5e7a7d542\") "
Nov 26 07:26:35 crc kubenswrapper[4492]: I1126 07:26:35.702018 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/b7e9897d-b833-4ccd-b937-28c5e7a7d542-nova-migration-ssh-key-0\") pod \"b7e9897d-b833-4ccd-b937-28c5e7a7d542\" (UID: \"b7e9897d-b833-4ccd-b937-28c5e7a7d542\") "
Nov 26 07:26:35 crc kubenswrapper[4492]: I1126 07:26:35.702763 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b7e9897d-b833-4ccd-b937-28c5e7a7d542-inventory\") pod \"b7e9897d-b833-4ccd-b937-28c5e7a7d542\" (UID: \"b7e9897d-b833-4ccd-b937-28c5e7a7d542\") "
Nov 26 07:26:35 crc kubenswrapper[4492]: I1126 07:26:35.702792 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b7e9897d-b833-4ccd-b937-28c5e7a7d542-nova-combined-ca-bundle\") pod \"b7e9897d-b833-4ccd-b937-28c5e7a7d542\" (UID: \"b7e9897d-b833-4ccd-b937-28c5e7a7d542\") "
Nov 26 07:26:35 crc kubenswrapper[4492]: I1126 07:26:35.702843 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-982pf\" (UniqueName: \"kubernetes.io/projected/b7e9897d-b833-4ccd-b937-28c5e7a7d542-kube-api-access-982pf\") pod \"b7e9897d-b833-4ccd-b937-28c5e7a7d542\" (UID: \"b7e9897d-b833-4ccd-b937-28c5e7a7d542\") "
Nov 26 07:26:35 crc kubenswrapper[4492]: I1126 07:26:35.702871 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/b7e9897d-b833-4ccd-b937-28c5e7a7d542-nova-cell1-compute-config-0\") pod \"b7e9897d-b833-4ccd-b937-28c5e7a7d542\" (UID: \"b7e9897d-b833-4ccd-b937-28c5e7a7d542\") "
Nov 26 07:26:35 crc kubenswrapper[4492]: I1126 07:26:35.702915 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/b7e9897d-b833-4ccd-b937-28c5e7a7d542-nova-extra-config-0\") pod \"b7e9897d-b833-4ccd-b937-28c5e7a7d542\" (UID: \"b7e9897d-b833-4ccd-b937-28c5e7a7d542\") "
Nov 26 07:26:35 crc kubenswrapper[4492]: I1126 07:26:35.702962 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/b7e9897d-b833-4ccd-b937-28c5e7a7d542-nova-migration-ssh-key-1\") pod \"b7e9897d-b833-4ccd-b937-28c5e7a7d542\" (UID: \"b7e9897d-b833-4ccd-b937-28c5e7a7d542\") "
Nov 26 07:26:35 crc kubenswrapper[4492]: I1126 07:26:35.716705 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b7e9897d-b833-4ccd-b937-28c5e7a7d542-kube-api-access-982pf" (OuterVolumeSpecName: "kube-api-access-982pf") pod "b7e9897d-b833-4ccd-b937-28c5e7a7d542" (UID: "b7e9897d-b833-4ccd-b937-28c5e7a7d542"). InnerVolumeSpecName "kube-api-access-982pf". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 07:26:35 crc kubenswrapper[4492]: I1126 07:26:35.724596 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b7e9897d-b833-4ccd-b937-28c5e7a7d542-nova-combined-ca-bundle" (OuterVolumeSpecName: "nova-combined-ca-bundle") pod "b7e9897d-b833-4ccd-b937-28c5e7a7d542" (UID: "b7e9897d-b833-4ccd-b937-28c5e7a7d542"). InnerVolumeSpecName "nova-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 07:26:35 crc kubenswrapper[4492]: I1126 07:26:35.748987 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b7e9897d-b833-4ccd-b937-28c5e7a7d542-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "b7e9897d-b833-4ccd-b937-28c5e7a7d542" (UID: "b7e9897d-b833-4ccd-b937-28c5e7a7d542"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 07:26:35 crc kubenswrapper[4492]: I1126 07:26:35.752933 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b7e9897d-b833-4ccd-b937-28c5e7a7d542-inventory" (OuterVolumeSpecName: "inventory") pod "b7e9897d-b833-4ccd-b937-28c5e7a7d542" (UID: "b7e9897d-b833-4ccd-b937-28c5e7a7d542"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 07:26:35 crc kubenswrapper[4492]: I1126 07:26:35.753963 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b7e9897d-b833-4ccd-b937-28c5e7a7d542-nova-migration-ssh-key-0" (OuterVolumeSpecName: "nova-migration-ssh-key-0") pod "b7e9897d-b833-4ccd-b937-28c5e7a7d542" (UID: "b7e9897d-b833-4ccd-b937-28c5e7a7d542"). InnerVolumeSpecName "nova-migration-ssh-key-0". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 07:26:35 crc kubenswrapper[4492]: I1126 07:26:35.757427 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b7e9897d-b833-4ccd-b937-28c5e7a7d542-nova-extra-config-0" (OuterVolumeSpecName: "nova-extra-config-0") pod "b7e9897d-b833-4ccd-b937-28c5e7a7d542" (UID: "b7e9897d-b833-4ccd-b937-28c5e7a7d542"). InnerVolumeSpecName "nova-extra-config-0". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 07:26:35 crc kubenswrapper[4492]: I1126 07:26:35.765336 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b7e9897d-b833-4ccd-b937-28c5e7a7d542-nova-cell1-compute-config-0" (OuterVolumeSpecName: "nova-cell1-compute-config-0") pod "b7e9897d-b833-4ccd-b937-28c5e7a7d542" (UID: "b7e9897d-b833-4ccd-b937-28c5e7a7d542"). InnerVolumeSpecName "nova-cell1-compute-config-0". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 07:26:35 crc kubenswrapper[4492]: I1126 07:26:35.773481 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b7e9897d-b833-4ccd-b937-28c5e7a7d542-nova-cell1-compute-config-1" (OuterVolumeSpecName: "nova-cell1-compute-config-1") pod "b7e9897d-b833-4ccd-b937-28c5e7a7d542" (UID: "b7e9897d-b833-4ccd-b937-28c5e7a7d542"). InnerVolumeSpecName "nova-cell1-compute-config-1". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 07:26:35 crc kubenswrapper[4492]: I1126 07:26:35.778321 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b7e9897d-b833-4ccd-b937-28c5e7a7d542-nova-migration-ssh-key-1" (OuterVolumeSpecName: "nova-migration-ssh-key-1") pod "b7e9897d-b833-4ccd-b937-28c5e7a7d542" (UID: "b7e9897d-b833-4ccd-b937-28c5e7a7d542"). InnerVolumeSpecName "nova-migration-ssh-key-1". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 07:26:35 crc kubenswrapper[4492]: I1126 07:26:35.805750 4492 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b7e9897d-b833-4ccd-b937-28c5e7a7d542-inventory\") on node \"crc\" DevicePath \"\""
Nov 26 07:26:35 crc kubenswrapper[4492]: I1126 07:26:35.805858 4492 reconciler_common.go:293] "Volume detached for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b7e9897d-b833-4ccd-b937-28c5e7a7d542-nova-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 26 07:26:35 crc kubenswrapper[4492]: I1126 07:26:35.805948 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-982pf\" (UniqueName: \"kubernetes.io/projected/b7e9897d-b833-4ccd-b937-28c5e7a7d542-kube-api-access-982pf\") on node \"crc\" DevicePath \"\""
Nov 26 07:26:35 crc kubenswrapper[4492]: I1126 07:26:35.806022 4492 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/b7e9897d-b833-4ccd-b937-28c5e7a7d542-nova-cell1-compute-config-0\") on node \"crc\" DevicePath \"\""
Nov 26 07:26:35 crc kubenswrapper[4492]: I1126 07:26:35.806098 4492 reconciler_common.go:293] "Volume detached for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/b7e9897d-b833-4ccd-b937-28c5e7a7d542-nova-extra-config-0\") on node \"crc\" DevicePath \"\""
Nov 26 07:26:35 crc kubenswrapper[4492]: I1126 07:26:35.806213 4492 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/b7e9897d-b833-4ccd-b937-28c5e7a7d542-nova-migration-ssh-key-1\") on node \"crc\" DevicePath \"\""
Nov 26 07:26:35 crc kubenswrapper[4492]: I1126 07:26:35.806290 4492 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b7e9897d-b833-4ccd-b937-28c5e7a7d542-ssh-key\") on node \"crc\" DevicePath \"\""
Nov 26 07:26:35 crc kubenswrapper[4492]: I1126 07:26:35.806355 4492 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/b7e9897d-b833-4ccd-b937-28c5e7a7d542-nova-cell1-compute-config-1\") on node \"crc\" DevicePath \"\""
Nov 26 07:26:35 crc kubenswrapper[4492]: I1126 07:26:35.806420 4492 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/b7e9897d-b833-4ccd-b937-28c5e7a7d542-nova-migration-ssh-key-0\") on node \"crc\" DevicePath \"\""
Nov 26 07:26:36 crc kubenswrapper[4492]: I1126 07:26:36.242603 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-b5glg" event={"ID":"b7e9897d-b833-4ccd-b937-28c5e7a7d542","Type":"ContainerDied","Data":"1255840a145d898dff8b62ba630f8b2d373c66ba3ed124289bd023fbce9ea152"}
Nov 26 07:26:36 crc kubenswrapper[4492]: I1126 07:26:36.242982 4492 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1255840a145d898dff8b62ba630f8b2d373c66ba3ed124289bd023fbce9ea152"
containerID="1255840a145d898dff8b62ba630f8b2d373c66ba3ed124289bd023fbce9ea152" Nov 26 07:26:36 crc kubenswrapper[4492]: I1126 07:26:36.242660 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-b5glg" Nov 26 07:26:36 crc kubenswrapper[4492]: I1126 07:26:36.245896 4492 generic.go:334] "Generic (PLEG): container finished" podID="32ae2814-91a3-471e-916a-2c1ca0cfe0bb" containerID="cc84c2bb4ef9c84b153eb5c4c14b1841297a3ebd473fdc70aec5211bcd22f52f" exitCode=0 Nov 26 07:26:36 crc kubenswrapper[4492]: I1126 07:26:36.245963 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6nwbd" event={"ID":"32ae2814-91a3-471e-916a-2c1ca0cfe0bb","Type":"ContainerDied","Data":"cc84c2bb4ef9c84b153eb5c4c14b1841297a3ebd473fdc70aec5211bcd22f52f"} Nov 26 07:26:36 crc kubenswrapper[4492]: I1126 07:26:36.362467 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-rvrwl"] Nov 26 07:26:36 crc kubenswrapper[4492]: E1126 07:26:36.363007 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b7e9897d-b833-4ccd-b937-28c5e7a7d542" containerName="nova-edpm-deployment-openstack-edpm-ipam" Nov 26 07:26:36 crc kubenswrapper[4492]: I1126 07:26:36.363028 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="b7e9897d-b833-4ccd-b937-28c5e7a7d542" containerName="nova-edpm-deployment-openstack-edpm-ipam" Nov 26 07:26:36 crc kubenswrapper[4492]: I1126 07:26:36.363306 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="b7e9897d-b833-4ccd-b937-28c5e7a7d542" containerName="nova-edpm-deployment-openstack-edpm-ipam" Nov 26 07:26:36 crc kubenswrapper[4492]: I1126 07:26:36.364080 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-rvrwl" Nov 26 07:26:36 crc kubenswrapper[4492]: I1126 07:26:36.367300 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-mxgqb" Nov 26 07:26:36 crc kubenswrapper[4492]: I1126 07:26:36.367331 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 26 07:26:36 crc kubenswrapper[4492]: I1126 07:26:36.367355 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 26 07:26:36 crc kubenswrapper[4492]: I1126 07:26:36.367488 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-compute-config-data" Nov 26 07:26:36 crc kubenswrapper[4492]: I1126 07:26:36.367503 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 26 07:26:36 crc kubenswrapper[4492]: I1126 07:26:36.371083 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-rvrwl"] Nov 26 07:26:36 crc kubenswrapper[4492]: I1126 07:26:36.420026 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/bd69c3ed-a17f-46f9-9838-732ab7bdacef-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-rvrwl\" (UID: \"bd69c3ed-a17f-46f9-9838-732ab7bdacef\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-rvrwl" Nov 26 07:26:36 crc kubenswrapper[4492]: I1126 07:26:36.420218 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/bd69c3ed-a17f-46f9-9838-732ab7bdacef-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-rvrwl\" (UID: \"bd69c3ed-a17f-46f9-9838-732ab7bdacef\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-rvrwl" Nov 26 07:26:36 crc kubenswrapper[4492]: I1126 07:26:36.420325 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/bd69c3ed-a17f-46f9-9838-732ab7bdacef-ssh-key\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-rvrwl\" (UID: \"bd69c3ed-a17f-46f9-9838-732ab7bdacef\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-rvrwl" Nov 26 07:26:36 crc kubenswrapper[4492]: I1126 07:26:36.420434 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd69c3ed-a17f-46f9-9838-732ab7bdacef-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-rvrwl\" (UID: \"bd69c3ed-a17f-46f9-9838-732ab7bdacef\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-rvrwl" Nov 26 07:26:36 crc kubenswrapper[4492]: I1126 07:26:36.420541 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/bd69c3ed-a17f-46f9-9838-732ab7bdacef-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-rvrwl\" (UID: \"bd69c3ed-a17f-46f9-9838-732ab7bdacef\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-rvrwl" Nov 26 07:26:36 crc kubenswrapper[4492]: I1126 
Nov 26 07:26:36 crc kubenswrapper[4492]: I1126 07:26:36.420657 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mttl8\" (UniqueName: \"kubernetes.io/projected/bd69c3ed-a17f-46f9-9838-732ab7bdacef-kube-api-access-mttl8\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-rvrwl\" (UID: \"bd69c3ed-a17f-46f9-9838-732ab7bdacef\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-rvrwl"
Nov 26 07:26:36 crc kubenswrapper[4492]: I1126 07:26:36.420743 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/bd69c3ed-a17f-46f9-9838-732ab7bdacef-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-rvrwl\" (UID: \"bd69c3ed-a17f-46f9-9838-732ab7bdacef\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-rvrwl"
Nov 26 07:26:36 crc kubenswrapper[4492]: I1126 07:26:36.521752 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mttl8\" (UniqueName: \"kubernetes.io/projected/bd69c3ed-a17f-46f9-9838-732ab7bdacef-kube-api-access-mttl8\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-rvrwl\" (UID: \"bd69c3ed-a17f-46f9-9838-732ab7bdacef\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-rvrwl"
Nov 26 07:26:36 crc kubenswrapper[4492]: I1126 07:26:36.521951 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/bd69c3ed-a17f-46f9-9838-732ab7bdacef-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-rvrwl\" (UID: \"bd69c3ed-a17f-46f9-9838-732ab7bdacef\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-rvrwl"
Nov 26 07:26:36 crc kubenswrapper[4492]: I1126 07:26:36.522128 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/bd69c3ed-a17f-46f9-9838-732ab7bdacef-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-rvrwl\" (UID: \"bd69c3ed-a17f-46f9-9838-732ab7bdacef\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-rvrwl"
Nov 26 07:26:36 crc kubenswrapper[4492]: I1126 07:26:36.522270 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/bd69c3ed-a17f-46f9-9838-732ab7bdacef-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-rvrwl\" (UID: \"bd69c3ed-a17f-46f9-9838-732ab7bdacef\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-rvrwl"
Nov 26 07:26:36 crc kubenswrapper[4492]: I1126 07:26:36.522364 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/bd69c3ed-a17f-46f9-9838-732ab7bdacef-ssh-key\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-rvrwl\" (UID: \"bd69c3ed-a17f-46f9-9838-732ab7bdacef\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-rvrwl"
Nov 26 07:26:36 crc kubenswrapper[4492]: I1126 07:26:36.522463 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd69c3ed-a17f-46f9-9838-732ab7bdacef-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-rvrwl\" (UID: \"bd69c3ed-a17f-46f9-9838-732ab7bdacef\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-rvrwl"
Nov 26 07:26:36 crc kubenswrapper[4492]: I1126 07:26:36.522582 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/bd69c3ed-a17f-46f9-9838-732ab7bdacef-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-rvrwl\" (UID: \"bd69c3ed-a17f-46f9-9838-732ab7bdacef\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-rvrwl"
Nov 26 07:26:36 crc kubenswrapper[4492]: I1126 07:26:36.528950 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd69c3ed-a17f-46f9-9838-732ab7bdacef-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-rvrwl\" (UID: \"bd69c3ed-a17f-46f9-9838-732ab7bdacef\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-rvrwl"
Nov 26 07:26:36 crc kubenswrapper[4492]: I1126 07:26:36.528977 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/bd69c3ed-a17f-46f9-9838-732ab7bdacef-ssh-key\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-rvrwl\" (UID: \"bd69c3ed-a17f-46f9-9838-732ab7bdacef\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-rvrwl"
Nov 26 07:26:36 crc kubenswrapper[4492]: I1126 07:26:36.529066 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/bd69c3ed-a17f-46f9-9838-732ab7bdacef-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-rvrwl\" (UID: \"bd69c3ed-a17f-46f9-9838-732ab7bdacef\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-rvrwl"
Nov 26 07:26:36 crc kubenswrapper[4492]: I1126 07:26:36.529343 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/bd69c3ed-a17f-46f9-9838-732ab7bdacef-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-rvrwl\" (UID: \"bd69c3ed-a17f-46f9-9838-732ab7bdacef\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-rvrwl"
Nov 26 07:26:36 crc kubenswrapper[4492]: I1126 07:26:36.530751 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/bd69c3ed-a17f-46f9-9838-732ab7bdacef-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-rvrwl\" (UID: \"bd69c3ed-a17f-46f9-9838-732ab7bdacef\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-rvrwl"
Nov 26 07:26:36 crc kubenswrapper[4492]: I1126 07:26:36.530832 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/bd69c3ed-a17f-46f9-9838-732ab7bdacef-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-rvrwl\" (UID: \"bd69c3ed-a17f-46f9-9838-732ab7bdacef\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-rvrwl"
Nov 26 07:26:36 crc kubenswrapper[4492]: I1126 07:26:36.544537 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mttl8\" (UniqueName: \"kubernetes.io/projected/bd69c3ed-a17f-46f9-9838-732ab7bdacef-kube-api-access-mttl8\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-rvrwl\" (UID: \"bd69c3ed-a17f-46f9-9838-732ab7bdacef\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-rvrwl"
Nov 26 07:26:36 crc kubenswrapper[4492]: I1126 07:26:36.680826 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-rvrwl"
Nov 26 07:26:37 crc kubenswrapper[4492]: I1126 07:26:37.219702 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-rvrwl"]
Nov 26 07:26:37 crc kubenswrapper[4492]: W1126 07:26:37.221775 4492 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbd69c3ed_a17f_46f9_9838_732ab7bdacef.slice/crio-ea794b6432faa5e6f86d7c68e51cb1dffaf092122d3cc59f16e695c693983b4f WatchSource:0}: Error finding container ea794b6432faa5e6f86d7c68e51cb1dffaf092122d3cc59f16e695c693983b4f: Status 404 returned error can't find the container with id ea794b6432faa5e6f86d7c68e51cb1dffaf092122d3cc59f16e695c693983b4f
Nov 26 07:26:37 crc kubenswrapper[4492]: I1126 07:26:37.255755 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6nwbd" event={"ID":"32ae2814-91a3-471e-916a-2c1ca0cfe0bb","Type":"ContainerStarted","Data":"8874059dd6927c208c81af33a5aa5225ecac91296e3e27bde292f51d6cd11333"}
Nov 26 07:26:37 crc kubenswrapper[4492]: I1126 07:26:37.258823 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-rvrwl" event={"ID":"bd69c3ed-a17f-46f9-9838-732ab7bdacef","Type":"ContainerStarted","Data":"ea794b6432faa5e6f86d7c68e51cb1dffaf092122d3cc59f16e695c693983b4f"}
Nov 26 07:26:37 crc kubenswrapper[4492]: I1126 07:26:37.283860 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-6nwbd" podStartSLOduration=2.7908143770000002 podStartE2EDuration="5.283847279s" podCreationTimestamp="2025-11-26 07:26:32 +0000 UTC" firstStartedPulling="2025-11-26 07:26:34.21345011 +0000 UTC m=+2290.097338408" lastFinishedPulling="2025-11-26 07:26:36.706483023 +0000 UTC m=+2292.590371310" observedRunningTime="2025-11-26 07:26:37.269090962 +0000 UTC m=+2293.152979260" watchObservedRunningTime="2025-11-26 07:26:37.283847279 +0000 UTC m=+2293.167735578"
Nov 26 07:26:38 crc kubenswrapper[4492]: I1126 07:26:38.279523 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-rvrwl" event={"ID":"bd69c3ed-a17f-46f9-9838-732ab7bdacef","Type":"ContainerStarted","Data":"e4aed0d98928d308270b285e47b61bca437c09086623175c6633fd51cc6a3181"}
Nov 26 07:26:38 crc kubenswrapper[4492]: I1126 07:26:38.311219 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-rvrwl" podStartSLOduration=1.826896984 podStartE2EDuration="2.311203321s" podCreationTimestamp="2025-11-26 07:26:36 +0000 UTC" firstStartedPulling="2025-11-26 07:26:37.224158815 +0000 UTC m=+2293.108047113" lastFinishedPulling="2025-11-26 07:26:37.708465152 +0000 UTC m=+2293.592353450" observedRunningTime="2025-11-26 07:26:38.310845488 +0000 UTC m=+2294.194733786" watchObservedRunningTime="2025-11-26 07:26:38.311203321 +0000 UTC m=+2294.195091619"
Nov 26 07:26:42 crc kubenswrapper[4492]: I1126 07:26:42.771560 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-6nwbd"
Nov 26 07:26:42 crc kubenswrapper[4492]: I1126 07:26:42.772470 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-6nwbd"
Nov 26 07:26:42 crc kubenswrapper[4492]: I1126 07:26:42.811962 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-6nwbd"
Nov 26 07:26:43 crc kubenswrapper[4492]: I1126 07:26:43.377349 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-6nwbd"
Nov 26 07:26:43 crc kubenswrapper[4492]: I1126 07:26:43.429833 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-6nwbd"]
Nov 26 07:26:44 crc kubenswrapper[4492]: I1126 07:26:44.438284 4492 scope.go:117] "RemoveContainer" containerID="bbb2c9d89253c0550a885310fa6b6be530f4d28143ea5439ce2e8887d31b7abb"
Nov 26 07:26:44 crc kubenswrapper[4492]: E1126 07:26:44.438558 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82"
Nov 26 07:26:45 crc kubenswrapper[4492]: I1126 07:26:45.357229 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-6nwbd" podUID="32ae2814-91a3-471e-916a-2c1ca0cfe0bb" containerName="registry-server" containerID="cri-o://8874059dd6927c208c81af33a5aa5225ecac91296e3e27bde292f51d6cd11333" gracePeriod=2
Nov 26 07:26:45 crc kubenswrapper[4492]: I1126 07:26:45.749262 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-6nwbd"
Nov 26 07:26:45 crc kubenswrapper[4492]: I1126 07:26:45.845712 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/32ae2814-91a3-471e-916a-2c1ca0cfe0bb-utilities\") pod \"32ae2814-91a3-471e-916a-2c1ca0cfe0bb\" (UID: \"32ae2814-91a3-471e-916a-2c1ca0cfe0bb\") "
Nov 26 07:26:45 crc kubenswrapper[4492]: I1126 07:26:45.845861 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/32ae2814-91a3-471e-916a-2c1ca0cfe0bb-catalog-content\") pod \"32ae2814-91a3-471e-916a-2c1ca0cfe0bb\" (UID: \"32ae2814-91a3-471e-916a-2c1ca0cfe0bb\") "
Nov 26 07:26:45 crc kubenswrapper[4492]: I1126 07:26:45.845898 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tqgv7\" (UniqueName: \"kubernetes.io/projected/32ae2814-91a3-471e-916a-2c1ca0cfe0bb-kube-api-access-tqgv7\") pod \"32ae2814-91a3-471e-916a-2c1ca0cfe0bb\" (UID: \"32ae2814-91a3-471e-916a-2c1ca0cfe0bb\") "
Nov 26 07:26:45 crc kubenswrapper[4492]: I1126 07:26:45.846625 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/32ae2814-91a3-471e-916a-2c1ca0cfe0bb-utilities" (OuterVolumeSpecName: "utilities") pod "32ae2814-91a3-471e-916a-2c1ca0cfe0bb" (UID: "32ae2814-91a3-471e-916a-2c1ca0cfe0bb"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 07:26:45 crc kubenswrapper[4492]: I1126 07:26:45.852004 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/32ae2814-91a3-471e-916a-2c1ca0cfe0bb-kube-api-access-tqgv7" (OuterVolumeSpecName: "kube-api-access-tqgv7") pod "32ae2814-91a3-471e-916a-2c1ca0cfe0bb" (UID: "32ae2814-91a3-471e-916a-2c1ca0cfe0bb"). InnerVolumeSpecName "kube-api-access-tqgv7". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 07:26:45 crc kubenswrapper[4492]: I1126 07:26:45.894075 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/32ae2814-91a3-471e-916a-2c1ca0cfe0bb-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "32ae2814-91a3-471e-916a-2c1ca0cfe0bb" (UID: "32ae2814-91a3-471e-916a-2c1ca0cfe0bb"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 07:26:45 crc kubenswrapper[4492]: I1126 07:26:45.950111 4492 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/32ae2814-91a3-471e-916a-2c1ca0cfe0bb-utilities\") on node \"crc\" DevicePath \"\""
Nov 26 07:26:45 crc kubenswrapper[4492]: I1126 07:26:45.950183 4492 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/32ae2814-91a3-471e-916a-2c1ca0cfe0bb-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 26 07:26:45 crc kubenswrapper[4492]: I1126 07:26:45.950203 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tqgv7\" (UniqueName: \"kubernetes.io/projected/32ae2814-91a3-471e-916a-2c1ca0cfe0bb-kube-api-access-tqgv7\") on node \"crc\" DevicePath \"\""
Nov 26 07:26:46 crc kubenswrapper[4492]: I1126 07:26:46.372901 4492 generic.go:334] "Generic (PLEG): container finished" podID="32ae2814-91a3-471e-916a-2c1ca0cfe0bb" containerID="8874059dd6927c208c81af33a5aa5225ecac91296e3e27bde292f51d6cd11333" exitCode=0
Nov 26 07:26:46 crc kubenswrapper[4492]: I1126 07:26:46.372957 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6nwbd" event={"ID":"32ae2814-91a3-471e-916a-2c1ca0cfe0bb","Type":"ContainerDied","Data":"8874059dd6927c208c81af33a5aa5225ecac91296e3e27bde292f51d6cd11333"}
Nov 26 07:26:46 crc kubenswrapper[4492]: I1126 07:26:46.372991 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6nwbd" event={"ID":"32ae2814-91a3-471e-916a-2c1ca0cfe0bb","Type":"ContainerDied","Data":"50cc9a77accdc0df97c0021371e306bfebc3bca1aef8673bc2e898d09e5d7988"}
Nov 26 07:26:46 crc kubenswrapper[4492]: I1126 07:26:46.373014 4492 scope.go:117] "RemoveContainer" containerID="8874059dd6927c208c81af33a5aa5225ecac91296e3e27bde292f51d6cd11333"
Nov 26 07:26:46 crc kubenswrapper[4492]: I1126 07:26:46.373441 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-6nwbd"
Nov 26 07:26:46 crc kubenswrapper[4492]: I1126 07:26:46.396398 4492 scope.go:117] "RemoveContainer" containerID="cc84c2bb4ef9c84b153eb5c4c14b1841297a3ebd473fdc70aec5211bcd22f52f"
Nov 26 07:26:46 crc kubenswrapper[4492]: I1126 07:26:46.419887 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-6nwbd"]
Nov 26 07:26:46 crc kubenswrapper[4492]: I1126 07:26:46.428119 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-6nwbd"]
Nov 26 07:26:46 crc kubenswrapper[4492]: I1126 07:26:46.430385 4492 scope.go:117] "RemoveContainer" containerID="bdcbbfcbc48680105bc356d919d12aef3d4e228e86657d370f3337e212eeaa9a"
Nov 26 07:26:46 crc kubenswrapper[4492]: I1126 07:26:46.450438 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="32ae2814-91a3-471e-916a-2c1ca0cfe0bb" path="/var/lib/kubelet/pods/32ae2814-91a3-471e-916a-2c1ca0cfe0bb/volumes"
Nov 26 07:26:46 crc kubenswrapper[4492]: I1126 07:26:46.463834 4492 scope.go:117] "RemoveContainer" containerID="8874059dd6927c208c81af33a5aa5225ecac91296e3e27bde292f51d6cd11333"
Nov 26 07:26:46 crc kubenswrapper[4492]: E1126 07:26:46.464418 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8874059dd6927c208c81af33a5aa5225ecac91296e3e27bde292f51d6cd11333\": container with ID starting with 8874059dd6927c208c81af33a5aa5225ecac91296e3e27bde292f51d6cd11333 not found: ID does not exist" containerID="8874059dd6927c208c81af33a5aa5225ecac91296e3e27bde292f51d6cd11333"
Nov 26 07:26:46 crc kubenswrapper[4492]: I1126 07:26:46.464483 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8874059dd6927c208c81af33a5aa5225ecac91296e3e27bde292f51d6cd11333"} err="failed to get container status \"8874059dd6927c208c81af33a5aa5225ecac91296e3e27bde292f51d6cd11333\": rpc error: code = NotFound desc = could not find container \"8874059dd6927c208c81af33a5aa5225ecac91296e3e27bde292f51d6cd11333\": container with ID starting with 8874059dd6927c208c81af33a5aa5225ecac91296e3e27bde292f51d6cd11333 not found: ID does not exist"
Nov 26 07:26:46 crc kubenswrapper[4492]: I1126 07:26:46.464526 4492 scope.go:117] "RemoveContainer" containerID="cc84c2bb4ef9c84b153eb5c4c14b1841297a3ebd473fdc70aec5211bcd22f52f"
Nov 26 07:26:46 crc kubenswrapper[4492]: E1126 07:26:46.464992 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cc84c2bb4ef9c84b153eb5c4c14b1841297a3ebd473fdc70aec5211bcd22f52f\": container with ID starting with cc84c2bb4ef9c84b153eb5c4c14b1841297a3ebd473fdc70aec5211bcd22f52f not found: ID does not exist" containerID="cc84c2bb4ef9c84b153eb5c4c14b1841297a3ebd473fdc70aec5211bcd22f52f"
Nov 26 07:26:46 crc kubenswrapper[4492]: I1126 07:26:46.465036 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cc84c2bb4ef9c84b153eb5c4c14b1841297a3ebd473fdc70aec5211bcd22f52f"} err="failed to get container status \"cc84c2bb4ef9c84b153eb5c4c14b1841297a3ebd473fdc70aec5211bcd22f52f\": rpc error: code = NotFound desc = could not find container \"cc84c2bb4ef9c84b153eb5c4c14b1841297a3ebd473fdc70aec5211bcd22f52f\": container with ID starting with cc84c2bb4ef9c84b153eb5c4c14b1841297a3ebd473fdc70aec5211bcd22f52f not found: ID does not exist"
Nov 26 07:26:46 crc kubenswrapper[4492]: I1126 07:26:46.465068 4492 scope.go:117] "RemoveContainer" containerID="bdcbbfcbc48680105bc356d919d12aef3d4e228e86657d370f3337e212eeaa9a"
Nov 26 07:26:46 crc kubenswrapper[4492]: E1126 07:26:46.465478 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bdcbbfcbc48680105bc356d919d12aef3d4e228e86657d370f3337e212eeaa9a\": container with ID starting with bdcbbfcbc48680105bc356d919d12aef3d4e228e86657d370f3337e212eeaa9a not found: ID does not exist" containerID="bdcbbfcbc48680105bc356d919d12aef3d4e228e86657d370f3337e212eeaa9a"
Nov 26 07:26:46 crc kubenswrapper[4492]: I1126 07:26:46.465518 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bdcbbfcbc48680105bc356d919d12aef3d4e228e86657d370f3337e212eeaa9a"} err="failed to get container status \"bdcbbfcbc48680105bc356d919d12aef3d4e228e86657d370f3337e212eeaa9a\": rpc error: code = NotFound desc = could not find container \"bdcbbfcbc48680105bc356d919d12aef3d4e228e86657d370f3337e212eeaa9a\": container with ID starting with bdcbbfcbc48680105bc356d919d12aef3d4e228e86657d370f3337e212eeaa9a not found: ID does not exist"
Nov 26 07:26:59 crc kubenswrapper[4492]: I1126 07:26:59.438648 4492 scope.go:117] "RemoveContainer" containerID="bbb2c9d89253c0550a885310fa6b6be530f4d28143ea5439ce2e8887d31b7abb"
Nov 26 07:26:59 crc kubenswrapper[4492]: E1126 07:26:59.441316 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82"
Nov 26 07:27:12 crc kubenswrapper[4492]: I1126 07:27:12.439406 4492 scope.go:117] "RemoveContainer" containerID="bbb2c9d89253c0550a885310fa6b6be530f4d28143ea5439ce2e8887d31b7abb"
Nov 26 07:27:12 crc kubenswrapper[4492]: E1126 07:27:12.440461 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82"
Nov 26 07:27:25 crc kubenswrapper[4492]: I1126 07:27:25.439338 4492 scope.go:117] "RemoveContainer" containerID="bbb2c9d89253c0550a885310fa6b6be530f4d28143ea5439ce2e8887d31b7abb"
Nov 26 07:27:25 crc kubenswrapper[4492]: E1126 07:27:25.440644 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82"
Nov 26 07:27:40 crc kubenswrapper[4492]: I1126 07:27:40.439883 4492 scope.go:117] "RemoveContainer" containerID="bbb2c9d89253c0550a885310fa6b6be530f4d28143ea5439ce2e8887d31b7abb"
Nov 26 07:27:40 crc kubenswrapper[4492]: E1126 07:27:40.442217 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82"
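[From 07:26:44 through 07:28:53 the same machine-config-daemon pair repeats every 11-15 seconds: each periodic pod sync asks to restart the crashed container and is refused while the CrashLoopBackOff window is still open; "back-off 5m0s" is the cap of kubelet's doubling restart backoff, which this run has reached. A toy version of that schedule (the 10s initial delay is an assumption, not taken from this log):

package main

import (
	"fmt"
	"time"
)

// Illustrative only: CrashLoopBackOff doubles the restart delay up to a cap.
func main() {
	delay, limit := 10*time.Second, 5*time.Minute
	for attempt := 1; attempt <= 8; attempt++ {
		fmt.Printf("restart attempt %d: back-off %v\n", attempt, delay)
		delay *= 2
		if delay > limit {
			delay = limit // the "back-off 5m0s" seen in the entries above
		}
	}
}
]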
\"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 07:27:51 crc kubenswrapper[4492]: I1126 07:27:51.439152 4492 scope.go:117] "RemoveContainer" containerID="bbb2c9d89253c0550a885310fa6b6be530f4d28143ea5439ce2e8887d31b7abb" Nov 26 07:27:51 crc kubenswrapper[4492]: E1126 07:27:51.440438 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 07:28:03 crc kubenswrapper[4492]: I1126 07:28:03.438262 4492 scope.go:117] "RemoveContainer" containerID="bbb2c9d89253c0550a885310fa6b6be530f4d28143ea5439ce2e8887d31b7abb" Nov 26 07:28:03 crc kubenswrapper[4492]: E1126 07:28:03.439123 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 07:28:15 crc kubenswrapper[4492]: I1126 07:28:15.439668 4492 scope.go:117] "RemoveContainer" containerID="bbb2c9d89253c0550a885310fa6b6be530f4d28143ea5439ce2e8887d31b7abb" Nov 26 07:28:15 crc kubenswrapper[4492]: E1126 07:28:15.440452 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 07:28:26 crc kubenswrapper[4492]: I1126 07:28:26.439320 4492 scope.go:117] "RemoveContainer" containerID="bbb2c9d89253c0550a885310fa6b6be530f4d28143ea5439ce2e8887d31b7abb" Nov 26 07:28:26 crc kubenswrapper[4492]: E1126 07:28:26.440435 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 07:28:33 crc kubenswrapper[4492]: I1126 07:28:33.375824 4492 generic.go:334] "Generic (PLEG): container finished" podID="bd69c3ed-a17f-46f9-9838-732ab7bdacef" containerID="e4aed0d98928d308270b285e47b61bca437c09086623175c6633fd51cc6a3181" exitCode=0 Nov 26 07:28:33 crc kubenswrapper[4492]: I1126 07:28:33.375925 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-rvrwl" 
event={"ID":"bd69c3ed-a17f-46f9-9838-732ab7bdacef","Type":"ContainerDied","Data":"e4aed0d98928d308270b285e47b61bca437c09086623175c6633fd51cc6a3181"} Nov 26 07:28:34 crc kubenswrapper[4492]: I1126 07:28:34.747202 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-rvrwl" Nov 26 07:28:34 crc kubenswrapper[4492]: I1126 07:28:34.922219 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/bd69c3ed-a17f-46f9-9838-732ab7bdacef-ssh-key\") pod \"bd69c3ed-a17f-46f9-9838-732ab7bdacef\" (UID: \"bd69c3ed-a17f-46f9-9838-732ab7bdacef\") " Nov 26 07:28:34 crc kubenswrapper[4492]: I1126 07:28:34.922266 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/bd69c3ed-a17f-46f9-9838-732ab7bdacef-ceilometer-compute-config-data-0\") pod \"bd69c3ed-a17f-46f9-9838-732ab7bdacef\" (UID: \"bd69c3ed-a17f-46f9-9838-732ab7bdacef\") " Nov 26 07:28:34 crc kubenswrapper[4492]: I1126 07:28:34.922293 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/bd69c3ed-a17f-46f9-9838-732ab7bdacef-ceilometer-compute-config-data-1\") pod \"bd69c3ed-a17f-46f9-9838-732ab7bdacef\" (UID: \"bd69c3ed-a17f-46f9-9838-732ab7bdacef\") " Nov 26 07:28:34 crc kubenswrapper[4492]: I1126 07:28:34.922324 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd69c3ed-a17f-46f9-9838-732ab7bdacef-telemetry-combined-ca-bundle\") pod \"bd69c3ed-a17f-46f9-9838-732ab7bdacef\" (UID: \"bd69c3ed-a17f-46f9-9838-732ab7bdacef\") " Nov 26 07:28:34 crc kubenswrapper[4492]: I1126 07:28:34.922386 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mttl8\" (UniqueName: \"kubernetes.io/projected/bd69c3ed-a17f-46f9-9838-732ab7bdacef-kube-api-access-mttl8\") pod \"bd69c3ed-a17f-46f9-9838-732ab7bdacef\" (UID: \"bd69c3ed-a17f-46f9-9838-732ab7bdacef\") " Nov 26 07:28:34 crc kubenswrapper[4492]: I1126 07:28:34.922407 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/bd69c3ed-a17f-46f9-9838-732ab7bdacef-ceilometer-compute-config-data-2\") pod \"bd69c3ed-a17f-46f9-9838-732ab7bdacef\" (UID: \"bd69c3ed-a17f-46f9-9838-732ab7bdacef\") " Nov 26 07:28:34 crc kubenswrapper[4492]: I1126 07:28:34.922434 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/bd69c3ed-a17f-46f9-9838-732ab7bdacef-inventory\") pod \"bd69c3ed-a17f-46f9-9838-732ab7bdacef\" (UID: \"bd69c3ed-a17f-46f9-9838-732ab7bdacef\") " Nov 26 07:28:34 crc kubenswrapper[4492]: I1126 07:28:34.930558 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd69c3ed-a17f-46f9-9838-732ab7bdacef-kube-api-access-mttl8" (OuterVolumeSpecName: "kube-api-access-mttl8") pod "bd69c3ed-a17f-46f9-9838-732ab7bdacef" (UID: "bd69c3ed-a17f-46f9-9838-732ab7bdacef"). InnerVolumeSpecName "kube-api-access-mttl8". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:28:34 crc kubenswrapper[4492]: I1126 07:28:34.941761 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bd69c3ed-a17f-46f9-9838-732ab7bdacef-telemetry-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-combined-ca-bundle") pod "bd69c3ed-a17f-46f9-9838-732ab7bdacef" (UID: "bd69c3ed-a17f-46f9-9838-732ab7bdacef"). InnerVolumeSpecName "telemetry-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:28:34 crc kubenswrapper[4492]: I1126 07:28:34.949023 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bd69c3ed-a17f-46f9-9838-732ab7bdacef-ceilometer-compute-config-data-1" (OuterVolumeSpecName: "ceilometer-compute-config-data-1") pod "bd69c3ed-a17f-46f9-9838-732ab7bdacef" (UID: "bd69c3ed-a17f-46f9-9838-732ab7bdacef"). InnerVolumeSpecName "ceilometer-compute-config-data-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:28:34 crc kubenswrapper[4492]: I1126 07:28:34.949742 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bd69c3ed-a17f-46f9-9838-732ab7bdacef-ceilometer-compute-config-data-0" (OuterVolumeSpecName: "ceilometer-compute-config-data-0") pod "bd69c3ed-a17f-46f9-9838-732ab7bdacef" (UID: "bd69c3ed-a17f-46f9-9838-732ab7bdacef"). InnerVolumeSpecName "ceilometer-compute-config-data-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:28:34 crc kubenswrapper[4492]: I1126 07:28:34.950029 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bd69c3ed-a17f-46f9-9838-732ab7bdacef-ceilometer-compute-config-data-2" (OuterVolumeSpecName: "ceilometer-compute-config-data-2") pod "bd69c3ed-a17f-46f9-9838-732ab7bdacef" (UID: "bd69c3ed-a17f-46f9-9838-732ab7bdacef"). InnerVolumeSpecName "ceilometer-compute-config-data-2". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:28:34 crc kubenswrapper[4492]: I1126 07:28:34.951021 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bd69c3ed-a17f-46f9-9838-732ab7bdacef-inventory" (OuterVolumeSpecName: "inventory") pod "bd69c3ed-a17f-46f9-9838-732ab7bdacef" (UID: "bd69c3ed-a17f-46f9-9838-732ab7bdacef"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:28:34 crc kubenswrapper[4492]: I1126 07:28:34.954294 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bd69c3ed-a17f-46f9-9838-732ab7bdacef-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "bd69c3ed-a17f-46f9-9838-732ab7bdacef" (UID: "bd69c3ed-a17f-46f9-9838-732ab7bdacef"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:28:35 crc kubenswrapper[4492]: I1126 07:28:35.024780 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mttl8\" (UniqueName: \"kubernetes.io/projected/bd69c3ed-a17f-46f9-9838-732ab7bdacef-kube-api-access-mttl8\") on node \"crc\" DevicePath \"\"" Nov 26 07:28:35 crc kubenswrapper[4492]: I1126 07:28:35.024820 4492 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/bd69c3ed-a17f-46f9-9838-732ab7bdacef-ceilometer-compute-config-data-2\") on node \"crc\" DevicePath \"\"" Nov 26 07:28:35 crc kubenswrapper[4492]: I1126 07:28:35.024831 4492 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/bd69c3ed-a17f-46f9-9838-732ab7bdacef-inventory\") on node \"crc\" DevicePath \"\"" Nov 26 07:28:35 crc kubenswrapper[4492]: I1126 07:28:35.024840 4492 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/bd69c3ed-a17f-46f9-9838-732ab7bdacef-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 26 07:28:35 crc kubenswrapper[4492]: I1126 07:28:35.024850 4492 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/bd69c3ed-a17f-46f9-9838-732ab7bdacef-ceilometer-compute-config-data-0\") on node \"crc\" DevicePath \"\"" Nov 26 07:28:35 crc kubenswrapper[4492]: I1126 07:28:35.024862 4492 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/bd69c3ed-a17f-46f9-9838-732ab7bdacef-ceilometer-compute-config-data-1\") on node \"crc\" DevicePath \"\"" Nov 26 07:28:35 crc kubenswrapper[4492]: I1126 07:28:35.024871 4492 reconciler_common.go:293] "Volume detached for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd69c3ed-a17f-46f9-9838-732ab7bdacef-telemetry-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 07:28:35 crc kubenswrapper[4492]: I1126 07:28:35.396738 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-rvrwl" event={"ID":"bd69c3ed-a17f-46f9-9838-732ab7bdacef","Type":"ContainerDied","Data":"ea794b6432faa5e6f86d7c68e51cb1dffaf092122d3cc59f16e695c693983b4f"} Nov 26 07:28:35 crc kubenswrapper[4492]: I1126 07:28:35.397016 4492 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ea794b6432faa5e6f86d7c68e51cb1dffaf092122d3cc59f16e695c693983b4f" Nov 26 07:28:35 crc kubenswrapper[4492]: I1126 07:28:35.396769 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-rvrwl" Nov 26 07:28:38 crc kubenswrapper[4492]: I1126 07:28:38.438409 4492 scope.go:117] "RemoveContainer" containerID="bbb2c9d89253c0550a885310fa6b6be530f4d28143ea5439ce2e8887d31b7abb" Nov 26 07:28:38 crc kubenswrapper[4492]: E1126 07:28:38.439293 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 07:28:43 crc kubenswrapper[4492]: I1126 07:28:43.680787 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-d6q6s"] Nov 26 07:28:43 crc kubenswrapper[4492]: E1126 07:28:43.682065 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="32ae2814-91a3-471e-916a-2c1ca0cfe0bb" containerName="extract-content" Nov 26 07:28:43 crc kubenswrapper[4492]: I1126 07:28:43.682077 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="32ae2814-91a3-471e-916a-2c1ca0cfe0bb" containerName="extract-content" Nov 26 07:28:43 crc kubenswrapper[4492]: E1126 07:28:43.682096 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="32ae2814-91a3-471e-916a-2c1ca0cfe0bb" containerName="extract-utilities" Nov 26 07:28:43 crc kubenswrapper[4492]: I1126 07:28:43.682102 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="32ae2814-91a3-471e-916a-2c1ca0cfe0bb" containerName="extract-utilities" Nov 26 07:28:43 crc kubenswrapper[4492]: E1126 07:28:43.682130 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="32ae2814-91a3-471e-916a-2c1ca0cfe0bb" containerName="registry-server" Nov 26 07:28:43 crc kubenswrapper[4492]: I1126 07:28:43.682137 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="32ae2814-91a3-471e-916a-2c1ca0cfe0bb" containerName="registry-server" Nov 26 07:28:43 crc kubenswrapper[4492]: E1126 07:28:43.682146 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bd69c3ed-a17f-46f9-9838-732ab7bdacef" containerName="telemetry-edpm-deployment-openstack-edpm-ipam" Nov 26 07:28:43 crc kubenswrapper[4492]: I1126 07:28:43.682153 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="bd69c3ed-a17f-46f9-9838-732ab7bdacef" containerName="telemetry-edpm-deployment-openstack-edpm-ipam" Nov 26 07:28:43 crc kubenswrapper[4492]: I1126 07:28:43.682363 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="bd69c3ed-a17f-46f9-9838-732ab7bdacef" containerName="telemetry-edpm-deployment-openstack-edpm-ipam" Nov 26 07:28:43 crc kubenswrapper[4492]: I1126 07:28:43.682389 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="32ae2814-91a3-471e-916a-2c1ca0cfe0bb" containerName="registry-server" Nov 26 07:28:43 crc kubenswrapper[4492]: I1126 07:28:43.684036 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-d6q6s" Nov 26 07:28:43 crc kubenswrapper[4492]: I1126 07:28:43.703695 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-d6q6s"] Nov 26 07:28:43 crc kubenswrapper[4492]: I1126 07:28:43.804315 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nt57b\" (UniqueName: \"kubernetes.io/projected/c31ee669-545f-497b-9993-2c6eb332f407-kube-api-access-nt57b\") pod \"community-operators-d6q6s\" (UID: \"c31ee669-545f-497b-9993-2c6eb332f407\") " pod="openshift-marketplace/community-operators-d6q6s" Nov 26 07:28:43 crc kubenswrapper[4492]: I1126 07:28:43.804493 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c31ee669-545f-497b-9993-2c6eb332f407-catalog-content\") pod \"community-operators-d6q6s\" (UID: \"c31ee669-545f-497b-9993-2c6eb332f407\") " pod="openshift-marketplace/community-operators-d6q6s" Nov 26 07:28:43 crc kubenswrapper[4492]: I1126 07:28:43.804566 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c31ee669-545f-497b-9993-2c6eb332f407-utilities\") pod \"community-operators-d6q6s\" (UID: \"c31ee669-545f-497b-9993-2c6eb332f407\") " pod="openshift-marketplace/community-operators-d6q6s" Nov 26 07:28:43 crc kubenswrapper[4492]: I1126 07:28:43.907151 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nt57b\" (UniqueName: \"kubernetes.io/projected/c31ee669-545f-497b-9993-2c6eb332f407-kube-api-access-nt57b\") pod \"community-operators-d6q6s\" (UID: \"c31ee669-545f-497b-9993-2c6eb332f407\") " pod="openshift-marketplace/community-operators-d6q6s" Nov 26 07:28:43 crc kubenswrapper[4492]: I1126 07:28:43.907253 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c31ee669-545f-497b-9993-2c6eb332f407-catalog-content\") pod \"community-operators-d6q6s\" (UID: \"c31ee669-545f-497b-9993-2c6eb332f407\") " pod="openshift-marketplace/community-operators-d6q6s" Nov 26 07:28:43 crc kubenswrapper[4492]: I1126 07:28:43.907334 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c31ee669-545f-497b-9993-2c6eb332f407-utilities\") pod \"community-operators-d6q6s\" (UID: \"c31ee669-545f-497b-9993-2c6eb332f407\") " pod="openshift-marketplace/community-operators-d6q6s" Nov 26 07:28:43 crc kubenswrapper[4492]: I1126 07:28:43.908011 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c31ee669-545f-497b-9993-2c6eb332f407-utilities\") pod \"community-operators-d6q6s\" (UID: \"c31ee669-545f-497b-9993-2c6eb332f407\") " pod="openshift-marketplace/community-operators-d6q6s" Nov 26 07:28:43 crc kubenswrapper[4492]: I1126 07:28:43.908157 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c31ee669-545f-497b-9993-2c6eb332f407-catalog-content\") pod \"community-operators-d6q6s\" (UID: \"c31ee669-545f-497b-9993-2c6eb332f407\") " pod="openshift-marketplace/community-operators-d6q6s" Nov 26 07:28:43 crc kubenswrapper[4492]: I1126 07:28:43.928331 4492 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-nt57b\" (UniqueName: \"kubernetes.io/projected/c31ee669-545f-497b-9993-2c6eb332f407-kube-api-access-nt57b\") pod \"community-operators-d6q6s\" (UID: \"c31ee669-545f-497b-9993-2c6eb332f407\") " pod="openshift-marketplace/community-operators-d6q6s" Nov 26 07:28:44 crc kubenswrapper[4492]: I1126 07:28:44.003964 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-d6q6s" Nov 26 07:28:44 crc kubenswrapper[4492]: I1126 07:28:44.528162 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-d6q6s"] Nov 26 07:28:45 crc kubenswrapper[4492]: I1126 07:28:45.478834 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-q2pzb"] Nov 26 07:28:45 crc kubenswrapper[4492]: I1126 07:28:45.483876 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-q2pzb" Nov 26 07:28:45 crc kubenswrapper[4492]: I1126 07:28:45.496738 4492 generic.go:334] "Generic (PLEG): container finished" podID="c31ee669-545f-497b-9993-2c6eb332f407" containerID="01fc1d8393e1ebf3b3ddc2f522b3cfd13a7c28d89828f2d78ada19b1a4b72091" exitCode=0 Nov 26 07:28:45 crc kubenswrapper[4492]: I1126 07:28:45.496768 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-d6q6s" event={"ID":"c31ee669-545f-497b-9993-2c6eb332f407","Type":"ContainerDied","Data":"01fc1d8393e1ebf3b3ddc2f522b3cfd13a7c28d89828f2d78ada19b1a4b72091"} Nov 26 07:28:45 crc kubenswrapper[4492]: I1126 07:28:45.496785 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-d6q6s" event={"ID":"c31ee669-545f-497b-9993-2c6eb332f407","Type":"ContainerStarted","Data":"fe0104f8fdbde585d9a7710deb1a66caee7e6ffcc22dbb3da29ba1aaaceb30ec"} Nov 26 07:28:45 crc kubenswrapper[4492]: I1126 07:28:45.503079 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-q2pzb"] Nov 26 07:28:45 crc kubenswrapper[4492]: I1126 07:28:45.548554 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fb2r8\" (UniqueName: \"kubernetes.io/projected/b08dea6c-5680-4b28-973e-6f5d3a8d2876-kube-api-access-fb2r8\") pod \"redhat-operators-q2pzb\" (UID: \"b08dea6c-5680-4b28-973e-6f5d3a8d2876\") " pod="openshift-marketplace/redhat-operators-q2pzb" Nov 26 07:28:45 crc kubenswrapper[4492]: I1126 07:28:45.548614 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b08dea6c-5680-4b28-973e-6f5d3a8d2876-catalog-content\") pod \"redhat-operators-q2pzb\" (UID: \"b08dea6c-5680-4b28-973e-6f5d3a8d2876\") " pod="openshift-marketplace/redhat-operators-q2pzb" Nov 26 07:28:45 crc kubenswrapper[4492]: I1126 07:28:45.548636 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b08dea6c-5680-4b28-973e-6f5d3a8d2876-utilities\") pod \"redhat-operators-q2pzb\" (UID: \"b08dea6c-5680-4b28-973e-6f5d3a8d2876\") " pod="openshift-marketplace/redhat-operators-q2pzb" Nov 26 07:28:45 crc kubenswrapper[4492]: I1126 07:28:45.649251 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fb2r8\" (UniqueName: 
\"kubernetes.io/projected/b08dea6c-5680-4b28-973e-6f5d3a8d2876-kube-api-access-fb2r8\") pod \"redhat-operators-q2pzb\" (UID: \"b08dea6c-5680-4b28-973e-6f5d3a8d2876\") " pod="openshift-marketplace/redhat-operators-q2pzb" Nov 26 07:28:45 crc kubenswrapper[4492]: I1126 07:28:45.649313 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b08dea6c-5680-4b28-973e-6f5d3a8d2876-catalog-content\") pod \"redhat-operators-q2pzb\" (UID: \"b08dea6c-5680-4b28-973e-6f5d3a8d2876\") " pod="openshift-marketplace/redhat-operators-q2pzb" Nov 26 07:28:45 crc kubenswrapper[4492]: I1126 07:28:45.649343 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b08dea6c-5680-4b28-973e-6f5d3a8d2876-utilities\") pod \"redhat-operators-q2pzb\" (UID: \"b08dea6c-5680-4b28-973e-6f5d3a8d2876\") " pod="openshift-marketplace/redhat-operators-q2pzb" Nov 26 07:28:45 crc kubenswrapper[4492]: I1126 07:28:45.649848 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b08dea6c-5680-4b28-973e-6f5d3a8d2876-utilities\") pod \"redhat-operators-q2pzb\" (UID: \"b08dea6c-5680-4b28-973e-6f5d3a8d2876\") " pod="openshift-marketplace/redhat-operators-q2pzb" Nov 26 07:28:45 crc kubenswrapper[4492]: I1126 07:28:45.650377 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b08dea6c-5680-4b28-973e-6f5d3a8d2876-catalog-content\") pod \"redhat-operators-q2pzb\" (UID: \"b08dea6c-5680-4b28-973e-6f5d3a8d2876\") " pod="openshift-marketplace/redhat-operators-q2pzb" Nov 26 07:28:45 crc kubenswrapper[4492]: I1126 07:28:45.679209 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fb2r8\" (UniqueName: \"kubernetes.io/projected/b08dea6c-5680-4b28-973e-6f5d3a8d2876-kube-api-access-fb2r8\") pod \"redhat-operators-q2pzb\" (UID: \"b08dea6c-5680-4b28-973e-6f5d3a8d2876\") " pod="openshift-marketplace/redhat-operators-q2pzb" Nov 26 07:28:45 crc kubenswrapper[4492]: I1126 07:28:45.808160 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-q2pzb" Nov 26 07:28:46 crc kubenswrapper[4492]: I1126 07:28:46.277653 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-q2pzb"] Nov 26 07:28:46 crc kubenswrapper[4492]: I1126 07:28:46.517527 4492 generic.go:334] "Generic (PLEG): container finished" podID="b08dea6c-5680-4b28-973e-6f5d3a8d2876" containerID="b0f89234cbe7313f258b223daacad633f81ee6cfed4151e007e9ddef632bf7ec" exitCode=0 Nov 26 07:28:46 crc kubenswrapper[4492]: I1126 07:28:46.517619 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-q2pzb" event={"ID":"b08dea6c-5680-4b28-973e-6f5d3a8d2876","Type":"ContainerDied","Data":"b0f89234cbe7313f258b223daacad633f81ee6cfed4151e007e9ddef632bf7ec"} Nov 26 07:28:46 crc kubenswrapper[4492]: I1126 07:28:46.517876 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-q2pzb" event={"ID":"b08dea6c-5680-4b28-973e-6f5d3a8d2876","Type":"ContainerStarted","Data":"b070e03f6e64e98ca34eec6d9e88cda818d24310087a2ef975f60d050b61d399"} Nov 26 07:28:46 crc kubenswrapper[4492]: I1126 07:28:46.521776 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-d6q6s" event={"ID":"c31ee669-545f-497b-9993-2c6eb332f407","Type":"ContainerStarted","Data":"db433211380908beaa85714875c46ae32b9ee9b323eefa6d9a250f808ae9a639"} Nov 26 07:28:47 crc kubenswrapper[4492]: I1126 07:28:47.532794 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-q2pzb" event={"ID":"b08dea6c-5680-4b28-973e-6f5d3a8d2876","Type":"ContainerStarted","Data":"ecf5ab02232f24385d84d641604b29ca39832b5378c582de744bb57c7e5e3d0d"} Nov 26 07:28:47 crc kubenswrapper[4492]: I1126 07:28:47.535011 4492 generic.go:334] "Generic (PLEG): container finished" podID="c31ee669-545f-497b-9993-2c6eb332f407" containerID="db433211380908beaa85714875c46ae32b9ee9b323eefa6d9a250f808ae9a639" exitCode=0 Nov 26 07:28:47 crc kubenswrapper[4492]: I1126 07:28:47.535060 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-d6q6s" event={"ID":"c31ee669-545f-497b-9993-2c6eb332f407","Type":"ContainerDied","Data":"db433211380908beaa85714875c46ae32b9ee9b323eefa6d9a250f808ae9a639"} Nov 26 07:28:48 crc kubenswrapper[4492]: I1126 07:28:48.546444 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-d6q6s" event={"ID":"c31ee669-545f-497b-9993-2c6eb332f407","Type":"ContainerStarted","Data":"bf5c8586fd26249e2a7a4909087a2320b220f900c42c89060c33190296cc8b24"} Nov 26 07:28:48 crc kubenswrapper[4492]: I1126 07:28:48.571911 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-d6q6s" podStartSLOduration=3.061919247 podStartE2EDuration="5.571895182s" podCreationTimestamp="2025-11-26 07:28:43 +0000 UTC" firstStartedPulling="2025-11-26 07:28:45.499095074 +0000 UTC m=+2421.382983372" lastFinishedPulling="2025-11-26 07:28:48.009071009 +0000 UTC m=+2423.892959307" observedRunningTime="2025-11-26 07:28:48.561320557 +0000 UTC m=+2424.445208856" watchObservedRunningTime="2025-11-26 07:28:48.571895182 +0000 UTC m=+2424.455783480" Nov 26 07:28:49 crc kubenswrapper[4492]: I1126 07:28:49.556086 4492 generic.go:334] "Generic (PLEG): container finished" podID="b08dea6c-5680-4b28-973e-6f5d3a8d2876" 
containerID="ecf5ab02232f24385d84d641604b29ca39832b5378c582de744bb57c7e5e3d0d" exitCode=0 Nov 26 07:28:49 crc kubenswrapper[4492]: I1126 07:28:49.556155 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-q2pzb" event={"ID":"b08dea6c-5680-4b28-973e-6f5d3a8d2876","Type":"ContainerDied","Data":"ecf5ab02232f24385d84d641604b29ca39832b5378c582de744bb57c7e5e3d0d"} Nov 26 07:28:50 crc kubenswrapper[4492]: I1126 07:28:50.568738 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-q2pzb" event={"ID":"b08dea6c-5680-4b28-973e-6f5d3a8d2876","Type":"ContainerStarted","Data":"26d5dd5c437bb216fced12a25d5a5098336feb09e4449ee34e88fbca819df8bb"} Nov 26 07:28:50 crc kubenswrapper[4492]: I1126 07:28:50.601556 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-q2pzb" podStartSLOduration=2.04235121 podStartE2EDuration="5.601528816s" podCreationTimestamp="2025-11-26 07:28:45 +0000 UTC" firstStartedPulling="2025-11-26 07:28:46.519414498 +0000 UTC m=+2422.403302796" lastFinishedPulling="2025-11-26 07:28:50.078592105 +0000 UTC m=+2425.962480402" observedRunningTime="2025-11-26 07:28:50.584894669 +0000 UTC m=+2426.468782966" watchObservedRunningTime="2025-11-26 07:28:50.601528816 +0000 UTC m=+2426.485417114" Nov 26 07:28:53 crc kubenswrapper[4492]: I1126 07:28:53.439074 4492 scope.go:117] "RemoveContainer" containerID="bbb2c9d89253c0550a885310fa6b6be530f4d28143ea5439ce2e8887d31b7abb" Nov 26 07:28:53 crc kubenswrapper[4492]: E1126 07:28:53.439718 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 07:28:54 crc kubenswrapper[4492]: I1126 07:28:54.005127 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-d6q6s" Nov 26 07:28:54 crc kubenswrapper[4492]: I1126 07:28:54.005222 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-d6q6s" Nov 26 07:28:54 crc kubenswrapper[4492]: I1126 07:28:54.045317 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-d6q6s" Nov 26 07:28:54 crc kubenswrapper[4492]: I1126 07:28:54.638713 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-d6q6s" Nov 26 07:28:55 crc kubenswrapper[4492]: I1126 07:28:55.272209 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-d6q6s"] Nov 26 07:28:55 crc kubenswrapper[4492]: I1126 07:28:55.630259 4492 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/swift-proxy-645d6d85d7-cvr9h" podUID="e44b94a7-c7a7-40e1-8d00-9f27e0e0639e" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 502" Nov 26 07:28:55 crc kubenswrapper[4492]: I1126 07:28:55.808465 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-q2pzb" Nov 26 07:28:55 crc kubenswrapper[4492]: I1126 07:28:55.808548 4492 kubelet.go:2542] 
"SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-q2pzb" Nov 26 07:28:56 crc kubenswrapper[4492]: I1126 07:28:56.623486 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-d6q6s" podUID="c31ee669-545f-497b-9993-2c6eb332f407" containerName="registry-server" containerID="cri-o://bf5c8586fd26249e2a7a4909087a2320b220f900c42c89060c33190296cc8b24" gracePeriod=2 Nov 26 07:28:56 crc kubenswrapper[4492]: I1126 07:28:56.858127 4492 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-q2pzb" podUID="b08dea6c-5680-4b28-973e-6f5d3a8d2876" containerName="registry-server" probeResult="failure" output=< Nov 26 07:28:56 crc kubenswrapper[4492]: timeout: failed to connect service ":50051" within 1s Nov 26 07:28:56 crc kubenswrapper[4492]: > Nov 26 07:28:57 crc kubenswrapper[4492]: I1126 07:28:57.063260 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-d6q6s" Nov 26 07:28:57 crc kubenswrapper[4492]: I1126 07:28:57.109580 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nt57b\" (UniqueName: \"kubernetes.io/projected/c31ee669-545f-497b-9993-2c6eb332f407-kube-api-access-nt57b\") pod \"c31ee669-545f-497b-9993-2c6eb332f407\" (UID: \"c31ee669-545f-497b-9993-2c6eb332f407\") " Nov 26 07:28:57 crc kubenswrapper[4492]: I1126 07:28:57.109848 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c31ee669-545f-497b-9993-2c6eb332f407-utilities\") pod \"c31ee669-545f-497b-9993-2c6eb332f407\" (UID: \"c31ee669-545f-497b-9993-2c6eb332f407\") " Nov 26 07:28:57 crc kubenswrapper[4492]: I1126 07:28:57.109958 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c31ee669-545f-497b-9993-2c6eb332f407-catalog-content\") pod \"c31ee669-545f-497b-9993-2c6eb332f407\" (UID: \"c31ee669-545f-497b-9993-2c6eb332f407\") " Nov 26 07:28:57 crc kubenswrapper[4492]: I1126 07:28:57.110531 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c31ee669-545f-497b-9993-2c6eb332f407-utilities" (OuterVolumeSpecName: "utilities") pod "c31ee669-545f-497b-9993-2c6eb332f407" (UID: "c31ee669-545f-497b-9993-2c6eb332f407"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:28:57 crc kubenswrapper[4492]: I1126 07:28:57.111577 4492 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c31ee669-545f-497b-9993-2c6eb332f407-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 07:28:57 crc kubenswrapper[4492]: I1126 07:28:57.121373 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c31ee669-545f-497b-9993-2c6eb332f407-kube-api-access-nt57b" (OuterVolumeSpecName: "kube-api-access-nt57b") pod "c31ee669-545f-497b-9993-2c6eb332f407" (UID: "c31ee669-545f-497b-9993-2c6eb332f407"). InnerVolumeSpecName "kube-api-access-nt57b". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:28:57 crc kubenswrapper[4492]: I1126 07:28:57.171424 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c31ee669-545f-497b-9993-2c6eb332f407-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c31ee669-545f-497b-9993-2c6eb332f407" (UID: "c31ee669-545f-497b-9993-2c6eb332f407"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:28:57 crc kubenswrapper[4492]: I1126 07:28:57.214917 4492 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c31ee669-545f-497b-9993-2c6eb332f407-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 07:28:57 crc kubenswrapper[4492]: I1126 07:28:57.214977 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nt57b\" (UniqueName: \"kubernetes.io/projected/c31ee669-545f-497b-9993-2c6eb332f407-kube-api-access-nt57b\") on node \"crc\" DevicePath \"\"" Nov 26 07:28:57 crc kubenswrapper[4492]: I1126 07:28:57.633503 4492 generic.go:334] "Generic (PLEG): container finished" podID="c31ee669-545f-497b-9993-2c6eb332f407" containerID="bf5c8586fd26249e2a7a4909087a2320b220f900c42c89060c33190296cc8b24" exitCode=0 Nov 26 07:28:57 crc kubenswrapper[4492]: I1126 07:28:57.633564 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-d6q6s" event={"ID":"c31ee669-545f-497b-9993-2c6eb332f407","Type":"ContainerDied","Data":"bf5c8586fd26249e2a7a4909087a2320b220f900c42c89060c33190296cc8b24"} Nov 26 07:28:57 crc kubenswrapper[4492]: I1126 07:28:57.633603 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-d6q6s" event={"ID":"c31ee669-545f-497b-9993-2c6eb332f407","Type":"ContainerDied","Data":"fe0104f8fdbde585d9a7710deb1a66caee7e6ffcc22dbb3da29ba1aaaceb30ec"} Nov 26 07:28:57 crc kubenswrapper[4492]: I1126 07:28:57.633623 4492 scope.go:117] "RemoveContainer" containerID="bf5c8586fd26249e2a7a4909087a2320b220f900c42c89060c33190296cc8b24" Nov 26 07:28:57 crc kubenswrapper[4492]: I1126 07:28:57.633569 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-d6q6s" Nov 26 07:28:57 crc kubenswrapper[4492]: I1126 07:28:57.664281 4492 scope.go:117] "RemoveContainer" containerID="db433211380908beaa85714875c46ae32b9ee9b323eefa6d9a250f808ae9a639" Nov 26 07:28:57 crc kubenswrapper[4492]: I1126 07:28:57.668107 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-d6q6s"] Nov 26 07:28:57 crc kubenswrapper[4492]: I1126 07:28:57.674068 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-d6q6s"] Nov 26 07:28:57 crc kubenswrapper[4492]: I1126 07:28:57.689019 4492 scope.go:117] "RemoveContainer" containerID="01fc1d8393e1ebf3b3ddc2f522b3cfd13a7c28d89828f2d78ada19b1a4b72091" Nov 26 07:28:57 crc kubenswrapper[4492]: I1126 07:28:57.730350 4492 scope.go:117] "RemoveContainer" containerID="bf5c8586fd26249e2a7a4909087a2320b220f900c42c89060c33190296cc8b24" Nov 26 07:28:57 crc kubenswrapper[4492]: E1126 07:28:57.730747 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bf5c8586fd26249e2a7a4909087a2320b220f900c42c89060c33190296cc8b24\": container with ID starting with bf5c8586fd26249e2a7a4909087a2320b220f900c42c89060c33190296cc8b24 not found: ID does not exist" containerID="bf5c8586fd26249e2a7a4909087a2320b220f900c42c89060c33190296cc8b24" Nov 26 07:28:57 crc kubenswrapper[4492]: I1126 07:28:57.730780 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bf5c8586fd26249e2a7a4909087a2320b220f900c42c89060c33190296cc8b24"} err="failed to get container status \"bf5c8586fd26249e2a7a4909087a2320b220f900c42c89060c33190296cc8b24\": rpc error: code = NotFound desc = could not find container \"bf5c8586fd26249e2a7a4909087a2320b220f900c42c89060c33190296cc8b24\": container with ID starting with bf5c8586fd26249e2a7a4909087a2320b220f900c42c89060c33190296cc8b24 not found: ID does not exist" Nov 26 07:28:57 crc kubenswrapper[4492]: I1126 07:28:57.730806 4492 scope.go:117] "RemoveContainer" containerID="db433211380908beaa85714875c46ae32b9ee9b323eefa6d9a250f808ae9a639" Nov 26 07:28:57 crc kubenswrapper[4492]: E1126 07:28:57.731083 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"db433211380908beaa85714875c46ae32b9ee9b323eefa6d9a250f808ae9a639\": container with ID starting with db433211380908beaa85714875c46ae32b9ee9b323eefa6d9a250f808ae9a639 not found: ID does not exist" containerID="db433211380908beaa85714875c46ae32b9ee9b323eefa6d9a250f808ae9a639" Nov 26 07:28:57 crc kubenswrapper[4492]: I1126 07:28:57.731137 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"db433211380908beaa85714875c46ae32b9ee9b323eefa6d9a250f808ae9a639"} err="failed to get container status \"db433211380908beaa85714875c46ae32b9ee9b323eefa6d9a250f808ae9a639\": rpc error: code = NotFound desc = could not find container \"db433211380908beaa85714875c46ae32b9ee9b323eefa6d9a250f808ae9a639\": container with ID starting with db433211380908beaa85714875c46ae32b9ee9b323eefa6d9a250f808ae9a639 not found: ID does not exist" Nov 26 07:28:57 crc kubenswrapper[4492]: I1126 07:28:57.731201 4492 scope.go:117] "RemoveContainer" containerID="01fc1d8393e1ebf3b3ddc2f522b3cfd13a7c28d89828f2d78ada19b1a4b72091" Nov 26 07:28:57 crc kubenswrapper[4492]: E1126 07:28:57.731551 4492 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"01fc1d8393e1ebf3b3ddc2f522b3cfd13a7c28d89828f2d78ada19b1a4b72091\": container with ID starting with 01fc1d8393e1ebf3b3ddc2f522b3cfd13a7c28d89828f2d78ada19b1a4b72091 not found: ID does not exist" containerID="01fc1d8393e1ebf3b3ddc2f522b3cfd13a7c28d89828f2d78ada19b1a4b72091" Nov 26 07:28:57 crc kubenswrapper[4492]: I1126 07:28:57.731578 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"01fc1d8393e1ebf3b3ddc2f522b3cfd13a7c28d89828f2d78ada19b1a4b72091"} err="failed to get container status \"01fc1d8393e1ebf3b3ddc2f522b3cfd13a7c28d89828f2d78ada19b1a4b72091\": rpc error: code = NotFound desc = could not find container \"01fc1d8393e1ebf3b3ddc2f522b3cfd13a7c28d89828f2d78ada19b1a4b72091\": container with ID starting with 01fc1d8393e1ebf3b3ddc2f522b3cfd13a7c28d89828f2d78ada19b1a4b72091 not found: ID does not exist" Nov 26 07:28:58 crc kubenswrapper[4492]: I1126 07:28:58.448395 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c31ee669-545f-497b-9993-2c6eb332f407" path="/var/lib/kubelet/pods/c31ee669-545f-497b-9993-2c6eb332f407/volumes" Nov 26 07:29:05 crc kubenswrapper[4492]: I1126 07:29:05.850016 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-q2pzb" Nov 26 07:29:05 crc kubenswrapper[4492]: I1126 07:29:05.891946 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-q2pzb" Nov 26 07:29:06 crc kubenswrapper[4492]: I1126 07:29:06.087954 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-q2pzb"] Nov 26 07:29:06 crc kubenswrapper[4492]: I1126 07:29:06.440440 4492 scope.go:117] "RemoveContainer" containerID="bbb2c9d89253c0550a885310fa6b6be530f4d28143ea5439ce2e8887d31b7abb" Nov 26 07:29:06 crc kubenswrapper[4492]: E1126 07:29:06.441204 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 07:29:07 crc kubenswrapper[4492]: I1126 07:29:07.731661 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-q2pzb" podUID="b08dea6c-5680-4b28-973e-6f5d3a8d2876" containerName="registry-server" containerID="cri-o://26d5dd5c437bb216fced12a25d5a5098336feb09e4449ee34e88fbca819df8bb" gracePeriod=2 Nov 26 07:29:08 crc kubenswrapper[4492]: I1126 07:29:08.114194 4492 util.go:48] "No ready sandbox for pod can be found. 
Nov 26 07:29:08 crc kubenswrapper[4492]: I1126 07:29:08.249454 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fb2r8\" (UniqueName: \"kubernetes.io/projected/b08dea6c-5680-4b28-973e-6f5d3a8d2876-kube-api-access-fb2r8\") pod \"b08dea6c-5680-4b28-973e-6f5d3a8d2876\" (UID: \"b08dea6c-5680-4b28-973e-6f5d3a8d2876\") "
Nov 26 07:29:08 crc kubenswrapper[4492]: I1126 07:29:08.249520 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b08dea6c-5680-4b28-973e-6f5d3a8d2876-catalog-content\") pod \"b08dea6c-5680-4b28-973e-6f5d3a8d2876\" (UID: \"b08dea6c-5680-4b28-973e-6f5d3a8d2876\") "
Nov 26 07:29:08 crc kubenswrapper[4492]: I1126 07:29:08.249632 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b08dea6c-5680-4b28-973e-6f5d3a8d2876-utilities\") pod \"b08dea6c-5680-4b28-973e-6f5d3a8d2876\" (UID: \"b08dea6c-5680-4b28-973e-6f5d3a8d2876\") "
Nov 26 07:29:08 crc kubenswrapper[4492]: I1126 07:29:08.250345 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b08dea6c-5680-4b28-973e-6f5d3a8d2876-utilities" (OuterVolumeSpecName: "utilities") pod "b08dea6c-5680-4b28-973e-6f5d3a8d2876" (UID: "b08dea6c-5680-4b28-973e-6f5d3a8d2876"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 07:29:08 crc kubenswrapper[4492]: I1126 07:29:08.256358 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b08dea6c-5680-4b28-973e-6f5d3a8d2876-kube-api-access-fb2r8" (OuterVolumeSpecName: "kube-api-access-fb2r8") pod "b08dea6c-5680-4b28-973e-6f5d3a8d2876" (UID: "b08dea6c-5680-4b28-973e-6f5d3a8d2876"). InnerVolumeSpecName "kube-api-access-fb2r8". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 07:29:08 crc kubenswrapper[4492]: I1126 07:29:08.321279 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b08dea6c-5680-4b28-973e-6f5d3a8d2876-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b08dea6c-5680-4b28-973e-6f5d3a8d2876" (UID: "b08dea6c-5680-4b28-973e-6f5d3a8d2876"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 07:29:08 crc kubenswrapper[4492]: I1126 07:29:08.352138 4492 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b08dea6c-5680-4b28-973e-6f5d3a8d2876-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 26 07:29:08 crc kubenswrapper[4492]: I1126 07:29:08.352196 4492 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b08dea6c-5680-4b28-973e-6f5d3a8d2876-utilities\") on node \"crc\" DevicePath \"\""
Nov 26 07:29:08 crc kubenswrapper[4492]: I1126 07:29:08.352211 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fb2r8\" (UniqueName: \"kubernetes.io/projected/b08dea6c-5680-4b28-973e-6f5d3a8d2876-kube-api-access-fb2r8\") on node \"crc\" DevicePath \"\""
Nov 26 07:29:08 crc kubenswrapper[4492]: I1126 07:29:08.741775 4492 generic.go:334] "Generic (PLEG): container finished" podID="b08dea6c-5680-4b28-973e-6f5d3a8d2876" containerID="26d5dd5c437bb216fced12a25d5a5098336feb09e4449ee34e88fbca819df8bb" exitCode=0
Nov 26 07:29:08 crc kubenswrapper[4492]: I1126 07:29:08.741845 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-q2pzb" event={"ID":"b08dea6c-5680-4b28-973e-6f5d3a8d2876","Type":"ContainerDied","Data":"26d5dd5c437bb216fced12a25d5a5098336feb09e4449ee34e88fbca819df8bb"}
Nov 26 07:29:08 crc kubenswrapper[4492]: I1126 07:29:08.741908 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-q2pzb" event={"ID":"b08dea6c-5680-4b28-973e-6f5d3a8d2876","Type":"ContainerDied","Data":"b070e03f6e64e98ca34eec6d9e88cda818d24310087a2ef975f60d050b61d399"}
Nov 26 07:29:08 crc kubenswrapper[4492]: I1126 07:29:08.741923 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-q2pzb"
Nov 26 07:29:08 crc kubenswrapper[4492]: I1126 07:29:08.741932 4492 scope.go:117] "RemoveContainer" containerID="26d5dd5c437bb216fced12a25d5a5098336feb09e4449ee34e88fbca819df8bb"
Nov 26 07:29:08 crc kubenswrapper[4492]: I1126 07:29:08.763625 4492 scope.go:117] "RemoveContainer" containerID="ecf5ab02232f24385d84d641604b29ca39832b5378c582de744bb57c7e5e3d0d"
Nov 26 07:29:08 crc kubenswrapper[4492]: I1126 07:29:08.774879 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-q2pzb"]
Nov 26 07:29:08 crc kubenswrapper[4492]: I1126 07:29:08.781224 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-q2pzb"]
Nov 26 07:29:08 crc kubenswrapper[4492]: I1126 07:29:08.787859 4492 scope.go:117] "RemoveContainer" containerID="b0f89234cbe7313f258b223daacad633f81ee6cfed4151e007e9ddef632bf7ec"
Nov 26 07:29:08 crc kubenswrapper[4492]: I1126 07:29:08.823097 4492 scope.go:117] "RemoveContainer" containerID="26d5dd5c437bb216fced12a25d5a5098336feb09e4449ee34e88fbca819df8bb"
Nov 26 07:29:08 crc kubenswrapper[4492]: E1126 07:29:08.823565 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"26d5dd5c437bb216fced12a25d5a5098336feb09e4449ee34e88fbca819df8bb\": container with ID starting with 26d5dd5c437bb216fced12a25d5a5098336feb09e4449ee34e88fbca819df8bb not found: ID does not exist" containerID="26d5dd5c437bb216fced12a25d5a5098336feb09e4449ee34e88fbca819df8bb"
Nov 26 07:29:08 crc kubenswrapper[4492]: I1126 07:29:08.823609 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"26d5dd5c437bb216fced12a25d5a5098336feb09e4449ee34e88fbca819df8bb"} err="failed to get container status \"26d5dd5c437bb216fced12a25d5a5098336feb09e4449ee34e88fbca819df8bb\": rpc error: code = NotFound desc = could not find container \"26d5dd5c437bb216fced12a25d5a5098336feb09e4449ee34e88fbca819df8bb\": container with ID starting with 26d5dd5c437bb216fced12a25d5a5098336feb09e4449ee34e88fbca819df8bb not found: ID does not exist"
Nov 26 07:29:08 crc kubenswrapper[4492]: I1126 07:29:08.823633 4492 scope.go:117] "RemoveContainer" containerID="ecf5ab02232f24385d84d641604b29ca39832b5378c582de744bb57c7e5e3d0d"
Nov 26 07:29:08 crc kubenswrapper[4492]: E1126 07:29:08.823993 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ecf5ab02232f24385d84d641604b29ca39832b5378c582de744bb57c7e5e3d0d\": container with ID starting with ecf5ab02232f24385d84d641604b29ca39832b5378c582de744bb57c7e5e3d0d not found: ID does not exist" containerID="ecf5ab02232f24385d84d641604b29ca39832b5378c582de744bb57c7e5e3d0d"
Nov 26 07:29:08 crc kubenswrapper[4492]: I1126 07:29:08.824032 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ecf5ab02232f24385d84d641604b29ca39832b5378c582de744bb57c7e5e3d0d"} err="failed to get container status \"ecf5ab02232f24385d84d641604b29ca39832b5378c582de744bb57c7e5e3d0d\": rpc error: code = NotFound desc = could not find container \"ecf5ab02232f24385d84d641604b29ca39832b5378c582de744bb57c7e5e3d0d\": container with ID starting with ecf5ab02232f24385d84d641604b29ca39832b5378c582de744bb57c7e5e3d0d not found: ID does not exist"
Nov 26 07:29:08 crc kubenswrapper[4492]: I1126 07:29:08.824062 4492 scope.go:117] "RemoveContainer" containerID="b0f89234cbe7313f258b223daacad633f81ee6cfed4151e007e9ddef632bf7ec"
Nov 26 07:29:08 crc kubenswrapper[4492]: E1126 07:29:08.824509 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b0f89234cbe7313f258b223daacad633f81ee6cfed4151e007e9ddef632bf7ec\": container with ID starting with b0f89234cbe7313f258b223daacad633f81ee6cfed4151e007e9ddef632bf7ec not found: ID does not exist" containerID="b0f89234cbe7313f258b223daacad633f81ee6cfed4151e007e9ddef632bf7ec"
Nov 26 07:29:08 crc kubenswrapper[4492]: I1126 07:29:08.824554 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b0f89234cbe7313f258b223daacad633f81ee6cfed4151e007e9ddef632bf7ec"} err="failed to get container status \"b0f89234cbe7313f258b223daacad633f81ee6cfed4151e007e9ddef632bf7ec\": rpc error: code = NotFound desc = could not find container \"b0f89234cbe7313f258b223daacad633f81ee6cfed4151e007e9ddef632bf7ec\": container with ID starting with b0f89234cbe7313f258b223daacad633f81ee6cfed4151e007e9ddef632bf7ec not found: ID does not exist"
Nov 26 07:29:10 crc kubenswrapper[4492]: I1126 07:29:10.451879 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b08dea6c-5680-4b28-973e-6f5d3a8d2876" path="/var/lib/kubelet/pods/b08dea6c-5680-4b28-973e-6f5d3a8d2876/volumes"
Nov 26 07:29:20 crc kubenswrapper[4492]: I1126 07:29:20.439306 4492 scope.go:117] "RemoveContainer" containerID="bbb2c9d89253c0550a885310fa6b6be530f4d28143ea5439ce2e8887d31b7abb"
Nov 26 07:29:20 crc kubenswrapper[4492]: E1126 07:29:20.442846 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82"
Nov 26 07:29:27 crc kubenswrapper[4492]: I1126 07:29:27.257888 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/tempest-tests-tempest-s00-multi-thread-testing"]
Nov 26 07:29:27 crc kubenswrapper[4492]: E1126 07:29:27.258610 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b08dea6c-5680-4b28-973e-6f5d3a8d2876" containerName="extract-utilities"
Nov 26 07:29:27 crc kubenswrapper[4492]: I1126 07:29:27.258623 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="b08dea6c-5680-4b28-973e-6f5d3a8d2876" containerName="extract-utilities"
Nov 26 07:29:27 crc kubenswrapper[4492]: E1126 07:29:27.258639 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c31ee669-545f-497b-9993-2c6eb332f407" containerName="extract-content"
Nov 26 07:29:27 crc kubenswrapper[4492]: I1126 07:29:27.258644 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="c31ee669-545f-497b-9993-2c6eb332f407" containerName="extract-content"
Nov 26 07:29:27 crc kubenswrapper[4492]: E1126 07:29:27.258655 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b08dea6c-5680-4b28-973e-6f5d3a8d2876" containerName="extract-content"
Nov 26 07:29:27 crc kubenswrapper[4492]: I1126 07:29:27.258663 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="b08dea6c-5680-4b28-973e-6f5d3a8d2876" containerName="extract-content"
Nov 26 07:29:27 crc kubenswrapper[4492]: E1126 07:29:27.258672 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b08dea6c-5680-4b28-973e-6f5d3a8d2876" containerName="registry-server"
container" podUID="b08dea6c-5680-4b28-973e-6f5d3a8d2876" containerName="registry-server" Nov 26 07:29:27 crc kubenswrapper[4492]: I1126 07:29:27.258677 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="b08dea6c-5680-4b28-973e-6f5d3a8d2876" containerName="registry-server" Nov 26 07:29:27 crc kubenswrapper[4492]: E1126 07:29:27.258686 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c31ee669-545f-497b-9993-2c6eb332f407" containerName="registry-server" Nov 26 07:29:27 crc kubenswrapper[4492]: I1126 07:29:27.258692 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="c31ee669-545f-497b-9993-2c6eb332f407" containerName="registry-server" Nov 26 07:29:27 crc kubenswrapper[4492]: E1126 07:29:27.258713 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c31ee669-545f-497b-9993-2c6eb332f407" containerName="extract-utilities" Nov 26 07:29:27 crc kubenswrapper[4492]: I1126 07:29:27.258718 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="c31ee669-545f-497b-9993-2c6eb332f407" containerName="extract-utilities" Nov 26 07:29:27 crc kubenswrapper[4492]: I1126 07:29:27.258881 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="c31ee669-545f-497b-9993-2c6eb332f407" containerName="registry-server" Nov 26 07:29:27 crc kubenswrapper[4492]: I1126 07:29:27.258902 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="b08dea6c-5680-4b28-973e-6f5d3a8d2876" containerName="registry-server" Nov 26 07:29:27 crc kubenswrapper[4492]: I1126 07:29:27.259468 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest-s00-multi-thread-testing" Nov 26 07:29:27 crc kubenswrapper[4492]: I1126 07:29:27.268767 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-env-vars-s0" Nov 26 07:29:27 crc kubenswrapper[4492]: I1126 07:29:27.268938 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"test-operator-controller-priv-key" Nov 26 07:29:27 crc kubenswrapper[4492]: I1126 07:29:27.269122 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-custom-data-s0" Nov 26 07:29:27 crc kubenswrapper[4492]: I1126 07:29:27.269301 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-flqkh" Nov 26 07:29:27 crc kubenswrapper[4492]: I1126 07:29:27.279996 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tempest-tests-tempest-s00-multi-thread-testing"] Nov 26 07:29:27 crc kubenswrapper[4492]: I1126 07:29:27.357707 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/63d7860b-0c48-4075-8658-58e2567d8abf-openstack-config-secret\") pod \"tempest-tests-tempest-s00-multi-thread-testing\" (UID: \"63d7860b-0c48-4075-8658-58e2567d8abf\") " pod="openstack/tempest-tests-tempest-s00-multi-thread-testing" Nov 26 07:29:27 crc kubenswrapper[4492]: I1126 07:29:27.357818 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/63d7860b-0c48-4075-8658-58e2567d8abf-config-data\") pod \"tempest-tests-tempest-s00-multi-thread-testing\" (UID: \"63d7860b-0c48-4075-8658-58e2567d8abf\") " pod="openstack/tempest-tests-tempest-s00-multi-thread-testing" Nov 26 07:29:27 crc kubenswrapper[4492]: I1126 07:29:27.357853 4492 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/63d7860b-0c48-4075-8658-58e2567d8abf-openstack-config\") pod \"tempest-tests-tempest-s00-multi-thread-testing\" (UID: \"63d7860b-0c48-4075-8658-58e2567d8abf\") " pod="openstack/tempest-tests-tempest-s00-multi-thread-testing" Nov 26 07:29:27 crc kubenswrapper[4492]: I1126 07:29:27.459112 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/63d7860b-0c48-4075-8658-58e2567d8abf-ca-certs\") pod \"tempest-tests-tempest-s00-multi-thread-testing\" (UID: \"63d7860b-0c48-4075-8658-58e2567d8abf\") " pod="openstack/tempest-tests-tempest-s00-multi-thread-testing" Nov 26 07:29:27 crc kubenswrapper[4492]: I1126 07:29:27.459152 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/63d7860b-0c48-4075-8658-58e2567d8abf-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest-s00-multi-thread-testing\" (UID: \"63d7860b-0c48-4075-8658-58e2567d8abf\") " pod="openstack/tempest-tests-tempest-s00-multi-thread-testing" Nov 26 07:29:27 crc kubenswrapper[4492]: I1126 07:29:27.459222 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/63d7860b-0c48-4075-8658-58e2567d8abf-openstack-config-secret\") pod \"tempest-tests-tempest-s00-multi-thread-testing\" (UID: \"63d7860b-0c48-4075-8658-58e2567d8abf\") " pod="openstack/tempest-tests-tempest-s00-multi-thread-testing" Nov 26 07:29:27 crc kubenswrapper[4492]: I1126 07:29:27.459251 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/63d7860b-0c48-4075-8658-58e2567d8abf-ssh-key\") pod \"tempest-tests-tempest-s00-multi-thread-testing\" (UID: \"63d7860b-0c48-4075-8658-58e2567d8abf\") " pod="openstack/tempest-tests-tempest-s00-multi-thread-testing" Nov 26 07:29:27 crc kubenswrapper[4492]: I1126 07:29:27.459271 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-scc7s\" (UniqueName: \"kubernetes.io/projected/63d7860b-0c48-4075-8658-58e2567d8abf-kube-api-access-scc7s\") pod \"tempest-tests-tempest-s00-multi-thread-testing\" (UID: \"63d7860b-0c48-4075-8658-58e2567d8abf\") " pod="openstack/tempest-tests-tempest-s00-multi-thread-testing" Nov 26 07:29:27 crc kubenswrapper[4492]: I1126 07:29:27.459364 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"tempest-tests-tempest-s00-multi-thread-testing\" (UID: \"63d7860b-0c48-4075-8658-58e2567d8abf\") " pod="openstack/tempest-tests-tempest-s00-multi-thread-testing" Nov 26 07:29:27 crc kubenswrapper[4492]: I1126 07:29:27.459459 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/63d7860b-0c48-4075-8658-58e2567d8abf-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest-s00-multi-thread-testing\" (UID: \"63d7860b-0c48-4075-8658-58e2567d8abf\") " pod="openstack/tempest-tests-tempest-s00-multi-thread-testing" Nov 26 07:29:27 crc kubenswrapper[4492]: 
I1126 07:29:27.459499 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/63d7860b-0c48-4075-8658-58e2567d8abf-config-data\") pod \"tempest-tests-tempest-s00-multi-thread-testing\" (UID: \"63d7860b-0c48-4075-8658-58e2567d8abf\") " pod="openstack/tempest-tests-tempest-s00-multi-thread-testing" Nov 26 07:29:27 crc kubenswrapper[4492]: I1126 07:29:27.459539 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/63d7860b-0c48-4075-8658-58e2567d8abf-openstack-config\") pod \"tempest-tests-tempest-s00-multi-thread-testing\" (UID: \"63d7860b-0c48-4075-8658-58e2567d8abf\") " pod="openstack/tempest-tests-tempest-s00-multi-thread-testing" Nov 26 07:29:27 crc kubenswrapper[4492]: I1126 07:29:27.460567 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/63d7860b-0c48-4075-8658-58e2567d8abf-openstack-config\") pod \"tempest-tests-tempest-s00-multi-thread-testing\" (UID: \"63d7860b-0c48-4075-8658-58e2567d8abf\") " pod="openstack/tempest-tests-tempest-s00-multi-thread-testing" Nov 26 07:29:27 crc kubenswrapper[4492]: I1126 07:29:27.460779 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/63d7860b-0c48-4075-8658-58e2567d8abf-config-data\") pod \"tempest-tests-tempest-s00-multi-thread-testing\" (UID: \"63d7860b-0c48-4075-8658-58e2567d8abf\") " pod="openstack/tempest-tests-tempest-s00-multi-thread-testing" Nov 26 07:29:27 crc kubenswrapper[4492]: I1126 07:29:27.465896 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/63d7860b-0c48-4075-8658-58e2567d8abf-openstack-config-secret\") pod \"tempest-tests-tempest-s00-multi-thread-testing\" (UID: \"63d7860b-0c48-4075-8658-58e2567d8abf\") " pod="openstack/tempest-tests-tempest-s00-multi-thread-testing" Nov 26 07:29:27 crc kubenswrapper[4492]: I1126 07:29:27.562292 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/63d7860b-0c48-4075-8658-58e2567d8abf-ca-certs\") pod \"tempest-tests-tempest-s00-multi-thread-testing\" (UID: \"63d7860b-0c48-4075-8658-58e2567d8abf\") " pod="openstack/tempest-tests-tempest-s00-multi-thread-testing" Nov 26 07:29:27 crc kubenswrapper[4492]: I1126 07:29:27.562352 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/63d7860b-0c48-4075-8658-58e2567d8abf-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest-s00-multi-thread-testing\" (UID: \"63d7860b-0c48-4075-8658-58e2567d8abf\") " pod="openstack/tempest-tests-tempest-s00-multi-thread-testing" Nov 26 07:29:27 crc kubenswrapper[4492]: I1126 07:29:27.562380 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/63d7860b-0c48-4075-8658-58e2567d8abf-ssh-key\") pod \"tempest-tests-tempest-s00-multi-thread-testing\" (UID: \"63d7860b-0c48-4075-8658-58e2567d8abf\") " pod="openstack/tempest-tests-tempest-s00-multi-thread-testing" Nov 26 07:29:27 crc kubenswrapper[4492]: I1126 07:29:27.562402 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-scc7s\" (UniqueName: 
\"kubernetes.io/projected/63d7860b-0c48-4075-8658-58e2567d8abf-kube-api-access-scc7s\") pod \"tempest-tests-tempest-s00-multi-thread-testing\" (UID: \"63d7860b-0c48-4075-8658-58e2567d8abf\") " pod="openstack/tempest-tests-tempest-s00-multi-thread-testing" Nov 26 07:29:27 crc kubenswrapper[4492]: I1126 07:29:27.562443 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"tempest-tests-tempest-s00-multi-thread-testing\" (UID: \"63d7860b-0c48-4075-8658-58e2567d8abf\") " pod="openstack/tempest-tests-tempest-s00-multi-thread-testing" Nov 26 07:29:27 crc kubenswrapper[4492]: I1126 07:29:27.562521 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/63d7860b-0c48-4075-8658-58e2567d8abf-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest-s00-multi-thread-testing\" (UID: \"63d7860b-0c48-4075-8658-58e2567d8abf\") " pod="openstack/tempest-tests-tempest-s00-multi-thread-testing" Nov 26 07:29:27 crc kubenswrapper[4492]: I1126 07:29:27.563002 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/63d7860b-0c48-4075-8658-58e2567d8abf-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest-s00-multi-thread-testing\" (UID: \"63d7860b-0c48-4075-8658-58e2567d8abf\") " pod="openstack/tempest-tests-tempest-s00-multi-thread-testing" Nov 26 07:29:27 crc kubenswrapper[4492]: I1126 07:29:27.563251 4492 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"tempest-tests-tempest-s00-multi-thread-testing\" (UID: \"63d7860b-0c48-4075-8658-58e2567d8abf\") device mount path \"/mnt/openstack/pv12\"" pod="openstack/tempest-tests-tempest-s00-multi-thread-testing" Nov 26 07:29:27 crc kubenswrapper[4492]: I1126 07:29:27.564295 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/63d7860b-0c48-4075-8658-58e2567d8abf-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest-s00-multi-thread-testing\" (UID: \"63d7860b-0c48-4075-8658-58e2567d8abf\") " pod="openstack/tempest-tests-tempest-s00-multi-thread-testing" Nov 26 07:29:27 crc kubenswrapper[4492]: I1126 07:29:27.568127 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/63d7860b-0c48-4075-8658-58e2567d8abf-ssh-key\") pod \"tempest-tests-tempest-s00-multi-thread-testing\" (UID: \"63d7860b-0c48-4075-8658-58e2567d8abf\") " pod="openstack/tempest-tests-tempest-s00-multi-thread-testing" Nov 26 07:29:27 crc kubenswrapper[4492]: I1126 07:29:27.571328 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/63d7860b-0c48-4075-8658-58e2567d8abf-ca-certs\") pod \"tempest-tests-tempest-s00-multi-thread-testing\" (UID: \"63d7860b-0c48-4075-8658-58e2567d8abf\") " pod="openstack/tempest-tests-tempest-s00-multi-thread-testing" Nov 26 07:29:27 crc kubenswrapper[4492]: I1126 07:29:27.583126 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-scc7s\" (UniqueName: \"kubernetes.io/projected/63d7860b-0c48-4075-8658-58e2567d8abf-kube-api-access-scc7s\") pod 
\"tempest-tests-tempest-s00-multi-thread-testing\" (UID: \"63d7860b-0c48-4075-8658-58e2567d8abf\") " pod="openstack/tempest-tests-tempest-s00-multi-thread-testing" Nov 26 07:29:27 crc kubenswrapper[4492]: I1126 07:29:27.594725 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"tempest-tests-tempest-s00-multi-thread-testing\" (UID: \"63d7860b-0c48-4075-8658-58e2567d8abf\") " pod="openstack/tempest-tests-tempest-s00-multi-thread-testing" Nov 26 07:29:27 crc kubenswrapper[4492]: I1126 07:29:27.878895 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest-s00-multi-thread-testing" Nov 26 07:29:28 crc kubenswrapper[4492]: I1126 07:29:28.391352 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tempest-tests-tempest-s00-multi-thread-testing"] Nov 26 07:29:28 crc kubenswrapper[4492]: I1126 07:29:28.951309 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest-s00-multi-thread-testing" event={"ID":"63d7860b-0c48-4075-8658-58e2567d8abf","Type":"ContainerStarted","Data":"5546b5d42a9cf2e18a3c7760344aa0baddd56a74468acb3f7496682f153e9c46"} Nov 26 07:29:33 crc kubenswrapper[4492]: I1126 07:29:33.437941 4492 scope.go:117] "RemoveContainer" containerID="bbb2c9d89253c0550a885310fa6b6be530f4d28143ea5439ce2e8887d31b7abb" Nov 26 07:29:33 crc kubenswrapper[4492]: E1126 07:29:33.438725 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 07:29:47 crc kubenswrapper[4492]: I1126 07:29:47.438478 4492 scope.go:117] "RemoveContainer" containerID="bbb2c9d89253c0550a885310fa6b6be530f4d28143ea5439ce2e8887d31b7abb" Nov 26 07:29:47 crc kubenswrapper[4492]: E1126 07:29:47.439122 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 07:30:00 crc kubenswrapper[4492]: I1126 07:30:00.135691 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402370-zbvz4"] Nov 26 07:30:00 crc kubenswrapper[4492]: I1126 07:30:00.137161 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402370-zbvz4" Nov 26 07:30:00 crc kubenswrapper[4492]: I1126 07:30:00.138542 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 26 07:30:00 crc kubenswrapper[4492]: I1126 07:30:00.138793 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 26 07:30:00 crc kubenswrapper[4492]: I1126 07:30:00.147529 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402370-zbvz4"] Nov 26 07:30:00 crc kubenswrapper[4492]: I1126 07:30:00.230446 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-47c6z\" (UniqueName: \"kubernetes.io/projected/5489cb28-d168-47f1-9976-98f3d6993ec0-kube-api-access-47c6z\") pod \"collect-profiles-29402370-zbvz4\" (UID: \"5489cb28-d168-47f1-9976-98f3d6993ec0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402370-zbvz4" Nov 26 07:30:00 crc kubenswrapper[4492]: I1126 07:30:00.230484 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5489cb28-d168-47f1-9976-98f3d6993ec0-secret-volume\") pod \"collect-profiles-29402370-zbvz4\" (UID: \"5489cb28-d168-47f1-9976-98f3d6993ec0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402370-zbvz4" Nov 26 07:30:00 crc kubenswrapper[4492]: I1126 07:30:00.230511 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5489cb28-d168-47f1-9976-98f3d6993ec0-config-volume\") pod \"collect-profiles-29402370-zbvz4\" (UID: \"5489cb28-d168-47f1-9976-98f3d6993ec0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402370-zbvz4" Nov 26 07:30:00 crc kubenswrapper[4492]: I1126 07:30:00.332653 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-47c6z\" (UniqueName: \"kubernetes.io/projected/5489cb28-d168-47f1-9976-98f3d6993ec0-kube-api-access-47c6z\") pod \"collect-profiles-29402370-zbvz4\" (UID: \"5489cb28-d168-47f1-9976-98f3d6993ec0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402370-zbvz4" Nov 26 07:30:00 crc kubenswrapper[4492]: I1126 07:30:00.332693 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5489cb28-d168-47f1-9976-98f3d6993ec0-secret-volume\") pod \"collect-profiles-29402370-zbvz4\" (UID: \"5489cb28-d168-47f1-9976-98f3d6993ec0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402370-zbvz4" Nov 26 07:30:00 crc kubenswrapper[4492]: I1126 07:30:00.332721 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5489cb28-d168-47f1-9976-98f3d6993ec0-config-volume\") pod \"collect-profiles-29402370-zbvz4\" (UID: \"5489cb28-d168-47f1-9976-98f3d6993ec0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402370-zbvz4" Nov 26 07:30:00 crc kubenswrapper[4492]: I1126 07:30:00.333718 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5489cb28-d168-47f1-9976-98f3d6993ec0-config-volume\") pod 
\"collect-profiles-29402370-zbvz4\" (UID: \"5489cb28-d168-47f1-9976-98f3d6993ec0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402370-zbvz4" Nov 26 07:30:00 crc kubenswrapper[4492]: I1126 07:30:00.337799 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5489cb28-d168-47f1-9976-98f3d6993ec0-secret-volume\") pod \"collect-profiles-29402370-zbvz4\" (UID: \"5489cb28-d168-47f1-9976-98f3d6993ec0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402370-zbvz4" Nov 26 07:30:00 crc kubenswrapper[4492]: I1126 07:30:00.347753 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-47c6z\" (UniqueName: \"kubernetes.io/projected/5489cb28-d168-47f1-9976-98f3d6993ec0-kube-api-access-47c6z\") pod \"collect-profiles-29402370-zbvz4\" (UID: \"5489cb28-d168-47f1-9976-98f3d6993ec0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402370-zbvz4" Nov 26 07:30:00 crc kubenswrapper[4492]: I1126 07:30:00.452125 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402370-zbvz4" Nov 26 07:30:01 crc kubenswrapper[4492]: I1126 07:30:00.829331 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402370-zbvz4"] Nov 26 07:30:01 crc kubenswrapper[4492]: I1126 07:30:01.200044 4492 generic.go:334] "Generic (PLEG): container finished" podID="5489cb28-d168-47f1-9976-98f3d6993ec0" containerID="0fc22a4230b1e40f5410be8b8f8e2f4aa077d6ad1a853539bde05c7ec45f429d" exitCode=0 Nov 26 07:30:01 crc kubenswrapper[4492]: I1126 07:30:01.200095 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402370-zbvz4" event={"ID":"5489cb28-d168-47f1-9976-98f3d6993ec0","Type":"ContainerDied","Data":"0fc22a4230b1e40f5410be8b8f8e2f4aa077d6ad1a853539bde05c7ec45f429d"} Nov 26 07:30:01 crc kubenswrapper[4492]: I1126 07:30:01.200122 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402370-zbvz4" event={"ID":"5489cb28-d168-47f1-9976-98f3d6993ec0","Type":"ContainerStarted","Data":"c8c7fb69affb5d94a76e66ef6530a62854afee6a7da73ad681e43b21fc8493ce"} Nov 26 07:30:01 crc kubenswrapper[4492]: I1126 07:30:01.438841 4492 scope.go:117] "RemoveContainer" containerID="bbb2c9d89253c0550a885310fa6b6be530f4d28143ea5439ce2e8887d31b7abb" Nov 26 07:30:01 crc kubenswrapper[4492]: E1126 07:30:01.439059 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 07:30:02 crc kubenswrapper[4492]: I1126 07:30:02.459696 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402370-zbvz4" Nov 26 07:30:02 crc kubenswrapper[4492]: I1126 07:30:02.588665 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5489cb28-d168-47f1-9976-98f3d6993ec0-secret-volume\") pod \"5489cb28-d168-47f1-9976-98f3d6993ec0\" (UID: \"5489cb28-d168-47f1-9976-98f3d6993ec0\") " Nov 26 07:30:02 crc kubenswrapper[4492]: I1126 07:30:02.588737 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-47c6z\" (UniqueName: \"kubernetes.io/projected/5489cb28-d168-47f1-9976-98f3d6993ec0-kube-api-access-47c6z\") pod \"5489cb28-d168-47f1-9976-98f3d6993ec0\" (UID: \"5489cb28-d168-47f1-9976-98f3d6993ec0\") " Nov 26 07:30:02 crc kubenswrapper[4492]: I1126 07:30:02.588808 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5489cb28-d168-47f1-9976-98f3d6993ec0-config-volume\") pod \"5489cb28-d168-47f1-9976-98f3d6993ec0\" (UID: \"5489cb28-d168-47f1-9976-98f3d6993ec0\") " Nov 26 07:30:02 crc kubenswrapper[4492]: I1126 07:30:02.589480 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5489cb28-d168-47f1-9976-98f3d6993ec0-config-volume" (OuterVolumeSpecName: "config-volume") pod "5489cb28-d168-47f1-9976-98f3d6993ec0" (UID: "5489cb28-d168-47f1-9976-98f3d6993ec0"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:30:02 crc kubenswrapper[4492]: I1126 07:30:02.598758 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5489cb28-d168-47f1-9976-98f3d6993ec0-kube-api-access-47c6z" (OuterVolumeSpecName: "kube-api-access-47c6z") pod "5489cb28-d168-47f1-9976-98f3d6993ec0" (UID: "5489cb28-d168-47f1-9976-98f3d6993ec0"). InnerVolumeSpecName "kube-api-access-47c6z". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:30:02 crc kubenswrapper[4492]: I1126 07:30:02.599431 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5489cb28-d168-47f1-9976-98f3d6993ec0-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "5489cb28-d168-47f1-9976-98f3d6993ec0" (UID: "5489cb28-d168-47f1-9976-98f3d6993ec0"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:30:02 crc kubenswrapper[4492]: I1126 07:30:02.690528 4492 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5489cb28-d168-47f1-9976-98f3d6993ec0-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 26 07:30:02 crc kubenswrapper[4492]: I1126 07:30:02.690566 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-47c6z\" (UniqueName: \"kubernetes.io/projected/5489cb28-d168-47f1-9976-98f3d6993ec0-kube-api-access-47c6z\") on node \"crc\" DevicePath \"\"" Nov 26 07:30:02 crc kubenswrapper[4492]: I1126 07:30:02.690576 4492 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5489cb28-d168-47f1-9976-98f3d6993ec0-config-volume\") on node \"crc\" DevicePath \"\"" Nov 26 07:30:03 crc kubenswrapper[4492]: I1126 07:30:03.216841 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402370-zbvz4" event={"ID":"5489cb28-d168-47f1-9976-98f3d6993ec0","Type":"ContainerDied","Data":"c8c7fb69affb5d94a76e66ef6530a62854afee6a7da73ad681e43b21fc8493ce"} Nov 26 07:30:03 crc kubenswrapper[4492]: I1126 07:30:03.216890 4492 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c8c7fb69affb5d94a76e66ef6530a62854afee6a7da73ad681e43b21fc8493ce" Nov 26 07:30:03 crc kubenswrapper[4492]: I1126 07:30:03.216897 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402370-zbvz4" Nov 26 07:30:03 crc kubenswrapper[4492]: I1126 07:30:03.541106 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402325-9jvn6"] Nov 26 07:30:03 crc kubenswrapper[4492]: I1126 07:30:03.551018 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402325-9jvn6"] Nov 26 07:30:04 crc kubenswrapper[4492]: I1126 07:30:04.449827 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7c5436ae-c6b0-4c8e-b45f-e580fef03690" path="/var/lib/kubelet/pods/7c5436ae-c6b0-4c8e-b45f-e580fef03690/volumes" Nov 26 07:30:15 crc kubenswrapper[4492]: I1126 07:30:15.441555 4492 scope.go:117] "RemoveContainer" containerID="bbb2c9d89253c0550a885310fa6b6be530f4d28143ea5439ce2e8887d31b7abb" Nov 26 07:30:15 crc kubenswrapper[4492]: E1126 07:30:15.442299 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 07:30:30 crc kubenswrapper[4492]: I1126 07:30:30.438613 4492 scope.go:117] "RemoveContainer" containerID="bbb2c9d89253c0550a885310fa6b6be530f4d28143ea5439ce2e8887d31b7abb" Nov 26 07:30:31 crc kubenswrapper[4492]: I1126 07:30:31.446678 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" event={"ID":"04bf18ad-d2a1-4b30-a3fa-2b6247363c82","Type":"ContainerStarted","Data":"05737428a422f68115df6510cddd9a8440e7b186d0e38423b3778d773e395f95"} Nov 26 07:30:53 crc kubenswrapper[4492]: E1126 07:30:53.683345 4492 log.go:32] 
"PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-antelope-centos9/openstack-tempest-all:1f5c0439f2433cb462b222a5bb23e629" Nov 26 07:30:53 crc kubenswrapper[4492]: E1126 07:30:53.683770 4492 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-antelope-centos9/openstack-tempest-all:1f5c0439f2433cb462b222a5bb23e629" Nov 26 07:30:53 crc kubenswrapper[4492]: E1126 07:30:53.685928 4492 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:tempest-tests-tempest-tests-runner,Image:quay.rdoproject.org/podified-antelope-centos9/openstack-tempest-all:1f5c0439f2433cb462b222a5bb23e629,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:test-operator-ephemeral-workdir,ReadOnly:false,MountPath:/var/lib/tempest,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:test-operator-ephemeral-temporary,ReadOnly:false,MountPath:/tmp,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/test_operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:test-operator-logs,ReadOnly:false,MountPath:/var/lib/tempest/external_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config,ReadOnly:true,MountPath:/etc/openstack/clouds.yaml,SubPath:clouds.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config,ReadOnly:true,MountPath:/var/lib/tempest/.config/openstack/clouds.yaml,SubPath:clouds.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config-secret,ReadOnly:false,MountPath:/etc/openstack/secure.yaml,SubPath:secure.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ca-certs,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ssh-key,ReadOnly:false,MountPath:/var/lib/tempest/id_ecdsa,SubPath:ssh_key,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-scc7s,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42480,RunAsNonRoot:*false,ReadOnlyRootFilesystem:*false,AllowPrivilegeEscalation:*true,RunAsGroup:*42480,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{EnvFromSource{Prefix:,ConfigMapRef:&ConfigMapEnvSource{LocalObjectReference:LocalObjectReference{Name:tempest-tests-tempest-custom-data-s0,},Optional:nil,},SecretRef:nil,},EnvFromSource{Prefix:,ConfigMapRef:&ConfigMapEnvSource{LocalObjectReference:LocalObjectReference{Name:tempest-tests-tempest-env-vars-s0,},Optional:nil,},SecretRef:nil,},},TerminationMessagePolicy:Fil
e,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod tempest-tests-tempest-s00-multi-thread-testing_openstack(63d7860b-0c48-4075-8658-58e2567d8abf): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 26 07:30:53 crc kubenswrapper[4492]: E1126 07:30:53.687121 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"tempest-tests-tempest-tests-runner\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/tempest-tests-tempest-s00-multi-thread-testing" podUID="63d7860b-0c48-4075-8658-58e2567d8abf" Nov 26 07:30:54 crc kubenswrapper[4492]: E1126 07:30:54.657356 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"tempest-tests-tempest-tests-runner\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-antelope-centos9/openstack-tempest-all:1f5c0439f2433cb462b222a5bb23e629\\\"\"" pod="openstack/tempest-tests-tempest-s00-multi-thread-testing" podUID="63d7860b-0c48-4075-8658-58e2567d8abf" Nov 26 07:30:59 crc kubenswrapper[4492]: I1126 07:30:59.023986 4492 scope.go:117] "RemoveContainer" containerID="a3fb4226b66e93e9f4b8e5ab19a9f686ae4bb121be382ad5f9a8949862737785" Nov 26 07:31:07 crc kubenswrapper[4492]: I1126 07:31:07.118207 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-env-vars-s0" Nov 26 07:31:08 crc kubenswrapper[4492]: I1126 07:31:08.755732 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest-s00-multi-thread-testing" event={"ID":"63d7860b-0c48-4075-8658-58e2567d8abf","Type":"ContainerStarted","Data":"f78b7e4b129d84da005fdbeae417db774cdf0e1b093c61453a424eaa20c49f8d"} Nov 26 07:31:08 crc kubenswrapper[4492]: I1126 07:31:08.771570 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/tempest-tests-tempest-s00-multi-thread-testing" podStartSLOduration=4.060432883 podStartE2EDuration="1m42.771555703s" podCreationTimestamp="2025-11-26 07:29:26 +0000 UTC" firstStartedPulling="2025-11-26 07:29:28.404492716 +0000 UTC m=+2464.288381014" lastFinishedPulling="2025-11-26 07:31:07.115615537 +0000 UTC m=+2562.999503834" observedRunningTime="2025-11-26 07:31:08.771434564 +0000 UTC m=+2564.655322863" watchObservedRunningTime="2025-11-26 07:31:08.771555703 +0000 UTC m=+2564.655444001" Nov 26 07:32:49 crc kubenswrapper[4492]: I1126 07:32:49.441922 4492 patch_prober.go:28] interesting pod/machine-config-daemon-6blv7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 07:32:49 crc kubenswrapper[4492]: I1126 07:32:49.443392 4492 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 07:33:19 crc kubenswrapper[4492]: I1126 07:33:19.441205 4492 patch_prober.go:28] interesting pod/machine-config-daemon-6blv7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get 
\"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 07:33:19 crc kubenswrapper[4492]: I1126 07:33:19.441681 4492 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 07:33:49 crc kubenswrapper[4492]: I1126 07:33:49.441867 4492 patch_prober.go:28] interesting pod/machine-config-daemon-6blv7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 07:33:49 crc kubenswrapper[4492]: I1126 07:33:49.442306 4492 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 07:33:49 crc kubenswrapper[4492]: I1126 07:33:49.442881 4492 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" Nov 26 07:33:49 crc kubenswrapper[4492]: I1126 07:33:49.443827 4492 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"05737428a422f68115df6510cddd9a8440e7b186d0e38423b3778d773e395f95"} pod="openshift-machine-config-operator/machine-config-daemon-6blv7" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 07:33:49 crc kubenswrapper[4492]: I1126 07:33:49.444808 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" containerID="cri-o://05737428a422f68115df6510cddd9a8440e7b186d0e38423b3778d773e395f95" gracePeriod=600 Nov 26 07:33:50 crc kubenswrapper[4492]: I1126 07:33:50.032196 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" event={"ID":"04bf18ad-d2a1-4b30-a3fa-2b6247363c82","Type":"ContainerDied","Data":"05737428a422f68115df6510cddd9a8440e7b186d0e38423b3778d773e395f95"} Nov 26 07:33:50 crc kubenswrapper[4492]: I1126 07:33:50.032186 4492 generic.go:334] "Generic (PLEG): container finished" podID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerID="05737428a422f68115df6510cddd9a8440e7b186d0e38423b3778d773e395f95" exitCode=0 Nov 26 07:33:50 crc kubenswrapper[4492]: I1126 07:33:50.032531 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" event={"ID":"04bf18ad-d2a1-4b30-a3fa-2b6247363c82","Type":"ContainerStarted","Data":"79f63c8468d8f2a9f7913e668d160bf920560cab61936f2922bc0a2a52c08d34"} Nov 26 07:33:50 crc kubenswrapper[4492]: I1126 07:33:50.033307 4492 scope.go:117] "RemoveContainer" containerID="bbb2c9d89253c0550a885310fa6b6be530f4d28143ea5439ce2e8887d31b7abb" Nov 26 07:35:49 crc kubenswrapper[4492]: I1126 07:35:49.443677 4492 patch_prober.go:28] interesting pod/machine-config-daemon-6blv7 container/machine-config-daemon 
namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 07:35:49 crc kubenswrapper[4492]: I1126 07:35:49.447220 4492 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 07:36:09 crc kubenswrapper[4492]: I1126 07:36:09.270601 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-6hqbh"] Nov 26 07:36:09 crc kubenswrapper[4492]: E1126 07:36:09.274547 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5489cb28-d168-47f1-9976-98f3d6993ec0" containerName="collect-profiles" Nov 26 07:36:09 crc kubenswrapper[4492]: I1126 07:36:09.275009 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="5489cb28-d168-47f1-9976-98f3d6993ec0" containerName="collect-profiles" Nov 26 07:36:09 crc kubenswrapper[4492]: I1126 07:36:09.275950 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="5489cb28-d168-47f1-9976-98f3d6993ec0" containerName="collect-profiles" Nov 26 07:36:09 crc kubenswrapper[4492]: I1126 07:36:09.281093 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6hqbh" Nov 26 07:36:09 crc kubenswrapper[4492]: I1126 07:36:09.337153 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8c56q\" (UniqueName: \"kubernetes.io/projected/0d6034b0-372d-401f-8891-1b64f6582d19-kube-api-access-8c56q\") pod \"redhat-marketplace-6hqbh\" (UID: \"0d6034b0-372d-401f-8891-1b64f6582d19\") " pod="openshift-marketplace/redhat-marketplace-6hqbh" Nov 26 07:36:09 crc kubenswrapper[4492]: I1126 07:36:09.337418 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0d6034b0-372d-401f-8891-1b64f6582d19-catalog-content\") pod \"redhat-marketplace-6hqbh\" (UID: \"0d6034b0-372d-401f-8891-1b64f6582d19\") " pod="openshift-marketplace/redhat-marketplace-6hqbh" Nov 26 07:36:09 crc kubenswrapper[4492]: I1126 07:36:09.337529 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0d6034b0-372d-401f-8891-1b64f6582d19-utilities\") pod \"redhat-marketplace-6hqbh\" (UID: \"0d6034b0-372d-401f-8891-1b64f6582d19\") " pod="openshift-marketplace/redhat-marketplace-6hqbh" Nov 26 07:36:09 crc kubenswrapper[4492]: I1126 07:36:09.400570 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-6hqbh"] Nov 26 07:36:09 crc kubenswrapper[4492]: I1126 07:36:09.439051 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0d6034b0-372d-401f-8891-1b64f6582d19-utilities\") pod \"redhat-marketplace-6hqbh\" (UID: \"0d6034b0-372d-401f-8891-1b64f6582d19\") " pod="openshift-marketplace/redhat-marketplace-6hqbh" Nov 26 07:36:09 crc kubenswrapper[4492]: I1126 07:36:09.439117 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8c56q\" (UniqueName: 
\"kubernetes.io/projected/0d6034b0-372d-401f-8891-1b64f6582d19-kube-api-access-8c56q\") pod \"redhat-marketplace-6hqbh\" (UID: \"0d6034b0-372d-401f-8891-1b64f6582d19\") " pod="openshift-marketplace/redhat-marketplace-6hqbh" Nov 26 07:36:09 crc kubenswrapper[4492]: I1126 07:36:09.439156 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0d6034b0-372d-401f-8891-1b64f6582d19-catalog-content\") pod \"redhat-marketplace-6hqbh\" (UID: \"0d6034b0-372d-401f-8891-1b64f6582d19\") " pod="openshift-marketplace/redhat-marketplace-6hqbh" Nov 26 07:36:09 crc kubenswrapper[4492]: I1126 07:36:09.442459 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0d6034b0-372d-401f-8891-1b64f6582d19-catalog-content\") pod \"redhat-marketplace-6hqbh\" (UID: \"0d6034b0-372d-401f-8891-1b64f6582d19\") " pod="openshift-marketplace/redhat-marketplace-6hqbh" Nov 26 07:36:09 crc kubenswrapper[4492]: I1126 07:36:09.443088 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0d6034b0-372d-401f-8891-1b64f6582d19-utilities\") pod \"redhat-marketplace-6hqbh\" (UID: \"0d6034b0-372d-401f-8891-1b64f6582d19\") " pod="openshift-marketplace/redhat-marketplace-6hqbh" Nov 26 07:36:09 crc kubenswrapper[4492]: I1126 07:36:09.470886 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8c56q\" (UniqueName: \"kubernetes.io/projected/0d6034b0-372d-401f-8891-1b64f6582d19-kube-api-access-8c56q\") pod \"redhat-marketplace-6hqbh\" (UID: \"0d6034b0-372d-401f-8891-1b64f6582d19\") " pod="openshift-marketplace/redhat-marketplace-6hqbh" Nov 26 07:36:09 crc kubenswrapper[4492]: I1126 07:36:09.602318 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6hqbh" Nov 26 07:36:10 crc kubenswrapper[4492]: I1126 07:36:10.501206 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-6hqbh"] Nov 26 07:36:11 crc kubenswrapper[4492]: I1126 07:36:11.200360 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6hqbh" event={"ID":"0d6034b0-372d-401f-8891-1b64f6582d19","Type":"ContainerDied","Data":"e4cdc182881900f9c2cecb93fd7d4aabcc59628ea512ab41f602fe179c366809"} Nov 26 07:36:11 crc kubenswrapper[4492]: I1126 07:36:11.200573 4492 generic.go:334] "Generic (PLEG): container finished" podID="0d6034b0-372d-401f-8891-1b64f6582d19" containerID="e4cdc182881900f9c2cecb93fd7d4aabcc59628ea512ab41f602fe179c366809" exitCode=0 Nov 26 07:36:11 crc kubenswrapper[4492]: I1126 07:36:11.201000 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6hqbh" event={"ID":"0d6034b0-372d-401f-8891-1b64f6582d19","Type":"ContainerStarted","Data":"077025029c6455f5e50bfadc94df84b333bb11340a26331a16888693a599a2f6"} Nov 26 07:36:11 crc kubenswrapper[4492]: I1126 07:36:11.206743 4492 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 26 07:36:12 crc kubenswrapper[4492]: I1126 07:36:12.209394 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6hqbh" event={"ID":"0d6034b0-372d-401f-8891-1b64f6582d19","Type":"ContainerStarted","Data":"63fa92d02ad3b4e838a1fd22103bb65c54807c4b1510ca6abd1b790e1bf2933e"} Nov 26 07:36:13 crc kubenswrapper[4492]: I1126 07:36:13.221014 4492 generic.go:334] "Generic (PLEG): container finished" podID="0d6034b0-372d-401f-8891-1b64f6582d19" containerID="63fa92d02ad3b4e838a1fd22103bb65c54807c4b1510ca6abd1b790e1bf2933e" exitCode=0 Nov 26 07:36:13 crc kubenswrapper[4492]: I1126 07:36:13.221866 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6hqbh" event={"ID":"0d6034b0-372d-401f-8891-1b64f6582d19","Type":"ContainerDied","Data":"63fa92d02ad3b4e838a1fd22103bb65c54807c4b1510ca6abd1b790e1bf2933e"} Nov 26 07:36:14 crc kubenswrapper[4492]: I1126 07:36:14.233333 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6hqbh" event={"ID":"0d6034b0-372d-401f-8891-1b64f6582d19","Type":"ContainerStarted","Data":"22c7173b8c3adca1ad4ee86e009768881caaee1a28fe2a58c2bd20ba669e0234"} Nov 26 07:36:14 crc kubenswrapper[4492]: I1126 07:36:14.264609 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-6hqbh" podStartSLOduration=2.727577 podStartE2EDuration="5.262526707s" podCreationTimestamp="2025-11-26 07:36:09 +0000 UTC" firstStartedPulling="2025-11-26 07:36:11.203145854 +0000 UTC m=+2867.087034152" lastFinishedPulling="2025-11-26 07:36:13.738095561 +0000 UTC m=+2869.621983859" observedRunningTime="2025-11-26 07:36:14.250895667 +0000 UTC m=+2870.134783965" watchObservedRunningTime="2025-11-26 07:36:14.262526707 +0000 UTC m=+2870.146415004" Nov 26 07:36:19 crc kubenswrapper[4492]: I1126 07:36:19.441212 4492 patch_prober.go:28] interesting pod/machine-config-daemon-6blv7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 07:36:19 crc 
kubenswrapper[4492]: I1126 07:36:19.441846 4492 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 07:36:19 crc kubenswrapper[4492]: I1126 07:36:19.602679 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-6hqbh" Nov 26 07:36:19 crc kubenswrapper[4492]: I1126 07:36:19.602751 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-6hqbh" Nov 26 07:36:19 crc kubenswrapper[4492]: I1126 07:36:19.745872 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-6hqbh" Nov 26 07:36:20 crc kubenswrapper[4492]: I1126 07:36:20.333480 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-6hqbh" Nov 26 07:36:20 crc kubenswrapper[4492]: I1126 07:36:20.390383 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-6hqbh"] Nov 26 07:36:22 crc kubenswrapper[4492]: I1126 07:36:22.304021 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-6hqbh" podUID="0d6034b0-372d-401f-8891-1b64f6582d19" containerName="registry-server" containerID="cri-o://22c7173b8c3adca1ad4ee86e009768881caaee1a28fe2a58c2bd20ba669e0234" gracePeriod=2 Nov 26 07:36:23 crc kubenswrapper[4492]: I1126 07:36:23.157912 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6hqbh" Nov 26 07:36:23 crc kubenswrapper[4492]: I1126 07:36:23.249586 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8c56q\" (UniqueName: \"kubernetes.io/projected/0d6034b0-372d-401f-8891-1b64f6582d19-kube-api-access-8c56q\") pod \"0d6034b0-372d-401f-8891-1b64f6582d19\" (UID: \"0d6034b0-372d-401f-8891-1b64f6582d19\") " Nov 26 07:36:23 crc kubenswrapper[4492]: I1126 07:36:23.249776 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0d6034b0-372d-401f-8891-1b64f6582d19-utilities\") pod \"0d6034b0-372d-401f-8891-1b64f6582d19\" (UID: \"0d6034b0-372d-401f-8891-1b64f6582d19\") " Nov 26 07:36:23 crc kubenswrapper[4492]: I1126 07:36:23.249892 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0d6034b0-372d-401f-8891-1b64f6582d19-catalog-content\") pod \"0d6034b0-372d-401f-8891-1b64f6582d19\" (UID: \"0d6034b0-372d-401f-8891-1b64f6582d19\") " Nov 26 07:36:23 crc kubenswrapper[4492]: I1126 07:36:23.253251 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0d6034b0-372d-401f-8891-1b64f6582d19-utilities" (OuterVolumeSpecName: "utilities") pod "0d6034b0-372d-401f-8891-1b64f6582d19" (UID: "0d6034b0-372d-401f-8891-1b64f6582d19"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:36:23 crc kubenswrapper[4492]: I1126 07:36:23.272479 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0d6034b0-372d-401f-8891-1b64f6582d19-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0d6034b0-372d-401f-8891-1b64f6582d19" (UID: "0d6034b0-372d-401f-8891-1b64f6582d19"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:36:23 crc kubenswrapper[4492]: I1126 07:36:23.282210 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0d6034b0-372d-401f-8891-1b64f6582d19-kube-api-access-8c56q" (OuterVolumeSpecName: "kube-api-access-8c56q") pod "0d6034b0-372d-401f-8891-1b64f6582d19" (UID: "0d6034b0-372d-401f-8891-1b64f6582d19"). InnerVolumeSpecName "kube-api-access-8c56q". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:36:23 crc kubenswrapper[4492]: I1126 07:36:23.328311 4492 generic.go:334] "Generic (PLEG): container finished" podID="0d6034b0-372d-401f-8891-1b64f6582d19" containerID="22c7173b8c3adca1ad4ee86e009768881caaee1a28fe2a58c2bd20ba669e0234" exitCode=0 Nov 26 07:36:23 crc kubenswrapper[4492]: I1126 07:36:23.328383 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6hqbh" event={"ID":"0d6034b0-372d-401f-8891-1b64f6582d19","Type":"ContainerDied","Data":"22c7173b8c3adca1ad4ee86e009768881caaee1a28fe2a58c2bd20ba669e0234"} Nov 26 07:36:23 crc kubenswrapper[4492]: I1126 07:36:23.328438 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6hqbh" event={"ID":"0d6034b0-372d-401f-8891-1b64f6582d19","Type":"ContainerDied","Data":"077025029c6455f5e50bfadc94df84b333bb11340a26331a16888693a599a2f6"} Nov 26 07:36:23 crc kubenswrapper[4492]: I1126 07:36:23.329085 4492 scope.go:117] "RemoveContainer" containerID="22c7173b8c3adca1ad4ee86e009768881caaee1a28fe2a58c2bd20ba669e0234" Nov 26 07:36:23 crc kubenswrapper[4492]: I1126 07:36:23.329298 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6hqbh" Nov 26 07:36:23 crc kubenswrapper[4492]: I1126 07:36:23.354259 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8c56q\" (UniqueName: \"kubernetes.io/projected/0d6034b0-372d-401f-8891-1b64f6582d19-kube-api-access-8c56q\") on node \"crc\" DevicePath \"\"" Nov 26 07:36:23 crc kubenswrapper[4492]: I1126 07:36:23.354296 4492 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0d6034b0-372d-401f-8891-1b64f6582d19-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 07:36:23 crc kubenswrapper[4492]: I1126 07:36:23.354307 4492 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0d6034b0-372d-401f-8891-1b64f6582d19-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 07:36:23 crc kubenswrapper[4492]: I1126 07:36:23.395528 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-6hqbh"] Nov 26 07:36:23 crc kubenswrapper[4492]: I1126 07:36:23.409997 4492 scope.go:117] "RemoveContainer" containerID="63fa92d02ad3b4e838a1fd22103bb65c54807c4b1510ca6abd1b790e1bf2933e" Nov 26 07:36:23 crc kubenswrapper[4492]: I1126 07:36:23.410248 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-6hqbh"] Nov 26 07:36:23 crc kubenswrapper[4492]: I1126 07:36:23.463459 4492 scope.go:117] "RemoveContainer" containerID="e4cdc182881900f9c2cecb93fd7d4aabcc59628ea512ab41f602fe179c366809" Nov 26 07:36:23 crc kubenswrapper[4492]: I1126 07:36:23.523843 4492 scope.go:117] "RemoveContainer" containerID="22c7173b8c3adca1ad4ee86e009768881caaee1a28fe2a58c2bd20ba669e0234" Nov 26 07:36:23 crc kubenswrapper[4492]: E1126 07:36:23.526872 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"22c7173b8c3adca1ad4ee86e009768881caaee1a28fe2a58c2bd20ba669e0234\": container with ID starting with 22c7173b8c3adca1ad4ee86e009768881caaee1a28fe2a58c2bd20ba669e0234 not found: ID does not exist" containerID="22c7173b8c3adca1ad4ee86e009768881caaee1a28fe2a58c2bd20ba669e0234" Nov 26 07:36:23 crc kubenswrapper[4492]: I1126 07:36:23.527216 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"22c7173b8c3adca1ad4ee86e009768881caaee1a28fe2a58c2bd20ba669e0234"} err="failed to get container status \"22c7173b8c3adca1ad4ee86e009768881caaee1a28fe2a58c2bd20ba669e0234\": rpc error: code = NotFound desc = could not find container \"22c7173b8c3adca1ad4ee86e009768881caaee1a28fe2a58c2bd20ba669e0234\": container with ID starting with 22c7173b8c3adca1ad4ee86e009768881caaee1a28fe2a58c2bd20ba669e0234 not found: ID does not exist" Nov 26 07:36:23 crc kubenswrapper[4492]: I1126 07:36:23.527249 4492 scope.go:117] "RemoveContainer" containerID="63fa92d02ad3b4e838a1fd22103bb65c54807c4b1510ca6abd1b790e1bf2933e" Nov 26 07:36:23 crc kubenswrapper[4492]: E1126 07:36:23.527638 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"63fa92d02ad3b4e838a1fd22103bb65c54807c4b1510ca6abd1b790e1bf2933e\": container with ID starting with 63fa92d02ad3b4e838a1fd22103bb65c54807c4b1510ca6abd1b790e1bf2933e not found: ID does not exist" containerID="63fa92d02ad3b4e838a1fd22103bb65c54807c4b1510ca6abd1b790e1bf2933e" Nov 26 07:36:23 crc kubenswrapper[4492]: I1126 07:36:23.527670 4492 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"63fa92d02ad3b4e838a1fd22103bb65c54807c4b1510ca6abd1b790e1bf2933e"} err="failed to get container status \"63fa92d02ad3b4e838a1fd22103bb65c54807c4b1510ca6abd1b790e1bf2933e\": rpc error: code = NotFound desc = could not find container \"63fa92d02ad3b4e838a1fd22103bb65c54807c4b1510ca6abd1b790e1bf2933e\": container with ID starting with 63fa92d02ad3b4e838a1fd22103bb65c54807c4b1510ca6abd1b790e1bf2933e not found: ID does not exist" Nov 26 07:36:23 crc kubenswrapper[4492]: I1126 07:36:23.527686 4492 scope.go:117] "RemoveContainer" containerID="e4cdc182881900f9c2cecb93fd7d4aabcc59628ea512ab41f602fe179c366809" Nov 26 07:36:23 crc kubenswrapper[4492]: E1126 07:36:23.527920 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e4cdc182881900f9c2cecb93fd7d4aabcc59628ea512ab41f602fe179c366809\": container with ID starting with e4cdc182881900f9c2cecb93fd7d4aabcc59628ea512ab41f602fe179c366809 not found: ID does not exist" containerID="e4cdc182881900f9c2cecb93fd7d4aabcc59628ea512ab41f602fe179c366809" Nov 26 07:36:23 crc kubenswrapper[4492]: I1126 07:36:23.527946 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e4cdc182881900f9c2cecb93fd7d4aabcc59628ea512ab41f602fe179c366809"} err="failed to get container status \"e4cdc182881900f9c2cecb93fd7d4aabcc59628ea512ab41f602fe179c366809\": rpc error: code = NotFound desc = could not find container \"e4cdc182881900f9c2cecb93fd7d4aabcc59628ea512ab41f602fe179c366809\": container with ID starting with e4cdc182881900f9c2cecb93fd7d4aabcc59628ea512ab41f602fe179c366809 not found: ID does not exist" Nov 26 07:36:24 crc kubenswrapper[4492]: I1126 07:36:24.448442 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0d6034b0-372d-401f-8891-1b64f6582d19" path="/var/lib/kubelet/pods/0d6034b0-372d-401f-8891-1b64f6582d19/volumes" Nov 26 07:36:49 crc kubenswrapper[4492]: I1126 07:36:49.444188 4492 patch_prober.go:28] interesting pod/machine-config-daemon-6blv7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 07:36:49 crc kubenswrapper[4492]: I1126 07:36:49.446235 4492 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 07:36:49 crc kubenswrapper[4492]: I1126 07:36:49.446720 4492 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" Nov 26 07:36:49 crc kubenswrapper[4492]: I1126 07:36:49.447861 4492 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"79f63c8468d8f2a9f7913e668d160bf920560cab61936f2922bc0a2a52c08d34"} pod="openshift-machine-config-operator/machine-config-daemon-6blv7" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 07:36:49 crc kubenswrapper[4492]: I1126 07:36:49.448258 4492 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" containerID="cri-o://79f63c8468d8f2a9f7913e668d160bf920560cab61936f2922bc0a2a52c08d34" gracePeriod=600 Nov 26 07:36:49 crc kubenswrapper[4492]: E1126 07:36:49.587282 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 07:36:50 crc kubenswrapper[4492]: I1126 07:36:50.576453 4492 generic.go:334] "Generic (PLEG): container finished" podID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerID="79f63c8468d8f2a9f7913e668d160bf920560cab61936f2922bc0a2a52c08d34" exitCode=0 Nov 26 07:36:50 crc kubenswrapper[4492]: I1126 07:36:50.576513 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" event={"ID":"04bf18ad-d2a1-4b30-a3fa-2b6247363c82","Type":"ContainerDied","Data":"79f63c8468d8f2a9f7913e668d160bf920560cab61936f2922bc0a2a52c08d34"} Nov 26 07:36:50 crc kubenswrapper[4492]: I1126 07:36:50.577145 4492 scope.go:117] "RemoveContainer" containerID="05737428a422f68115df6510cddd9a8440e7b186d0e38423b3778d773e395f95" Nov 26 07:36:50 crc kubenswrapper[4492]: I1126 07:36:50.578377 4492 scope.go:117] "RemoveContainer" containerID="79f63c8468d8f2a9f7913e668d160bf920560cab61936f2922bc0a2a52c08d34" Nov 26 07:36:50 crc kubenswrapper[4492]: E1126 07:36:50.578947 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 07:37:02 crc kubenswrapper[4492]: I1126 07:37:02.440402 4492 scope.go:117] "RemoveContainer" containerID="79f63c8468d8f2a9f7913e668d160bf920560cab61936f2922bc0a2a52c08d34" Nov 26 07:37:02 crc kubenswrapper[4492]: E1126 07:37:02.442367 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 07:37:09 crc kubenswrapper[4492]: I1126 07:37:09.585417 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-vmz5f"] Nov 26 07:37:09 crc kubenswrapper[4492]: E1126 07:37:09.588358 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d6034b0-372d-401f-8891-1b64f6582d19" containerName="registry-server" Nov 26 07:37:09 crc kubenswrapper[4492]: I1126 07:37:09.588389 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d6034b0-372d-401f-8891-1b64f6582d19" containerName="registry-server" Nov 26 07:37:09 crc kubenswrapper[4492]: E1126 07:37:09.588582 4492 cpu_manager.go:410] "RemoveStaleState: removing 
container" podUID="0d6034b0-372d-401f-8891-1b64f6582d19" containerName="extract-content" Nov 26 07:37:09 crc kubenswrapper[4492]: I1126 07:37:09.588598 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d6034b0-372d-401f-8891-1b64f6582d19" containerName="extract-content" Nov 26 07:37:09 crc kubenswrapper[4492]: E1126 07:37:09.588627 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d6034b0-372d-401f-8891-1b64f6582d19" containerName="extract-utilities" Nov 26 07:37:09 crc kubenswrapper[4492]: I1126 07:37:09.588633 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d6034b0-372d-401f-8891-1b64f6582d19" containerName="extract-utilities" Nov 26 07:37:09 crc kubenswrapper[4492]: I1126 07:37:09.589315 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="0d6034b0-372d-401f-8891-1b64f6582d19" containerName="registry-server" Nov 26 07:37:09 crc kubenswrapper[4492]: I1126 07:37:09.593254 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-vmz5f" Nov 26 07:37:09 crc kubenswrapper[4492]: I1126 07:37:09.616201 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/98d54f1d-cd1f-4d83-bac3-e21baacc15cf-catalog-content\") pod \"certified-operators-vmz5f\" (UID: \"98d54f1d-cd1f-4d83-bac3-e21baacc15cf\") " pod="openshift-marketplace/certified-operators-vmz5f" Nov 26 07:37:09 crc kubenswrapper[4492]: I1126 07:37:09.616346 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/98d54f1d-cd1f-4d83-bac3-e21baacc15cf-utilities\") pod \"certified-operators-vmz5f\" (UID: \"98d54f1d-cd1f-4d83-bac3-e21baacc15cf\") " pod="openshift-marketplace/certified-operators-vmz5f" Nov 26 07:37:09 crc kubenswrapper[4492]: I1126 07:37:09.616403 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zrkhl\" (UniqueName: \"kubernetes.io/projected/98d54f1d-cd1f-4d83-bac3-e21baacc15cf-kube-api-access-zrkhl\") pod \"certified-operators-vmz5f\" (UID: \"98d54f1d-cd1f-4d83-bac3-e21baacc15cf\") " pod="openshift-marketplace/certified-operators-vmz5f" Nov 26 07:37:09 crc kubenswrapper[4492]: I1126 07:37:09.693728 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-vmz5f"] Nov 26 07:37:09 crc kubenswrapper[4492]: I1126 07:37:09.719097 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/98d54f1d-cd1f-4d83-bac3-e21baacc15cf-catalog-content\") pod \"certified-operators-vmz5f\" (UID: \"98d54f1d-cd1f-4d83-bac3-e21baacc15cf\") " pod="openshift-marketplace/certified-operators-vmz5f" Nov 26 07:37:09 crc kubenswrapper[4492]: I1126 07:37:09.719226 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/98d54f1d-cd1f-4d83-bac3-e21baacc15cf-utilities\") pod \"certified-operators-vmz5f\" (UID: \"98d54f1d-cd1f-4d83-bac3-e21baacc15cf\") " pod="openshift-marketplace/certified-operators-vmz5f" Nov 26 07:37:09 crc kubenswrapper[4492]: I1126 07:37:09.719273 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zrkhl\" (UniqueName: \"kubernetes.io/projected/98d54f1d-cd1f-4d83-bac3-e21baacc15cf-kube-api-access-zrkhl\") 
pod \"certified-operators-vmz5f\" (UID: \"98d54f1d-cd1f-4d83-bac3-e21baacc15cf\") " pod="openshift-marketplace/certified-operators-vmz5f" Nov 26 07:37:09 crc kubenswrapper[4492]: I1126 07:37:09.730126 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/98d54f1d-cd1f-4d83-bac3-e21baacc15cf-utilities\") pod \"certified-operators-vmz5f\" (UID: \"98d54f1d-cd1f-4d83-bac3-e21baacc15cf\") " pod="openshift-marketplace/certified-operators-vmz5f" Nov 26 07:37:09 crc kubenswrapper[4492]: I1126 07:37:09.733566 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/98d54f1d-cd1f-4d83-bac3-e21baacc15cf-catalog-content\") pod \"certified-operators-vmz5f\" (UID: \"98d54f1d-cd1f-4d83-bac3-e21baacc15cf\") " pod="openshift-marketplace/certified-operators-vmz5f" Nov 26 07:37:09 crc kubenswrapper[4492]: I1126 07:37:09.769784 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zrkhl\" (UniqueName: \"kubernetes.io/projected/98d54f1d-cd1f-4d83-bac3-e21baacc15cf-kube-api-access-zrkhl\") pod \"certified-operators-vmz5f\" (UID: \"98d54f1d-cd1f-4d83-bac3-e21baacc15cf\") " pod="openshift-marketplace/certified-operators-vmz5f" Nov 26 07:37:09 crc kubenswrapper[4492]: I1126 07:37:09.928610 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-vmz5f" Nov 26 07:37:10 crc kubenswrapper[4492]: I1126 07:37:10.754573 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-vmz5f"] Nov 26 07:37:11 crc kubenswrapper[4492]: I1126 07:37:11.792617 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vmz5f" event={"ID":"98d54f1d-cd1f-4d83-bac3-e21baacc15cf","Type":"ContainerDied","Data":"283c3f0dc743cd16ec85078f3d96e94589131abd7b0887eec3a1755a6640995c"} Nov 26 07:37:11 crc kubenswrapper[4492]: I1126 07:37:11.792551 4492 generic.go:334] "Generic (PLEG): container finished" podID="98d54f1d-cd1f-4d83-bac3-e21baacc15cf" containerID="283c3f0dc743cd16ec85078f3d96e94589131abd7b0887eec3a1755a6640995c" exitCode=0 Nov 26 07:37:11 crc kubenswrapper[4492]: I1126 07:37:11.793245 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vmz5f" event={"ID":"98d54f1d-cd1f-4d83-bac3-e21baacc15cf","Type":"ContainerStarted","Data":"7cba650894980f212e48011da8cf49858839b4e8c72d07990d69d567eed77464"} Nov 26 07:37:13 crc kubenswrapper[4492]: I1126 07:37:13.813385 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vmz5f" event={"ID":"98d54f1d-cd1f-4d83-bac3-e21baacc15cf","Type":"ContainerStarted","Data":"eb27a0fa24b205fc239a87dd69dcd3ab660902bc589384d08035d9e69197f896"} Nov 26 07:37:14 crc kubenswrapper[4492]: I1126 07:37:14.447887 4492 scope.go:117] "RemoveContainer" containerID="79f63c8468d8f2a9f7913e668d160bf920560cab61936f2922bc0a2a52c08d34" Nov 26 07:37:14 crc kubenswrapper[4492]: E1126 07:37:14.449267 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" 
podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 07:37:14 crc kubenswrapper[4492]: I1126 07:37:14.822741 4492 generic.go:334] "Generic (PLEG): container finished" podID="98d54f1d-cd1f-4d83-bac3-e21baacc15cf" containerID="eb27a0fa24b205fc239a87dd69dcd3ab660902bc589384d08035d9e69197f896" exitCode=0 Nov 26 07:37:14 crc kubenswrapper[4492]: I1126 07:37:14.822789 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vmz5f" event={"ID":"98d54f1d-cd1f-4d83-bac3-e21baacc15cf","Type":"ContainerDied","Data":"eb27a0fa24b205fc239a87dd69dcd3ab660902bc589384d08035d9e69197f896"} Nov 26 07:37:15 crc kubenswrapper[4492]: I1126 07:37:15.833296 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vmz5f" event={"ID":"98d54f1d-cd1f-4d83-bac3-e21baacc15cf","Type":"ContainerStarted","Data":"c04f29b8add33f8f4a4e890fb74fb0ee96b0464985b7d4b15a4117b30669d275"} Nov 26 07:37:15 crc kubenswrapper[4492]: I1126 07:37:15.858860 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-vmz5f" podStartSLOduration=3.23321563 podStartE2EDuration="6.857605075s" podCreationTimestamp="2025-11-26 07:37:09 +0000 UTC" firstStartedPulling="2025-11-26 07:37:11.794743463 +0000 UTC m=+2927.678631760" lastFinishedPulling="2025-11-26 07:37:15.419132906 +0000 UTC m=+2931.303021205" observedRunningTime="2025-11-26 07:37:15.847302704 +0000 UTC m=+2931.731191002" watchObservedRunningTime="2025-11-26 07:37:15.857605075 +0000 UTC m=+2931.741493373" Nov 26 07:37:19 crc kubenswrapper[4492]: I1126 07:37:19.929886 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-vmz5f" Nov 26 07:37:19 crc kubenswrapper[4492]: I1126 07:37:19.930446 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-vmz5f" Nov 26 07:37:20 crc kubenswrapper[4492]: I1126 07:37:20.969103 4492 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-vmz5f" podUID="98d54f1d-cd1f-4d83-bac3-e21baacc15cf" containerName="registry-server" probeResult="failure" output=< Nov 26 07:37:20 crc kubenswrapper[4492]: timeout: failed to connect service ":50051" within 1s Nov 26 07:37:20 crc kubenswrapper[4492]: > Nov 26 07:37:27 crc kubenswrapper[4492]: I1126 07:37:27.440152 4492 scope.go:117] "RemoveContainer" containerID="79f63c8468d8f2a9f7913e668d160bf920560cab61936f2922bc0a2a52c08d34" Nov 26 07:37:27 crc kubenswrapper[4492]: E1126 07:37:27.442256 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 07:37:29 crc kubenswrapper[4492]: I1126 07:37:29.997489 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-vmz5f" Nov 26 07:37:30 crc kubenswrapper[4492]: I1126 07:37:30.038035 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-vmz5f" Nov 26 07:37:30 crc kubenswrapper[4492]: I1126 07:37:30.606843 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openshift-marketplace/certified-operators-vmz5f"] Nov 26 07:37:31 crc kubenswrapper[4492]: I1126 07:37:31.979213 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-vmz5f" podUID="98d54f1d-cd1f-4d83-bac3-e21baacc15cf" containerName="registry-server" containerID="cri-o://c04f29b8add33f8f4a4e890fb74fb0ee96b0464985b7d4b15a4117b30669d275" gracePeriod=2 Nov 26 07:37:32 crc kubenswrapper[4492]: I1126 07:37:32.995158 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vmz5f" event={"ID":"98d54f1d-cd1f-4d83-bac3-e21baacc15cf","Type":"ContainerDied","Data":"c04f29b8add33f8f4a4e890fb74fb0ee96b0464985b7d4b15a4117b30669d275"} Nov 26 07:37:32 crc kubenswrapper[4492]: I1126 07:37:32.995084 4492 generic.go:334] "Generic (PLEG): container finished" podID="98d54f1d-cd1f-4d83-bac3-e21baacc15cf" containerID="c04f29b8add33f8f4a4e890fb74fb0ee96b0464985b7d4b15a4117b30669d275" exitCode=0 Nov 26 07:37:33 crc kubenswrapper[4492]: I1126 07:37:33.163153 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-vmz5f" Nov 26 07:37:33 crc kubenswrapper[4492]: I1126 07:37:33.260133 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/98d54f1d-cd1f-4d83-bac3-e21baacc15cf-utilities\") pod \"98d54f1d-cd1f-4d83-bac3-e21baacc15cf\" (UID: \"98d54f1d-cd1f-4d83-bac3-e21baacc15cf\") " Nov 26 07:37:33 crc kubenswrapper[4492]: I1126 07:37:33.260361 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/98d54f1d-cd1f-4d83-bac3-e21baacc15cf-catalog-content\") pod \"98d54f1d-cd1f-4d83-bac3-e21baacc15cf\" (UID: \"98d54f1d-cd1f-4d83-bac3-e21baacc15cf\") " Nov 26 07:37:33 crc kubenswrapper[4492]: I1126 07:37:33.260475 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zrkhl\" (UniqueName: \"kubernetes.io/projected/98d54f1d-cd1f-4d83-bac3-e21baacc15cf-kube-api-access-zrkhl\") pod \"98d54f1d-cd1f-4d83-bac3-e21baacc15cf\" (UID: \"98d54f1d-cd1f-4d83-bac3-e21baacc15cf\") " Nov 26 07:37:33 crc kubenswrapper[4492]: I1126 07:37:33.265346 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/98d54f1d-cd1f-4d83-bac3-e21baacc15cf-utilities" (OuterVolumeSpecName: "utilities") pod "98d54f1d-cd1f-4d83-bac3-e21baacc15cf" (UID: "98d54f1d-cd1f-4d83-bac3-e21baacc15cf"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:37:33 crc kubenswrapper[4492]: I1126 07:37:33.287109 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/98d54f1d-cd1f-4d83-bac3-e21baacc15cf-kube-api-access-zrkhl" (OuterVolumeSpecName: "kube-api-access-zrkhl") pod "98d54f1d-cd1f-4d83-bac3-e21baacc15cf" (UID: "98d54f1d-cd1f-4d83-bac3-e21baacc15cf"). InnerVolumeSpecName "kube-api-access-zrkhl". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:37:33 crc kubenswrapper[4492]: I1126 07:37:33.365089 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zrkhl\" (UniqueName: \"kubernetes.io/projected/98d54f1d-cd1f-4d83-bac3-e21baacc15cf-kube-api-access-zrkhl\") on node \"crc\" DevicePath \"\"" Nov 26 07:37:33 crc kubenswrapper[4492]: I1126 07:37:33.365454 4492 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/98d54f1d-cd1f-4d83-bac3-e21baacc15cf-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 07:37:33 crc kubenswrapper[4492]: I1126 07:37:33.377715 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/98d54f1d-cd1f-4d83-bac3-e21baacc15cf-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "98d54f1d-cd1f-4d83-bac3-e21baacc15cf" (UID: "98d54f1d-cd1f-4d83-bac3-e21baacc15cf"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:37:33 crc kubenswrapper[4492]: I1126 07:37:33.467904 4492 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/98d54f1d-cd1f-4d83-bac3-e21baacc15cf-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 07:37:34 crc kubenswrapper[4492]: I1126 07:37:34.023081 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vmz5f" event={"ID":"98d54f1d-cd1f-4d83-bac3-e21baacc15cf","Type":"ContainerDied","Data":"7cba650894980f212e48011da8cf49858839b4e8c72d07990d69d567eed77464"} Nov 26 07:37:34 crc kubenswrapper[4492]: I1126 07:37:34.023657 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-vmz5f" Nov 26 07:37:34 crc kubenswrapper[4492]: I1126 07:37:34.026162 4492 scope.go:117] "RemoveContainer" containerID="c04f29b8add33f8f4a4e890fb74fb0ee96b0464985b7d4b15a4117b30669d275" Nov 26 07:37:34 crc kubenswrapper[4492]: I1126 07:37:34.066133 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-vmz5f"] Nov 26 07:37:34 crc kubenswrapper[4492]: I1126 07:37:34.076472 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-vmz5f"] Nov 26 07:37:34 crc kubenswrapper[4492]: I1126 07:37:34.085018 4492 scope.go:117] "RemoveContainer" containerID="eb27a0fa24b205fc239a87dd69dcd3ab660902bc589384d08035d9e69197f896" Nov 26 07:37:34 crc kubenswrapper[4492]: I1126 07:37:34.108075 4492 scope.go:117] "RemoveContainer" containerID="283c3f0dc743cd16ec85078f3d96e94589131abd7b0887eec3a1755a6640995c" Nov 26 07:37:34 crc kubenswrapper[4492]: I1126 07:37:34.450749 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="98d54f1d-cd1f-4d83-bac3-e21baacc15cf" path="/var/lib/kubelet/pods/98d54f1d-cd1f-4d83-bac3-e21baacc15cf/volumes" Nov 26 07:37:42 crc kubenswrapper[4492]: I1126 07:37:42.442536 4492 scope.go:117] "RemoveContainer" containerID="79f63c8468d8f2a9f7913e668d160bf920560cab61936f2922bc0a2a52c08d34" Nov 26 07:37:42 crc kubenswrapper[4492]: E1126 07:37:42.444667 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 07:37:57 crc kubenswrapper[4492]: I1126 07:37:57.442570 4492 scope.go:117] "RemoveContainer" containerID="79f63c8468d8f2a9f7913e668d160bf920560cab61936f2922bc0a2a52c08d34" Nov 26 07:37:57 crc kubenswrapper[4492]: E1126 07:37:57.446017 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 07:38:12 crc kubenswrapper[4492]: I1126 07:38:12.439686 4492 scope.go:117] "RemoveContainer" containerID="79f63c8468d8f2a9f7913e668d160bf920560cab61936f2922bc0a2a52c08d34" Nov 26 07:38:12 crc kubenswrapper[4492]: E1126 07:38:12.441913 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 07:38:27 crc kubenswrapper[4492]: I1126 07:38:27.451297 4492 scope.go:117] "RemoveContainer" containerID="79f63c8468d8f2a9f7913e668d160bf920560cab61936f2922bc0a2a52c08d34" Nov 26 07:38:27 crc kubenswrapper[4492]: E1126 07:38:27.452127 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 07:38:40 crc kubenswrapper[4492]: I1126 07:38:40.439612 4492 scope.go:117] "RemoveContainer" containerID="79f63c8468d8f2a9f7913e668d160bf920560cab61936f2922bc0a2a52c08d34" Nov 26 07:38:40 crc kubenswrapper[4492]: E1126 07:38:40.440617 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 07:38:55 crc kubenswrapper[4492]: I1126 07:38:55.439971 4492 scope.go:117] "RemoveContainer" containerID="79f63c8468d8f2a9f7913e668d160bf920560cab61936f2922bc0a2a52c08d34" Nov 26 07:38:55 crc kubenswrapper[4492]: E1126 07:38:55.441439 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 07:38:57 crc kubenswrapper[4492]: I1126 07:38:57.839409 4492 
kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-bdjc2"] Nov 26 07:38:57 crc kubenswrapper[4492]: E1126 07:38:57.843105 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="98d54f1d-cd1f-4d83-bac3-e21baacc15cf" containerName="extract-utilities" Nov 26 07:38:57 crc kubenswrapper[4492]: I1126 07:38:57.843145 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="98d54f1d-cd1f-4d83-bac3-e21baacc15cf" containerName="extract-utilities" Nov 26 07:38:57 crc kubenswrapper[4492]: E1126 07:38:57.843164 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="98d54f1d-cd1f-4d83-bac3-e21baacc15cf" containerName="registry-server" Nov 26 07:38:57 crc kubenswrapper[4492]: I1126 07:38:57.843186 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="98d54f1d-cd1f-4d83-bac3-e21baacc15cf" containerName="registry-server" Nov 26 07:38:57 crc kubenswrapper[4492]: E1126 07:38:57.843242 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="98d54f1d-cd1f-4d83-bac3-e21baacc15cf" containerName="extract-content" Nov 26 07:38:57 crc kubenswrapper[4492]: I1126 07:38:57.843248 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="98d54f1d-cd1f-4d83-bac3-e21baacc15cf" containerName="extract-content" Nov 26 07:38:57 crc kubenswrapper[4492]: I1126 07:38:57.844124 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="98d54f1d-cd1f-4d83-bac3-e21baacc15cf" containerName="registry-server" Nov 26 07:38:57 crc kubenswrapper[4492]: I1126 07:38:57.851580 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-bdjc2" Nov 26 07:38:57 crc kubenswrapper[4492]: I1126 07:38:57.971938 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-bdjc2"] Nov 26 07:38:58 crc kubenswrapper[4492]: I1126 07:38:58.018213 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fc86f75a-e710-4649-a19e-5e37e8d5a2c1-catalog-content\") pod \"redhat-operators-bdjc2\" (UID: \"fc86f75a-e710-4649-a19e-5e37e8d5a2c1\") " pod="openshift-marketplace/redhat-operators-bdjc2" Nov 26 07:38:58 crc kubenswrapper[4492]: I1126 07:38:58.018333 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fc86f75a-e710-4649-a19e-5e37e8d5a2c1-utilities\") pod \"redhat-operators-bdjc2\" (UID: \"fc86f75a-e710-4649-a19e-5e37e8d5a2c1\") " pod="openshift-marketplace/redhat-operators-bdjc2" Nov 26 07:38:58 crc kubenswrapper[4492]: I1126 07:38:58.018562 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-phtm8\" (UniqueName: \"kubernetes.io/projected/fc86f75a-e710-4649-a19e-5e37e8d5a2c1-kube-api-access-phtm8\") pod \"redhat-operators-bdjc2\" (UID: \"fc86f75a-e710-4649-a19e-5e37e8d5a2c1\") " pod="openshift-marketplace/redhat-operators-bdjc2" Nov 26 07:38:58 crc kubenswrapper[4492]: I1126 07:38:58.120379 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fc86f75a-e710-4649-a19e-5e37e8d5a2c1-catalog-content\") pod \"redhat-operators-bdjc2\" (UID: \"fc86f75a-e710-4649-a19e-5e37e8d5a2c1\") " pod="openshift-marketplace/redhat-operators-bdjc2" Nov 26 07:38:58 crc kubenswrapper[4492]: I1126 07:38:58.120437 4492 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fc86f75a-e710-4649-a19e-5e37e8d5a2c1-utilities\") pod \"redhat-operators-bdjc2\" (UID: \"fc86f75a-e710-4649-a19e-5e37e8d5a2c1\") " pod="openshift-marketplace/redhat-operators-bdjc2" Nov 26 07:38:58 crc kubenswrapper[4492]: I1126 07:38:58.120530 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-phtm8\" (UniqueName: \"kubernetes.io/projected/fc86f75a-e710-4649-a19e-5e37e8d5a2c1-kube-api-access-phtm8\") pod \"redhat-operators-bdjc2\" (UID: \"fc86f75a-e710-4649-a19e-5e37e8d5a2c1\") " pod="openshift-marketplace/redhat-operators-bdjc2" Nov 26 07:38:58 crc kubenswrapper[4492]: I1126 07:38:58.123473 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fc86f75a-e710-4649-a19e-5e37e8d5a2c1-catalog-content\") pod \"redhat-operators-bdjc2\" (UID: \"fc86f75a-e710-4649-a19e-5e37e8d5a2c1\") " pod="openshift-marketplace/redhat-operators-bdjc2" Nov 26 07:38:58 crc kubenswrapper[4492]: I1126 07:38:58.123977 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fc86f75a-e710-4649-a19e-5e37e8d5a2c1-utilities\") pod \"redhat-operators-bdjc2\" (UID: \"fc86f75a-e710-4649-a19e-5e37e8d5a2c1\") " pod="openshift-marketplace/redhat-operators-bdjc2" Nov 26 07:38:58 crc kubenswrapper[4492]: I1126 07:38:58.144487 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-phtm8\" (UniqueName: \"kubernetes.io/projected/fc86f75a-e710-4649-a19e-5e37e8d5a2c1-kube-api-access-phtm8\") pod \"redhat-operators-bdjc2\" (UID: \"fc86f75a-e710-4649-a19e-5e37e8d5a2c1\") " pod="openshift-marketplace/redhat-operators-bdjc2" Nov 26 07:38:58 crc kubenswrapper[4492]: I1126 07:38:58.174457 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-bdjc2" Nov 26 07:38:59 crc kubenswrapper[4492]: I1126 07:38:59.093611 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-bdjc2"] Nov 26 07:38:59 crc kubenswrapper[4492]: I1126 07:38:59.811582 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bdjc2" event={"ID":"fc86f75a-e710-4649-a19e-5e37e8d5a2c1","Type":"ContainerDied","Data":"9a6ef962e24a051414fed2698c8e7857c17815ab271875e2a447c91d2aeb5747"} Nov 26 07:38:59 crc kubenswrapper[4492]: I1126 07:38:59.811503 4492 generic.go:334] "Generic (PLEG): container finished" podID="fc86f75a-e710-4649-a19e-5e37e8d5a2c1" containerID="9a6ef962e24a051414fed2698c8e7857c17815ab271875e2a447c91d2aeb5747" exitCode=0 Nov 26 07:38:59 crc kubenswrapper[4492]: I1126 07:38:59.811949 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bdjc2" event={"ID":"fc86f75a-e710-4649-a19e-5e37e8d5a2c1","Type":"ContainerStarted","Data":"50e5af899bd6cb294fb72b7e7bbc3ba5a46491a56a6dec582f49bc9b5b127963"} Nov 26 07:39:00 crc kubenswrapper[4492]: I1126 07:39:00.823206 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bdjc2" event={"ID":"fc86f75a-e710-4649-a19e-5e37e8d5a2c1","Type":"ContainerStarted","Data":"02b7482f32a2facc9e54b49a21e845481def927016a2570e8b46a3d5c7f7d1d1"} Nov 26 07:39:02 crc kubenswrapper[4492]: E1126 07:39:02.790623 4492 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 192.168.25.180:37852->192.168.25.180:45641: write tcp 192.168.25.180:37852->192.168.25.180:45641: write: connection reset by peer Nov 26 07:39:03 crc kubenswrapper[4492]: I1126 07:39:03.852493 4492 generic.go:334] "Generic (PLEG): container finished" podID="fc86f75a-e710-4649-a19e-5e37e8d5a2c1" containerID="02b7482f32a2facc9e54b49a21e845481def927016a2570e8b46a3d5c7f7d1d1" exitCode=0 Nov 26 07:39:03 crc kubenswrapper[4492]: I1126 07:39:03.852594 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bdjc2" event={"ID":"fc86f75a-e710-4649-a19e-5e37e8d5a2c1","Type":"ContainerDied","Data":"02b7482f32a2facc9e54b49a21e845481def927016a2570e8b46a3d5c7f7d1d1"} Nov 26 07:39:04 crc kubenswrapper[4492]: I1126 07:39:04.862863 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bdjc2" event={"ID":"fc86f75a-e710-4649-a19e-5e37e8d5a2c1","Type":"ContainerStarted","Data":"d960df9fa351b8514f614101874430901ff3464a13045419d062d30f666eb14b"} Nov 26 07:39:04 crc kubenswrapper[4492]: I1126 07:39:04.887059 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-bdjc2" podStartSLOduration=3.198685239 podStartE2EDuration="7.88659752s" podCreationTimestamp="2025-11-26 07:38:57 +0000 UTC" firstStartedPulling="2025-11-26 07:38:59.814701835 +0000 UTC m=+3035.698590133" lastFinishedPulling="2025-11-26 07:39:04.502614116 +0000 UTC m=+3040.386502414" observedRunningTime="2025-11-26 07:39:04.885595045 +0000 UTC m=+3040.769483344" watchObservedRunningTime="2025-11-26 07:39:04.88659752 +0000 UTC m=+3040.770485819" Nov 26 07:39:06 crc kubenswrapper[4492]: I1126 07:39:06.438985 4492 scope.go:117] "RemoveContainer" containerID="79f63c8468d8f2a9f7913e668d160bf920560cab61936f2922bc0a2a52c08d34" Nov 26 07:39:06 crc kubenswrapper[4492]: E1126 07:39:06.440642 4492 pod_workers.go:1301] "Error syncing pod, 
skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 07:39:08 crc kubenswrapper[4492]: I1126 07:39:08.177293 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-bdjc2" Nov 26 07:39:08 crc kubenswrapper[4492]: I1126 07:39:08.177651 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-bdjc2" Nov 26 07:39:09 crc kubenswrapper[4492]: I1126 07:39:09.217794 4492 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-bdjc2" podUID="fc86f75a-e710-4649-a19e-5e37e8d5a2c1" containerName="registry-server" probeResult="failure" output=< Nov 26 07:39:09 crc kubenswrapper[4492]: timeout: failed to connect service ":50051" within 1s Nov 26 07:39:09 crc kubenswrapper[4492]: > Nov 26 07:39:17 crc kubenswrapper[4492]: I1126 07:39:17.439701 4492 scope.go:117] "RemoveContainer" containerID="79f63c8468d8f2a9f7913e668d160bf920560cab61936f2922bc0a2a52c08d34" Nov 26 07:39:17 crc kubenswrapper[4492]: E1126 07:39:17.440428 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 07:39:19 crc kubenswrapper[4492]: I1126 07:39:19.213283 4492 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-bdjc2" podUID="fc86f75a-e710-4649-a19e-5e37e8d5a2c1" containerName="registry-server" probeResult="failure" output=< Nov 26 07:39:19 crc kubenswrapper[4492]: timeout: failed to connect service ":50051" within 1s Nov 26 07:39:19 crc kubenswrapper[4492]: > Nov 26 07:39:28 crc kubenswrapper[4492]: I1126 07:39:28.237850 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-bdjc2" Nov 26 07:39:28 crc kubenswrapper[4492]: I1126 07:39:28.277454 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-bdjc2" Nov 26 07:39:28 crc kubenswrapper[4492]: I1126 07:39:28.946605 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-bdjc2"] Nov 26 07:39:30 crc kubenswrapper[4492]: I1126 07:39:30.117535 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-bdjc2" podUID="fc86f75a-e710-4649-a19e-5e37e8d5a2c1" containerName="registry-server" containerID="cri-o://d960df9fa351b8514f614101874430901ff3464a13045419d062d30f666eb14b" gracePeriod=2 Nov 26 07:39:30 crc kubenswrapper[4492]: I1126 07:39:30.439571 4492 scope.go:117] "RemoveContainer" containerID="79f63c8468d8f2a9f7913e668d160bf920560cab61936f2922bc0a2a52c08d34" Nov 26 07:39:30 crc kubenswrapper[4492]: E1126 07:39:30.439986 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with 
CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 07:39:31 crc kubenswrapper[4492]: I1126 07:39:31.123953 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-bdjc2" Nov 26 07:39:31 crc kubenswrapper[4492]: I1126 07:39:31.128967 4492 generic.go:334] "Generic (PLEG): container finished" podID="fc86f75a-e710-4649-a19e-5e37e8d5a2c1" containerID="d960df9fa351b8514f614101874430901ff3464a13045419d062d30f666eb14b" exitCode=0 Nov 26 07:39:31 crc kubenswrapper[4492]: I1126 07:39:31.129293 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bdjc2" event={"ID":"fc86f75a-e710-4649-a19e-5e37e8d5a2c1","Type":"ContainerDied","Data":"d960df9fa351b8514f614101874430901ff3464a13045419d062d30f666eb14b"} Nov 26 07:39:31 crc kubenswrapper[4492]: I1126 07:39:31.129337 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bdjc2" event={"ID":"fc86f75a-e710-4649-a19e-5e37e8d5a2c1","Type":"ContainerDied","Data":"50e5af899bd6cb294fb72b7e7bbc3ba5a46491a56a6dec582f49bc9b5b127963"} Nov 26 07:39:31 crc kubenswrapper[4492]: I1126 07:39:31.129358 4492 scope.go:117] "RemoveContainer" containerID="d960df9fa351b8514f614101874430901ff3464a13045419d062d30f666eb14b" Nov 26 07:39:31 crc kubenswrapper[4492]: I1126 07:39:31.189596 4492 scope.go:117] "RemoveContainer" containerID="02b7482f32a2facc9e54b49a21e845481def927016a2570e8b46a3d5c7f7d1d1" Nov 26 07:39:31 crc kubenswrapper[4492]: I1126 07:39:31.244631 4492 scope.go:117] "RemoveContainer" containerID="9a6ef962e24a051414fed2698c8e7857c17815ab271875e2a447c91d2aeb5747" Nov 26 07:39:31 crc kubenswrapper[4492]: I1126 07:39:31.273616 4492 scope.go:117] "RemoveContainer" containerID="d960df9fa351b8514f614101874430901ff3464a13045419d062d30f666eb14b" Nov 26 07:39:31 crc kubenswrapper[4492]: E1126 07:39:31.277276 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d960df9fa351b8514f614101874430901ff3464a13045419d062d30f666eb14b\": container with ID starting with d960df9fa351b8514f614101874430901ff3464a13045419d062d30f666eb14b not found: ID does not exist" containerID="d960df9fa351b8514f614101874430901ff3464a13045419d062d30f666eb14b" Nov 26 07:39:31 crc kubenswrapper[4492]: I1126 07:39:31.277775 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d960df9fa351b8514f614101874430901ff3464a13045419d062d30f666eb14b"} err="failed to get container status \"d960df9fa351b8514f614101874430901ff3464a13045419d062d30f666eb14b\": rpc error: code = NotFound desc = could not find container \"d960df9fa351b8514f614101874430901ff3464a13045419d062d30f666eb14b\": container with ID starting with d960df9fa351b8514f614101874430901ff3464a13045419d062d30f666eb14b not found: ID does not exist" Nov 26 07:39:31 crc kubenswrapper[4492]: I1126 07:39:31.277824 4492 scope.go:117] "RemoveContainer" containerID="02b7482f32a2facc9e54b49a21e845481def927016a2570e8b46a3d5c7f7d1d1" Nov 26 07:39:31 crc kubenswrapper[4492]: E1126 07:39:31.278354 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"02b7482f32a2facc9e54b49a21e845481def927016a2570e8b46a3d5c7f7d1d1\": container with ID starting with 02b7482f32a2facc9e54b49a21e845481def927016a2570e8b46a3d5c7f7d1d1 not found: ID does not exist" containerID="02b7482f32a2facc9e54b49a21e845481def927016a2570e8b46a3d5c7f7d1d1" Nov 26 07:39:31 crc kubenswrapper[4492]: I1126 07:39:31.278422 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"02b7482f32a2facc9e54b49a21e845481def927016a2570e8b46a3d5c7f7d1d1"} err="failed to get container status \"02b7482f32a2facc9e54b49a21e845481def927016a2570e8b46a3d5c7f7d1d1\": rpc error: code = NotFound desc = could not find container \"02b7482f32a2facc9e54b49a21e845481def927016a2570e8b46a3d5c7f7d1d1\": container with ID starting with 02b7482f32a2facc9e54b49a21e845481def927016a2570e8b46a3d5c7f7d1d1 not found: ID does not exist" Nov 26 07:39:31 crc kubenswrapper[4492]: I1126 07:39:31.278449 4492 scope.go:117] "RemoveContainer" containerID="9a6ef962e24a051414fed2698c8e7857c17815ab271875e2a447c91d2aeb5747" Nov 26 07:39:31 crc kubenswrapper[4492]: E1126 07:39:31.278872 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9a6ef962e24a051414fed2698c8e7857c17815ab271875e2a447c91d2aeb5747\": container with ID starting with 9a6ef962e24a051414fed2698c8e7857c17815ab271875e2a447c91d2aeb5747 not found: ID does not exist" containerID="9a6ef962e24a051414fed2698c8e7857c17815ab271875e2a447c91d2aeb5747" Nov 26 07:39:31 crc kubenswrapper[4492]: I1126 07:39:31.278899 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9a6ef962e24a051414fed2698c8e7857c17815ab271875e2a447c91d2aeb5747"} err="failed to get container status \"9a6ef962e24a051414fed2698c8e7857c17815ab271875e2a447c91d2aeb5747\": rpc error: code = NotFound desc = could not find container \"9a6ef962e24a051414fed2698c8e7857c17815ab271875e2a447c91d2aeb5747\": container with ID starting with 9a6ef962e24a051414fed2698c8e7857c17815ab271875e2a447c91d2aeb5747 not found: ID does not exist" Nov 26 07:39:31 crc kubenswrapper[4492]: I1126 07:39:31.319326 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-phtm8\" (UniqueName: \"kubernetes.io/projected/fc86f75a-e710-4649-a19e-5e37e8d5a2c1-kube-api-access-phtm8\") pod \"fc86f75a-e710-4649-a19e-5e37e8d5a2c1\" (UID: \"fc86f75a-e710-4649-a19e-5e37e8d5a2c1\") " Nov 26 07:39:31 crc kubenswrapper[4492]: I1126 07:39:31.319533 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fc86f75a-e710-4649-a19e-5e37e8d5a2c1-catalog-content\") pod \"fc86f75a-e710-4649-a19e-5e37e8d5a2c1\" (UID: \"fc86f75a-e710-4649-a19e-5e37e8d5a2c1\") " Nov 26 07:39:31 crc kubenswrapper[4492]: I1126 07:39:31.319574 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fc86f75a-e710-4649-a19e-5e37e8d5a2c1-utilities\") pod \"fc86f75a-e710-4649-a19e-5e37e8d5a2c1\" (UID: \"fc86f75a-e710-4649-a19e-5e37e8d5a2c1\") " Nov 26 07:39:31 crc kubenswrapper[4492]: I1126 07:39:31.321473 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fc86f75a-e710-4649-a19e-5e37e8d5a2c1-utilities" (OuterVolumeSpecName: "utilities") pod "fc86f75a-e710-4649-a19e-5e37e8d5a2c1" (UID: "fc86f75a-e710-4649-a19e-5e37e8d5a2c1"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:39:31 crc kubenswrapper[4492]: I1126 07:39:31.340333 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fc86f75a-e710-4649-a19e-5e37e8d5a2c1-kube-api-access-phtm8" (OuterVolumeSpecName: "kube-api-access-phtm8") pod "fc86f75a-e710-4649-a19e-5e37e8d5a2c1" (UID: "fc86f75a-e710-4649-a19e-5e37e8d5a2c1"). InnerVolumeSpecName "kube-api-access-phtm8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:39:31 crc kubenswrapper[4492]: I1126 07:39:31.398903 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fc86f75a-e710-4649-a19e-5e37e8d5a2c1-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "fc86f75a-e710-4649-a19e-5e37e8d5a2c1" (UID: "fc86f75a-e710-4649-a19e-5e37e8d5a2c1"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:39:31 crc kubenswrapper[4492]: I1126 07:39:31.422414 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-phtm8\" (UniqueName: \"kubernetes.io/projected/fc86f75a-e710-4649-a19e-5e37e8d5a2c1-kube-api-access-phtm8\") on node \"crc\" DevicePath \"\"" Nov 26 07:39:31 crc kubenswrapper[4492]: I1126 07:39:31.422452 4492 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fc86f75a-e710-4649-a19e-5e37e8d5a2c1-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 07:39:31 crc kubenswrapper[4492]: I1126 07:39:31.422464 4492 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fc86f75a-e710-4649-a19e-5e37e8d5a2c1-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 07:39:32 crc kubenswrapper[4492]: I1126 07:39:32.137392 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-bdjc2" Nov 26 07:39:32 crc kubenswrapper[4492]: I1126 07:39:32.167244 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-bdjc2"] Nov 26 07:39:32 crc kubenswrapper[4492]: I1126 07:39:32.176932 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-bdjc2"] Nov 26 07:39:32 crc kubenswrapper[4492]: I1126 07:39:32.448438 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fc86f75a-e710-4649-a19e-5e37e8d5a2c1" path="/var/lib/kubelet/pods/fc86f75a-e710-4649-a19e-5e37e8d5a2c1/volumes" Nov 26 07:39:41 crc kubenswrapper[4492]: I1126 07:39:41.438523 4492 scope.go:117] "RemoveContainer" containerID="79f63c8468d8f2a9f7913e668d160bf920560cab61936f2922bc0a2a52c08d34" Nov 26 07:39:41 crc kubenswrapper[4492]: E1126 07:39:41.439268 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 07:39:56 crc kubenswrapper[4492]: I1126 07:39:56.438883 4492 scope.go:117] "RemoveContainer" containerID="79f63c8468d8f2a9f7913e668d160bf920560cab61936f2922bc0a2a52c08d34" Nov 26 07:39:56 crc kubenswrapper[4492]: E1126 07:39:56.439623 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 07:40:09 crc kubenswrapper[4492]: I1126 07:40:09.461217 4492 scope.go:117] "RemoveContainer" containerID="79f63c8468d8f2a9f7913e668d160bf920560cab61936f2922bc0a2a52c08d34" Nov 26 07:40:09 crc kubenswrapper[4492]: E1126 07:40:09.462642 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 07:40:21 crc kubenswrapper[4492]: I1126 07:40:21.439646 4492 scope.go:117] "RemoveContainer" containerID="79f63c8468d8f2a9f7913e668d160bf920560cab61936f2922bc0a2a52c08d34" Nov 26 07:40:21 crc kubenswrapper[4492]: E1126 07:40:21.440430 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 07:40:33 crc kubenswrapper[4492]: I1126 07:40:33.439228 4492 scope.go:117] "RemoveContainer" containerID="79f63c8468d8f2a9f7913e668d160bf920560cab61936f2922bc0a2a52c08d34" Nov 26 07:40:33 crc 
kubenswrapper[4492]: E1126 07:40:33.439800 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 07:40:47 crc kubenswrapper[4492]: I1126 07:40:47.438824 4492 scope.go:117] "RemoveContainer" containerID="79f63c8468d8f2a9f7913e668d160bf920560cab61936f2922bc0a2a52c08d34" Nov 26 07:40:47 crc kubenswrapper[4492]: E1126 07:40:47.439832 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 07:40:58 crc kubenswrapper[4492]: I1126 07:40:58.439091 4492 scope.go:117] "RemoveContainer" containerID="79f63c8468d8f2a9f7913e668d160bf920560cab61936f2922bc0a2a52c08d34" Nov 26 07:40:58 crc kubenswrapper[4492]: E1126 07:40:58.441280 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 07:41:13 crc kubenswrapper[4492]: I1126 07:41:13.439225 4492 scope.go:117] "RemoveContainer" containerID="79f63c8468d8f2a9f7913e668d160bf920560cab61936f2922bc0a2a52c08d34" Nov 26 07:41:13 crc kubenswrapper[4492]: E1126 07:41:13.439956 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 07:41:28 crc kubenswrapper[4492]: I1126 07:41:28.438037 4492 scope.go:117] "RemoveContainer" containerID="79f63c8468d8f2a9f7913e668d160bf920560cab61936f2922bc0a2a52c08d34" Nov 26 07:41:28 crc kubenswrapper[4492]: E1126 07:41:28.439950 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 07:41:40 crc kubenswrapper[4492]: I1126 07:41:40.438950 4492 scope.go:117] "RemoveContainer" containerID="79f63c8468d8f2a9f7913e668d160bf920560cab61936f2922bc0a2a52c08d34" Nov 26 07:41:40 crc kubenswrapper[4492]: E1126 07:41:40.439601 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: 
\"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 07:41:55 crc kubenswrapper[4492]: I1126 07:41:55.438982 4492 scope.go:117] "RemoveContainer" containerID="79f63c8468d8f2a9f7913e668d160bf920560cab61936f2922bc0a2a52c08d34" Nov 26 07:41:56 crc kubenswrapper[4492]: I1126 07:41:56.263719 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" event={"ID":"04bf18ad-d2a1-4b30-a3fa-2b6247363c82","Type":"ContainerStarted","Data":"ff9d85070d91579120549cbc57175c28badf619aa6c5006efb62e2b7a647e0df"} Nov 26 07:42:48 crc kubenswrapper[4492]: I1126 07:42:48.196531 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-qz84p"] Nov 26 07:42:48 crc kubenswrapper[4492]: E1126 07:42:48.199159 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fc86f75a-e710-4649-a19e-5e37e8d5a2c1" containerName="registry-server" Nov 26 07:42:48 crc kubenswrapper[4492]: I1126 07:42:48.199193 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="fc86f75a-e710-4649-a19e-5e37e8d5a2c1" containerName="registry-server" Nov 26 07:42:48 crc kubenswrapper[4492]: E1126 07:42:48.199835 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fc86f75a-e710-4649-a19e-5e37e8d5a2c1" containerName="extract-content" Nov 26 07:42:48 crc kubenswrapper[4492]: I1126 07:42:48.199850 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="fc86f75a-e710-4649-a19e-5e37e8d5a2c1" containerName="extract-content" Nov 26 07:42:48 crc kubenswrapper[4492]: E1126 07:42:48.199863 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fc86f75a-e710-4649-a19e-5e37e8d5a2c1" containerName="extract-utilities" Nov 26 07:42:48 crc kubenswrapper[4492]: I1126 07:42:48.199869 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="fc86f75a-e710-4649-a19e-5e37e8d5a2c1" containerName="extract-utilities" Nov 26 07:42:48 crc kubenswrapper[4492]: I1126 07:42:48.203135 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="fc86f75a-e710-4649-a19e-5e37e8d5a2c1" containerName="registry-server" Nov 26 07:42:48 crc kubenswrapper[4492]: I1126 07:42:48.209522 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-qz84p" Nov 26 07:42:48 crc kubenswrapper[4492]: I1126 07:42:48.291810 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/585c8377-db56-4bbb-ae96-d31e130161cb-catalog-content\") pod \"community-operators-qz84p\" (UID: \"585c8377-db56-4bbb-ae96-d31e130161cb\") " pod="openshift-marketplace/community-operators-qz84p" Nov 26 07:42:48 crc kubenswrapper[4492]: I1126 07:42:48.291886 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gh8nt\" (UniqueName: \"kubernetes.io/projected/585c8377-db56-4bbb-ae96-d31e130161cb-kube-api-access-gh8nt\") pod \"community-operators-qz84p\" (UID: \"585c8377-db56-4bbb-ae96-d31e130161cb\") " pod="openshift-marketplace/community-operators-qz84p" Nov 26 07:42:48 crc kubenswrapper[4492]: I1126 07:42:48.292044 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/585c8377-db56-4bbb-ae96-d31e130161cb-utilities\") pod \"community-operators-qz84p\" (UID: \"585c8377-db56-4bbb-ae96-d31e130161cb\") " pod="openshift-marketplace/community-operators-qz84p" Nov 26 07:42:48 crc kubenswrapper[4492]: I1126 07:42:48.330268 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-qz84p"] Nov 26 07:42:48 crc kubenswrapper[4492]: I1126 07:42:48.392698 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/585c8377-db56-4bbb-ae96-d31e130161cb-utilities\") pod \"community-operators-qz84p\" (UID: \"585c8377-db56-4bbb-ae96-d31e130161cb\") " pod="openshift-marketplace/community-operators-qz84p" Nov 26 07:42:48 crc kubenswrapper[4492]: I1126 07:42:48.392965 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/585c8377-db56-4bbb-ae96-d31e130161cb-catalog-content\") pod \"community-operators-qz84p\" (UID: \"585c8377-db56-4bbb-ae96-d31e130161cb\") " pod="openshift-marketplace/community-operators-qz84p" Nov 26 07:42:48 crc kubenswrapper[4492]: I1126 07:42:48.392989 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gh8nt\" (UniqueName: \"kubernetes.io/projected/585c8377-db56-4bbb-ae96-d31e130161cb-kube-api-access-gh8nt\") pod \"community-operators-qz84p\" (UID: \"585c8377-db56-4bbb-ae96-d31e130161cb\") " pod="openshift-marketplace/community-operators-qz84p" Nov 26 07:42:48 crc kubenswrapper[4492]: I1126 07:42:48.396283 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/585c8377-db56-4bbb-ae96-d31e130161cb-catalog-content\") pod \"community-operators-qz84p\" (UID: \"585c8377-db56-4bbb-ae96-d31e130161cb\") " pod="openshift-marketplace/community-operators-qz84p" Nov 26 07:42:48 crc kubenswrapper[4492]: I1126 07:42:48.396979 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/585c8377-db56-4bbb-ae96-d31e130161cb-utilities\") pod \"community-operators-qz84p\" (UID: \"585c8377-db56-4bbb-ae96-d31e130161cb\") " pod="openshift-marketplace/community-operators-qz84p" Nov 26 07:42:48 crc kubenswrapper[4492]: I1126 07:42:48.421436 4492 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-gh8nt\" (UniqueName: \"kubernetes.io/projected/585c8377-db56-4bbb-ae96-d31e130161cb-kube-api-access-gh8nt\") pod \"community-operators-qz84p\" (UID: \"585c8377-db56-4bbb-ae96-d31e130161cb\") " pod="openshift-marketplace/community-operators-qz84p" Nov 26 07:42:48 crc kubenswrapper[4492]: I1126 07:42:48.542311 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-qz84p" Nov 26 07:42:49 crc kubenswrapper[4492]: I1126 07:42:49.388308 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-qz84p"] Nov 26 07:42:49 crc kubenswrapper[4492]: I1126 07:42:49.672807 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qz84p" event={"ID":"585c8377-db56-4bbb-ae96-d31e130161cb","Type":"ContainerDied","Data":"b113b7eff32e63ca829f7b57a239fddaf5443b0ca0276d7a6ce001cf85a0bb57"} Nov 26 07:42:49 crc kubenswrapper[4492]: I1126 07:42:49.673022 4492 generic.go:334] "Generic (PLEG): container finished" podID="585c8377-db56-4bbb-ae96-d31e130161cb" containerID="b113b7eff32e63ca829f7b57a239fddaf5443b0ca0276d7a6ce001cf85a0bb57" exitCode=0 Nov 26 07:42:49 crc kubenswrapper[4492]: I1126 07:42:49.673089 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qz84p" event={"ID":"585c8377-db56-4bbb-ae96-d31e130161cb","Type":"ContainerStarted","Data":"fccd67d8dee44767f89ee1405855dd741a895a0c5a6848f2c9a20ae800fb682d"} Nov 26 07:42:49 crc kubenswrapper[4492]: I1126 07:42:49.676837 4492 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 26 07:42:50 crc kubenswrapper[4492]: I1126 07:42:50.683830 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qz84p" event={"ID":"585c8377-db56-4bbb-ae96-d31e130161cb","Type":"ContainerStarted","Data":"5b2bbdc2276f84e8d0be4b36b4f1918533e630ad0d705d8f1f1c6dfd04f9c8d6"} Nov 26 07:42:51 crc kubenswrapper[4492]: I1126 07:42:51.695756 4492 generic.go:334] "Generic (PLEG): container finished" podID="585c8377-db56-4bbb-ae96-d31e130161cb" containerID="5b2bbdc2276f84e8d0be4b36b4f1918533e630ad0d705d8f1f1c6dfd04f9c8d6" exitCode=0 Nov 26 07:42:51 crc kubenswrapper[4492]: I1126 07:42:51.695819 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qz84p" event={"ID":"585c8377-db56-4bbb-ae96-d31e130161cb","Type":"ContainerDied","Data":"5b2bbdc2276f84e8d0be4b36b4f1918533e630ad0d705d8f1f1c6dfd04f9c8d6"} Nov 26 07:42:52 crc kubenswrapper[4492]: I1126 07:42:52.704816 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qz84p" event={"ID":"585c8377-db56-4bbb-ae96-d31e130161cb","Type":"ContainerStarted","Data":"aef19558c1d997ab00e86127b0ef5bbfd1b135750b1317195996089521fd480f"} Nov 26 07:42:52 crc kubenswrapper[4492]: I1126 07:42:52.729882 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-qz84p" podStartSLOduration=3.1619217649999998 podStartE2EDuration="5.728729592s" podCreationTimestamp="2025-11-26 07:42:47 +0000 UTC" firstStartedPulling="2025-11-26 07:42:49.673973853 +0000 UTC m=+3265.557862151" lastFinishedPulling="2025-11-26 07:42:52.24078168 +0000 UTC m=+3268.124669978" observedRunningTime="2025-11-26 07:42:52.719277199 +0000 UTC m=+3268.603165497" watchObservedRunningTime="2025-11-26 
07:42:52.728729592 +0000 UTC m=+3268.612617891" Nov 26 07:42:58 crc kubenswrapper[4492]: I1126 07:42:58.543780 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-qz84p" Nov 26 07:42:58 crc kubenswrapper[4492]: I1126 07:42:58.544410 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-qz84p" Nov 26 07:42:58 crc kubenswrapper[4492]: I1126 07:42:58.597596 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-qz84p" Nov 26 07:42:58 crc kubenswrapper[4492]: I1126 07:42:58.790457 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-qz84p" Nov 26 07:43:02 crc kubenswrapper[4492]: I1126 07:43:02.104103 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-qz84p"] Nov 26 07:43:02 crc kubenswrapper[4492]: I1126 07:43:02.108282 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-qz84p" podUID="585c8377-db56-4bbb-ae96-d31e130161cb" containerName="registry-server" containerID="cri-o://aef19558c1d997ab00e86127b0ef5bbfd1b135750b1317195996089521fd480f" gracePeriod=2 Nov 26 07:43:02 crc kubenswrapper[4492]: I1126 07:43:02.792551 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qz84p" event={"ID":"585c8377-db56-4bbb-ae96-d31e130161cb","Type":"ContainerDied","Data":"aef19558c1d997ab00e86127b0ef5bbfd1b135750b1317195996089521fd480f"} Nov 26 07:43:02 crc kubenswrapper[4492]: I1126 07:43:02.793024 4492 generic.go:334] "Generic (PLEG): container finished" podID="585c8377-db56-4bbb-ae96-d31e130161cb" containerID="aef19558c1d997ab00e86127b0ef5bbfd1b135750b1317195996089521fd480f" exitCode=0 Nov 26 07:43:02 crc kubenswrapper[4492]: I1126 07:43:02.983000 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-qz84p" Nov 26 07:43:03 crc kubenswrapper[4492]: I1126 07:43:03.074773 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gh8nt\" (UniqueName: \"kubernetes.io/projected/585c8377-db56-4bbb-ae96-d31e130161cb-kube-api-access-gh8nt\") pod \"585c8377-db56-4bbb-ae96-d31e130161cb\" (UID: \"585c8377-db56-4bbb-ae96-d31e130161cb\") " Nov 26 07:43:03 crc kubenswrapper[4492]: I1126 07:43:03.074904 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/585c8377-db56-4bbb-ae96-d31e130161cb-utilities\") pod \"585c8377-db56-4bbb-ae96-d31e130161cb\" (UID: \"585c8377-db56-4bbb-ae96-d31e130161cb\") " Nov 26 07:43:03 crc kubenswrapper[4492]: I1126 07:43:03.074962 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/585c8377-db56-4bbb-ae96-d31e130161cb-catalog-content\") pod \"585c8377-db56-4bbb-ae96-d31e130161cb\" (UID: \"585c8377-db56-4bbb-ae96-d31e130161cb\") " Nov 26 07:43:03 crc kubenswrapper[4492]: I1126 07:43:03.077315 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/585c8377-db56-4bbb-ae96-d31e130161cb-utilities" (OuterVolumeSpecName: "utilities") pod "585c8377-db56-4bbb-ae96-d31e130161cb" (UID: "585c8377-db56-4bbb-ae96-d31e130161cb"). 
InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:43:03 crc kubenswrapper[4492]: I1126 07:43:03.089286 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/585c8377-db56-4bbb-ae96-d31e130161cb-kube-api-access-gh8nt" (OuterVolumeSpecName: "kube-api-access-gh8nt") pod "585c8377-db56-4bbb-ae96-d31e130161cb" (UID: "585c8377-db56-4bbb-ae96-d31e130161cb"). InnerVolumeSpecName "kube-api-access-gh8nt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:43:03 crc kubenswrapper[4492]: I1126 07:43:03.157347 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/585c8377-db56-4bbb-ae96-d31e130161cb-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "585c8377-db56-4bbb-ae96-d31e130161cb" (UID: "585c8377-db56-4bbb-ae96-d31e130161cb"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:43:03 crc kubenswrapper[4492]: I1126 07:43:03.176540 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gh8nt\" (UniqueName: \"kubernetes.io/projected/585c8377-db56-4bbb-ae96-d31e130161cb-kube-api-access-gh8nt\") on node \"crc\" DevicePath \"\"" Nov 26 07:43:03 crc kubenswrapper[4492]: I1126 07:43:03.176572 4492 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/585c8377-db56-4bbb-ae96-d31e130161cb-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 07:43:03 crc kubenswrapper[4492]: I1126 07:43:03.176581 4492 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/585c8377-db56-4bbb-ae96-d31e130161cb-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 07:43:03 crc kubenswrapper[4492]: I1126 07:43:03.803148 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qz84p" event={"ID":"585c8377-db56-4bbb-ae96-d31e130161cb","Type":"ContainerDied","Data":"fccd67d8dee44767f89ee1405855dd741a895a0c5a6848f2c9a20ae800fb682d"} Nov 26 07:43:03 crc kubenswrapper[4492]: I1126 07:43:03.803280 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-qz84p" Nov 26 07:43:03 crc kubenswrapper[4492]: I1126 07:43:03.804021 4492 scope.go:117] "RemoveContainer" containerID="aef19558c1d997ab00e86127b0ef5bbfd1b135750b1317195996089521fd480f" Nov 26 07:43:03 crc kubenswrapper[4492]: I1126 07:43:03.837726 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-qz84p"] Nov 26 07:43:03 crc kubenswrapper[4492]: I1126 07:43:03.843445 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-qz84p"] Nov 26 07:43:03 crc kubenswrapper[4492]: I1126 07:43:03.849491 4492 scope.go:117] "RemoveContainer" containerID="5b2bbdc2276f84e8d0be4b36b4f1918533e630ad0d705d8f1f1c6dfd04f9c8d6" Nov 26 07:43:03 crc kubenswrapper[4492]: I1126 07:43:03.870819 4492 scope.go:117] "RemoveContainer" containerID="b113b7eff32e63ca829f7b57a239fddaf5443b0ca0276d7a6ce001cf85a0bb57" Nov 26 07:43:04 crc kubenswrapper[4492]: I1126 07:43:04.447711 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="585c8377-db56-4bbb-ae96-d31e130161cb" path="/var/lib/kubelet/pods/585c8377-db56-4bbb-ae96-d31e130161cb/volumes" Nov 26 07:43:13 crc kubenswrapper[4492]: E1126 07:43:13.482237 4492 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 192.168.25.180:52972->192.168.25.180:45641: write tcp 192.168.25.180:52972->192.168.25.180:45641: write: connection reset by peer Nov 26 07:43:14 crc kubenswrapper[4492]: E1126 07:43:14.533387 4492 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 192.168.25.180:57020->192.168.25.180:45641: write tcp 192.168.25.180:57020->192.168.25.180:45641: write: broken pipe Nov 26 07:43:25 crc kubenswrapper[4492]: I1126 07:43:25.630870 4492 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/swift-proxy-645d6d85d7-cvr9h" podUID="e44b94a7-c7a7-40e1-8d00-9f27e0e0639e" containerName="proxy-server" probeResult="failure" output="HTTP probe failed with statuscode: 502" Nov 26 07:44:19 crc kubenswrapper[4492]: I1126 07:44:19.444255 4492 patch_prober.go:28] interesting pod/machine-config-daemon-6blv7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 07:44:19 crc kubenswrapper[4492]: I1126 07:44:19.447365 4492 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 07:44:49 crc kubenswrapper[4492]: I1126 07:44:49.443214 4492 patch_prober.go:28] interesting pod/machine-config-daemon-6blv7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 07:44:49 crc kubenswrapper[4492]: I1126 07:44:49.444301 4492 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" 
Nov 26 07:45:00 crc kubenswrapper[4492]: I1126 07:45:00.655438 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402385-mvfvd"] Nov 26 07:45:00 crc kubenswrapper[4492]: E1126 07:45:00.658075 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="585c8377-db56-4bbb-ae96-d31e130161cb" containerName="registry-server" Nov 26 07:45:00 crc kubenswrapper[4492]: I1126 07:45:00.658154 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="585c8377-db56-4bbb-ae96-d31e130161cb" containerName="registry-server" Nov 26 07:45:00 crc kubenswrapper[4492]: E1126 07:45:00.658748 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="585c8377-db56-4bbb-ae96-d31e130161cb" containerName="extract-utilities" Nov 26 07:45:00 crc kubenswrapper[4492]: I1126 07:45:00.658817 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="585c8377-db56-4bbb-ae96-d31e130161cb" containerName="extract-utilities" Nov 26 07:45:00 crc kubenswrapper[4492]: E1126 07:45:00.658888 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="585c8377-db56-4bbb-ae96-d31e130161cb" containerName="extract-content" Nov 26 07:45:00 crc kubenswrapper[4492]: I1126 07:45:00.658935 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="585c8377-db56-4bbb-ae96-d31e130161cb" containerName="extract-content" Nov 26 07:45:00 crc kubenswrapper[4492]: I1126 07:45:00.659500 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="585c8377-db56-4bbb-ae96-d31e130161cb" containerName="registry-server" Nov 26 07:45:00 crc kubenswrapper[4492]: I1126 07:45:00.662552 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402385-mvfvd" Nov 26 07:45:00 crc kubenswrapper[4492]: I1126 07:45:00.670476 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 26 07:45:00 crc kubenswrapper[4492]: I1126 07:45:00.670506 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 26 07:45:00 crc kubenswrapper[4492]: I1126 07:45:00.747003 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402385-mvfvd"] Nov 26 07:45:00 crc kubenswrapper[4492]: I1126 07:45:00.748152 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mqdn2\" (UniqueName: \"kubernetes.io/projected/476ba433-3e71-45dc-9e94-bfb9ca40a73b-kube-api-access-mqdn2\") pod \"collect-profiles-29402385-mvfvd\" (UID: \"476ba433-3e71-45dc-9e94-bfb9ca40a73b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402385-mvfvd" Nov 26 07:45:00 crc kubenswrapper[4492]: I1126 07:45:00.748404 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/476ba433-3e71-45dc-9e94-bfb9ca40a73b-secret-volume\") pod \"collect-profiles-29402385-mvfvd\" (UID: \"476ba433-3e71-45dc-9e94-bfb9ca40a73b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402385-mvfvd" Nov 26 07:45:00 crc kubenswrapper[4492]: I1126 07:45:00.748553 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: 
\"kubernetes.io/configmap/476ba433-3e71-45dc-9e94-bfb9ca40a73b-config-volume\") pod \"collect-profiles-29402385-mvfvd\" (UID: \"476ba433-3e71-45dc-9e94-bfb9ca40a73b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402385-mvfvd" Nov 26 07:45:00 crc kubenswrapper[4492]: I1126 07:45:00.852114 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/476ba433-3e71-45dc-9e94-bfb9ca40a73b-config-volume\") pod \"collect-profiles-29402385-mvfvd\" (UID: \"476ba433-3e71-45dc-9e94-bfb9ca40a73b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402385-mvfvd" Nov 26 07:45:00 crc kubenswrapper[4492]: I1126 07:45:00.852213 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mqdn2\" (UniqueName: \"kubernetes.io/projected/476ba433-3e71-45dc-9e94-bfb9ca40a73b-kube-api-access-mqdn2\") pod \"collect-profiles-29402385-mvfvd\" (UID: \"476ba433-3e71-45dc-9e94-bfb9ca40a73b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402385-mvfvd" Nov 26 07:45:00 crc kubenswrapper[4492]: I1126 07:45:00.852473 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/476ba433-3e71-45dc-9e94-bfb9ca40a73b-secret-volume\") pod \"collect-profiles-29402385-mvfvd\" (UID: \"476ba433-3e71-45dc-9e94-bfb9ca40a73b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402385-mvfvd" Nov 26 07:45:00 crc kubenswrapper[4492]: I1126 07:45:00.852693 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/476ba433-3e71-45dc-9e94-bfb9ca40a73b-config-volume\") pod \"collect-profiles-29402385-mvfvd\" (UID: \"476ba433-3e71-45dc-9e94-bfb9ca40a73b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402385-mvfvd" Nov 26 07:45:00 crc kubenswrapper[4492]: I1126 07:45:00.869218 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/476ba433-3e71-45dc-9e94-bfb9ca40a73b-secret-volume\") pod \"collect-profiles-29402385-mvfvd\" (UID: \"476ba433-3e71-45dc-9e94-bfb9ca40a73b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402385-mvfvd" Nov 26 07:45:00 crc kubenswrapper[4492]: I1126 07:45:00.872536 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mqdn2\" (UniqueName: \"kubernetes.io/projected/476ba433-3e71-45dc-9e94-bfb9ca40a73b-kube-api-access-mqdn2\") pod \"collect-profiles-29402385-mvfvd\" (UID: \"476ba433-3e71-45dc-9e94-bfb9ca40a73b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402385-mvfvd" Nov 26 07:45:01 crc kubenswrapper[4492]: I1126 07:45:01.013962 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402385-mvfvd" Nov 26 07:45:01 crc kubenswrapper[4492]: I1126 07:45:01.769220 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402385-mvfvd"] Nov 26 07:45:02 crc kubenswrapper[4492]: I1126 07:45:02.789784 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402385-mvfvd" event={"ID":"476ba433-3e71-45dc-9e94-bfb9ca40a73b","Type":"ContainerDied","Data":"8c2e245a131afeb8ff951a0d0027918abe07b3ee0ff33130bf9611156ddd7171"} Nov 26 07:45:02 crc kubenswrapper[4492]: I1126 07:45:02.790499 4492 generic.go:334] "Generic (PLEG): container finished" podID="476ba433-3e71-45dc-9e94-bfb9ca40a73b" containerID="8c2e245a131afeb8ff951a0d0027918abe07b3ee0ff33130bf9611156ddd7171" exitCode=0 Nov 26 07:45:02 crc kubenswrapper[4492]: I1126 07:45:02.791016 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402385-mvfvd" event={"ID":"476ba433-3e71-45dc-9e94-bfb9ca40a73b","Type":"ContainerStarted","Data":"5c4bdf8a3242d37179a3776806f4d6efb03eda994630d64ac7dad76e30ac6f1b"} Nov 26 07:45:04 crc kubenswrapper[4492]: I1126 07:45:04.236656 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402385-mvfvd" Nov 26 07:45:04 crc kubenswrapper[4492]: I1126 07:45:04.334482 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/476ba433-3e71-45dc-9e94-bfb9ca40a73b-config-volume\") pod \"476ba433-3e71-45dc-9e94-bfb9ca40a73b\" (UID: \"476ba433-3e71-45dc-9e94-bfb9ca40a73b\") " Nov 26 07:45:04 crc kubenswrapper[4492]: I1126 07:45:04.334546 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/476ba433-3e71-45dc-9e94-bfb9ca40a73b-secret-volume\") pod \"476ba433-3e71-45dc-9e94-bfb9ca40a73b\" (UID: \"476ba433-3e71-45dc-9e94-bfb9ca40a73b\") " Nov 26 07:45:04 crc kubenswrapper[4492]: I1126 07:45:04.334597 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mqdn2\" (UniqueName: \"kubernetes.io/projected/476ba433-3e71-45dc-9e94-bfb9ca40a73b-kube-api-access-mqdn2\") pod \"476ba433-3e71-45dc-9e94-bfb9ca40a73b\" (UID: \"476ba433-3e71-45dc-9e94-bfb9ca40a73b\") " Nov 26 07:45:04 crc kubenswrapper[4492]: I1126 07:45:04.336655 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/476ba433-3e71-45dc-9e94-bfb9ca40a73b-config-volume" (OuterVolumeSpecName: "config-volume") pod "476ba433-3e71-45dc-9e94-bfb9ca40a73b" (UID: "476ba433-3e71-45dc-9e94-bfb9ca40a73b"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 07:45:04 crc kubenswrapper[4492]: I1126 07:45:04.344406 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/476ba433-3e71-45dc-9e94-bfb9ca40a73b-kube-api-access-mqdn2" (OuterVolumeSpecName: "kube-api-access-mqdn2") pod "476ba433-3e71-45dc-9e94-bfb9ca40a73b" (UID: "476ba433-3e71-45dc-9e94-bfb9ca40a73b"). InnerVolumeSpecName "kube-api-access-mqdn2". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:45:04 crc kubenswrapper[4492]: I1126 07:45:04.345428 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/476ba433-3e71-45dc-9e94-bfb9ca40a73b-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "476ba433-3e71-45dc-9e94-bfb9ca40a73b" (UID: "476ba433-3e71-45dc-9e94-bfb9ca40a73b"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 07:45:04 crc kubenswrapper[4492]: I1126 07:45:04.435926 4492 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/476ba433-3e71-45dc-9e94-bfb9ca40a73b-config-volume\") on node \"crc\" DevicePath \"\"" Nov 26 07:45:04 crc kubenswrapper[4492]: I1126 07:45:04.435955 4492 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/476ba433-3e71-45dc-9e94-bfb9ca40a73b-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 26 07:45:04 crc kubenswrapper[4492]: I1126 07:45:04.435964 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mqdn2\" (UniqueName: \"kubernetes.io/projected/476ba433-3e71-45dc-9e94-bfb9ca40a73b-kube-api-access-mqdn2\") on node \"crc\" DevicePath \"\"" Nov 26 07:45:04 crc kubenswrapper[4492]: I1126 07:45:04.809911 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402385-mvfvd" event={"ID":"476ba433-3e71-45dc-9e94-bfb9ca40a73b","Type":"ContainerDied","Data":"5c4bdf8a3242d37179a3776806f4d6efb03eda994630d64ac7dad76e30ac6f1b"} Nov 26 07:45:04 crc kubenswrapper[4492]: I1126 07:45:04.809968 4492 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5c4bdf8a3242d37179a3776806f4d6efb03eda994630d64ac7dad76e30ac6f1b" Nov 26 07:45:04 crc kubenswrapper[4492]: I1126 07:45:04.809976 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402385-mvfvd" Nov 26 07:45:05 crc kubenswrapper[4492]: I1126 07:45:05.312349 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402340-scf6v"] Nov 26 07:45:05 crc kubenswrapper[4492]: I1126 07:45:05.318072 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402340-scf6v"] Nov 26 07:45:06 crc kubenswrapper[4492]: I1126 07:45:06.448107 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b942b7c1-09d6-4424-a4b8-97f86a2a051f" path="/var/lib/kubelet/pods/b942b7c1-09d6-4424-a4b8-97f86a2a051f/volumes" Nov 26 07:45:19 crc kubenswrapper[4492]: I1126 07:45:19.441569 4492 patch_prober.go:28] interesting pod/machine-config-daemon-6blv7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 07:45:19 crc kubenswrapper[4492]: I1126 07:45:19.442102 4492 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 07:45:19 crc kubenswrapper[4492]: I1126 07:45:19.442141 4492 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" Nov 26 07:45:19 crc kubenswrapper[4492]: I1126 07:45:19.442849 4492 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"ff9d85070d91579120549cbc57175c28badf619aa6c5006efb62e2b7a647e0df"} pod="openshift-machine-config-operator/machine-config-daemon-6blv7" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 07:45:19 crc kubenswrapper[4492]: I1126 07:45:19.443271 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" containerID="cri-o://ff9d85070d91579120549cbc57175c28badf619aa6c5006efb62e2b7a647e0df" gracePeriod=600 Nov 26 07:45:19 crc kubenswrapper[4492]: I1126 07:45:19.950543 4492 generic.go:334] "Generic (PLEG): container finished" podID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerID="ff9d85070d91579120549cbc57175c28badf619aa6c5006efb62e2b7a647e0df" exitCode=0 Nov 26 07:45:19 crc kubenswrapper[4492]: I1126 07:45:19.950614 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" event={"ID":"04bf18ad-d2a1-4b30-a3fa-2b6247363c82","Type":"ContainerDied","Data":"ff9d85070d91579120549cbc57175c28badf619aa6c5006efb62e2b7a647e0df"} Nov 26 07:45:19 crc kubenswrapper[4492]: I1126 07:45:19.950906 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" event={"ID":"04bf18ad-d2a1-4b30-a3fa-2b6247363c82","Type":"ContainerStarted","Data":"99084e163dffbfd460c50e193e52b41c0d10ca6289f7305e6fd05dca2e26f9c3"} Nov 26 07:45:19 crc kubenswrapper[4492]: I1126 07:45:19.951602 4492 scope.go:117] "RemoveContainer" 
containerID="79f63c8468d8f2a9f7913e668d160bf920560cab61936f2922bc0a2a52c08d34" Nov 26 07:45:59 crc kubenswrapper[4492]: I1126 07:45:59.846853 4492 scope.go:117] "RemoveContainer" containerID="03f75394e181f7125372b3bf6359b96389917a0e6ef13f9440fe3cd74b525237" Nov 26 07:46:18 crc kubenswrapper[4492]: I1126 07:46:18.370204 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-jcs4d"] Nov 26 07:46:18 crc kubenswrapper[4492]: E1126 07:46:18.375344 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="476ba433-3e71-45dc-9e94-bfb9ca40a73b" containerName="collect-profiles" Nov 26 07:46:18 crc kubenswrapper[4492]: I1126 07:46:18.375377 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="476ba433-3e71-45dc-9e94-bfb9ca40a73b" containerName="collect-profiles" Nov 26 07:46:18 crc kubenswrapper[4492]: I1126 07:46:18.377113 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="476ba433-3e71-45dc-9e94-bfb9ca40a73b" containerName="collect-profiles" Nov 26 07:46:18 crc kubenswrapper[4492]: I1126 07:46:18.381842 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jcs4d" Nov 26 07:46:18 crc kubenswrapper[4492]: I1126 07:46:18.485774 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d8647fab-9e14-4d9e-a972-aa550beec3a7-catalog-content\") pod \"redhat-marketplace-jcs4d\" (UID: \"d8647fab-9e14-4d9e-a972-aa550beec3a7\") " pod="openshift-marketplace/redhat-marketplace-jcs4d" Nov 26 07:46:18 crc kubenswrapper[4492]: I1126 07:46:18.485866 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d8647fab-9e14-4d9e-a972-aa550beec3a7-utilities\") pod \"redhat-marketplace-jcs4d\" (UID: \"d8647fab-9e14-4d9e-a972-aa550beec3a7\") " pod="openshift-marketplace/redhat-marketplace-jcs4d" Nov 26 07:46:18 crc kubenswrapper[4492]: I1126 07:46:18.485907 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cqk8t\" (UniqueName: \"kubernetes.io/projected/d8647fab-9e14-4d9e-a972-aa550beec3a7-kube-api-access-cqk8t\") pod \"redhat-marketplace-jcs4d\" (UID: \"d8647fab-9e14-4d9e-a972-aa550beec3a7\") " pod="openshift-marketplace/redhat-marketplace-jcs4d" Nov 26 07:46:18 crc kubenswrapper[4492]: I1126 07:46:18.494260 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-jcs4d"] Nov 26 07:46:18 crc kubenswrapper[4492]: I1126 07:46:18.589504 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d8647fab-9e14-4d9e-a972-aa550beec3a7-catalog-content\") pod \"redhat-marketplace-jcs4d\" (UID: \"d8647fab-9e14-4d9e-a972-aa550beec3a7\") " pod="openshift-marketplace/redhat-marketplace-jcs4d" Nov 26 07:46:18 crc kubenswrapper[4492]: I1126 07:46:18.589591 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d8647fab-9e14-4d9e-a972-aa550beec3a7-utilities\") pod \"redhat-marketplace-jcs4d\" (UID: \"d8647fab-9e14-4d9e-a972-aa550beec3a7\") " pod="openshift-marketplace/redhat-marketplace-jcs4d" Nov 26 07:46:18 crc kubenswrapper[4492]: I1126 07:46:18.589819 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"kube-api-access-cqk8t\" (UniqueName: \"kubernetes.io/projected/d8647fab-9e14-4d9e-a972-aa550beec3a7-kube-api-access-cqk8t\") pod \"redhat-marketplace-jcs4d\" (UID: \"d8647fab-9e14-4d9e-a972-aa550beec3a7\") " pod="openshift-marketplace/redhat-marketplace-jcs4d" Nov 26 07:46:18 crc kubenswrapper[4492]: I1126 07:46:18.592493 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d8647fab-9e14-4d9e-a972-aa550beec3a7-utilities\") pod \"redhat-marketplace-jcs4d\" (UID: \"d8647fab-9e14-4d9e-a972-aa550beec3a7\") " pod="openshift-marketplace/redhat-marketplace-jcs4d" Nov 26 07:46:18 crc kubenswrapper[4492]: I1126 07:46:18.592636 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d8647fab-9e14-4d9e-a972-aa550beec3a7-catalog-content\") pod \"redhat-marketplace-jcs4d\" (UID: \"d8647fab-9e14-4d9e-a972-aa550beec3a7\") " pod="openshift-marketplace/redhat-marketplace-jcs4d" Nov 26 07:46:18 crc kubenswrapper[4492]: I1126 07:46:18.634393 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqk8t\" (UniqueName: \"kubernetes.io/projected/d8647fab-9e14-4d9e-a972-aa550beec3a7-kube-api-access-cqk8t\") pod \"redhat-marketplace-jcs4d\" (UID: \"d8647fab-9e14-4d9e-a972-aa550beec3a7\") " pod="openshift-marketplace/redhat-marketplace-jcs4d" Nov 26 07:46:18 crc kubenswrapper[4492]: I1126 07:46:18.709992 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jcs4d" Nov 26 07:46:19 crc kubenswrapper[4492]: I1126 07:46:19.740794 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-jcs4d"] Nov 26 07:46:20 crc kubenswrapper[4492]: I1126 07:46:20.542237 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jcs4d" event={"ID":"d8647fab-9e14-4d9e-a972-aa550beec3a7","Type":"ContainerDied","Data":"3155f143905cf5831ad6ba6ff656a193380e1e223b0eb1fa650ef2cb4309073c"} Nov 26 07:46:20 crc kubenswrapper[4492]: I1126 07:46:20.542543 4492 generic.go:334] "Generic (PLEG): container finished" podID="d8647fab-9e14-4d9e-a972-aa550beec3a7" containerID="3155f143905cf5831ad6ba6ff656a193380e1e223b0eb1fa650ef2cb4309073c" exitCode=0 Nov 26 07:46:20 crc kubenswrapper[4492]: I1126 07:46:20.543329 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jcs4d" event={"ID":"d8647fab-9e14-4d9e-a972-aa550beec3a7","Type":"ContainerStarted","Data":"fec5cdf87d41ad4d247eee3370ba6655ea00ab3b59e9b0e1b8918538d27144a5"} Nov 26 07:46:21 crc kubenswrapper[4492]: I1126 07:46:21.553006 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jcs4d" event={"ID":"d8647fab-9e14-4d9e-a972-aa550beec3a7","Type":"ContainerStarted","Data":"6f5c9b93f7784125769975d08f78a5389bbc8cb86cee5287765313d9b2213a2d"} Nov 26 07:46:22 crc kubenswrapper[4492]: I1126 07:46:22.564743 4492 generic.go:334] "Generic (PLEG): container finished" podID="d8647fab-9e14-4d9e-a972-aa550beec3a7" containerID="6f5c9b93f7784125769975d08f78a5389bbc8cb86cee5287765313d9b2213a2d" exitCode=0 Nov 26 07:46:22 crc kubenswrapper[4492]: I1126 07:46:22.564880 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jcs4d" 
event={"ID":"d8647fab-9e14-4d9e-a972-aa550beec3a7","Type":"ContainerDied","Data":"6f5c9b93f7784125769975d08f78a5389bbc8cb86cee5287765313d9b2213a2d"} Nov 26 07:46:23 crc kubenswrapper[4492]: I1126 07:46:23.574227 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jcs4d" event={"ID":"d8647fab-9e14-4d9e-a972-aa550beec3a7","Type":"ContainerStarted","Data":"f8ba1bae3b325ade6eb190d88c9c87b9f9d485c542deecf1bc521a8cae91fbcb"} Nov 26 07:46:28 crc kubenswrapper[4492]: I1126 07:46:28.710236 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-jcs4d" Nov 26 07:46:28 crc kubenswrapper[4492]: I1126 07:46:28.710646 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-jcs4d" Nov 26 07:46:28 crc kubenswrapper[4492]: I1126 07:46:28.842364 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-jcs4d" Nov 26 07:46:28 crc kubenswrapper[4492]: I1126 07:46:28.882204 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-jcs4d" podStartSLOduration=8.348457419 podStartE2EDuration="10.874923552s" podCreationTimestamp="2025-11-26 07:46:18 +0000 UTC" firstStartedPulling="2025-11-26 07:46:20.544806916 +0000 UTC m=+3476.428695214" lastFinishedPulling="2025-11-26 07:46:23.071273049 +0000 UTC m=+3478.955161347" observedRunningTime="2025-11-26 07:46:23.595212196 +0000 UTC m=+3479.479100504" watchObservedRunningTime="2025-11-26 07:46:28.874923552 +0000 UTC m=+3484.758811850" Nov 26 07:46:29 crc kubenswrapper[4492]: I1126 07:46:29.673575 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-jcs4d" Nov 26 07:46:29 crc kubenswrapper[4492]: I1126 07:46:29.735388 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-jcs4d"] Nov 26 07:46:31 crc kubenswrapper[4492]: I1126 07:46:31.654275 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-jcs4d" podUID="d8647fab-9e14-4d9e-a972-aa550beec3a7" containerName="registry-server" containerID="cri-o://f8ba1bae3b325ade6eb190d88c9c87b9f9d485c542deecf1bc521a8cae91fbcb" gracePeriod=2 Nov 26 07:46:32 crc kubenswrapper[4492]: I1126 07:46:32.430755 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jcs4d" Nov 26 07:46:32 crc kubenswrapper[4492]: I1126 07:46:32.476512 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cqk8t\" (UniqueName: \"kubernetes.io/projected/d8647fab-9e14-4d9e-a972-aa550beec3a7-kube-api-access-cqk8t\") pod \"d8647fab-9e14-4d9e-a972-aa550beec3a7\" (UID: \"d8647fab-9e14-4d9e-a972-aa550beec3a7\") " Nov 26 07:46:32 crc kubenswrapper[4492]: I1126 07:46:32.476829 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d8647fab-9e14-4d9e-a972-aa550beec3a7-catalog-content\") pod \"d8647fab-9e14-4d9e-a972-aa550beec3a7\" (UID: \"d8647fab-9e14-4d9e-a972-aa550beec3a7\") " Nov 26 07:46:32 crc kubenswrapper[4492]: I1126 07:46:32.477583 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d8647fab-9e14-4d9e-a972-aa550beec3a7-utilities\") pod \"d8647fab-9e14-4d9e-a972-aa550beec3a7\" (UID: \"d8647fab-9e14-4d9e-a972-aa550beec3a7\") " Nov 26 07:46:32 crc kubenswrapper[4492]: I1126 07:46:32.479156 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d8647fab-9e14-4d9e-a972-aa550beec3a7-utilities" (OuterVolumeSpecName: "utilities") pod "d8647fab-9e14-4d9e-a972-aa550beec3a7" (UID: "d8647fab-9e14-4d9e-a972-aa550beec3a7"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:46:32 crc kubenswrapper[4492]: I1126 07:46:32.498664 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d8647fab-9e14-4d9e-a972-aa550beec3a7-kube-api-access-cqk8t" (OuterVolumeSpecName: "kube-api-access-cqk8t") pod "d8647fab-9e14-4d9e-a972-aa550beec3a7" (UID: "d8647fab-9e14-4d9e-a972-aa550beec3a7"). InnerVolumeSpecName "kube-api-access-cqk8t". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:46:32 crc kubenswrapper[4492]: I1126 07:46:32.511412 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d8647fab-9e14-4d9e-a972-aa550beec3a7-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d8647fab-9e14-4d9e-a972-aa550beec3a7" (UID: "d8647fab-9e14-4d9e-a972-aa550beec3a7"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:46:32 crc kubenswrapper[4492]: I1126 07:46:32.579928 4492 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d8647fab-9e14-4d9e-a972-aa550beec3a7-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 07:46:32 crc kubenswrapper[4492]: I1126 07:46:32.580058 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cqk8t\" (UniqueName: \"kubernetes.io/projected/d8647fab-9e14-4d9e-a972-aa550beec3a7-kube-api-access-cqk8t\") on node \"crc\" DevicePath \"\"" Nov 26 07:46:32 crc kubenswrapper[4492]: I1126 07:46:32.580142 4492 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d8647fab-9e14-4d9e-a972-aa550beec3a7-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 07:46:32 crc kubenswrapper[4492]: I1126 07:46:32.665624 4492 generic.go:334] "Generic (PLEG): container finished" podID="d8647fab-9e14-4d9e-a972-aa550beec3a7" containerID="f8ba1bae3b325ade6eb190d88c9c87b9f9d485c542deecf1bc521a8cae91fbcb" exitCode=0 Nov 26 07:46:32 crc kubenswrapper[4492]: I1126 07:46:32.665809 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jcs4d" event={"ID":"d8647fab-9e14-4d9e-a972-aa550beec3a7","Type":"ContainerDied","Data":"f8ba1bae3b325ade6eb190d88c9c87b9f9d485c542deecf1bc521a8cae91fbcb"} Nov 26 07:46:32 crc kubenswrapper[4492]: I1126 07:46:32.666396 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jcs4d" event={"ID":"d8647fab-9e14-4d9e-a972-aa550beec3a7","Type":"ContainerDied","Data":"fec5cdf87d41ad4d247eee3370ba6655ea00ab3b59e9b0e1b8918538d27144a5"} Nov 26 07:46:32 crc kubenswrapper[4492]: I1126 07:46:32.665911 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jcs4d" Nov 26 07:46:32 crc kubenswrapper[4492]: I1126 07:46:32.667281 4492 scope.go:117] "RemoveContainer" containerID="f8ba1bae3b325ade6eb190d88c9c87b9f9d485c542deecf1bc521a8cae91fbcb" Nov 26 07:46:32 crc kubenswrapper[4492]: I1126 07:46:32.705593 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-jcs4d"] Nov 26 07:46:32 crc kubenswrapper[4492]: I1126 07:46:32.705679 4492 scope.go:117] "RemoveContainer" containerID="6f5c9b93f7784125769975d08f78a5389bbc8cb86cee5287765313d9b2213a2d" Nov 26 07:46:32 crc kubenswrapper[4492]: I1126 07:46:32.718916 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-jcs4d"] Nov 26 07:46:32 crc kubenswrapper[4492]: I1126 07:46:32.736001 4492 scope.go:117] "RemoveContainer" containerID="3155f143905cf5831ad6ba6ff656a193380e1e223b0eb1fa650ef2cb4309073c" Nov 26 07:46:32 crc kubenswrapper[4492]: I1126 07:46:32.772887 4492 scope.go:117] "RemoveContainer" containerID="f8ba1bae3b325ade6eb190d88c9c87b9f9d485c542deecf1bc521a8cae91fbcb" Nov 26 07:46:32 crc kubenswrapper[4492]: E1126 07:46:32.775946 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f8ba1bae3b325ade6eb190d88c9c87b9f9d485c542deecf1bc521a8cae91fbcb\": container with ID starting with f8ba1bae3b325ade6eb190d88c9c87b9f9d485c542deecf1bc521a8cae91fbcb not found: ID does not exist" containerID="f8ba1bae3b325ade6eb190d88c9c87b9f9d485c542deecf1bc521a8cae91fbcb" Nov 26 07:46:32 crc kubenswrapper[4492]: I1126 07:46:32.776069 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f8ba1bae3b325ade6eb190d88c9c87b9f9d485c542deecf1bc521a8cae91fbcb"} err="failed to get container status \"f8ba1bae3b325ade6eb190d88c9c87b9f9d485c542deecf1bc521a8cae91fbcb\": rpc error: code = NotFound desc = could not find container \"f8ba1bae3b325ade6eb190d88c9c87b9f9d485c542deecf1bc521a8cae91fbcb\": container with ID starting with f8ba1bae3b325ade6eb190d88c9c87b9f9d485c542deecf1bc521a8cae91fbcb not found: ID does not exist" Nov 26 07:46:32 crc kubenswrapper[4492]: I1126 07:46:32.776216 4492 scope.go:117] "RemoveContainer" containerID="6f5c9b93f7784125769975d08f78a5389bbc8cb86cee5287765313d9b2213a2d" Nov 26 07:46:32 crc kubenswrapper[4492]: E1126 07:46:32.776793 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6f5c9b93f7784125769975d08f78a5389bbc8cb86cee5287765313d9b2213a2d\": container with ID starting with 6f5c9b93f7784125769975d08f78a5389bbc8cb86cee5287765313d9b2213a2d not found: ID does not exist" containerID="6f5c9b93f7784125769975d08f78a5389bbc8cb86cee5287765313d9b2213a2d" Nov 26 07:46:32 crc kubenswrapper[4492]: I1126 07:46:32.776857 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6f5c9b93f7784125769975d08f78a5389bbc8cb86cee5287765313d9b2213a2d"} err="failed to get container status \"6f5c9b93f7784125769975d08f78a5389bbc8cb86cee5287765313d9b2213a2d\": rpc error: code = NotFound desc = could not find container \"6f5c9b93f7784125769975d08f78a5389bbc8cb86cee5287765313d9b2213a2d\": container with ID starting with 6f5c9b93f7784125769975d08f78a5389bbc8cb86cee5287765313d9b2213a2d not found: ID does not exist" Nov 26 07:46:32 crc kubenswrapper[4492]: I1126 07:46:32.776900 4492 scope.go:117] "RemoveContainer" 
containerID="3155f143905cf5831ad6ba6ff656a193380e1e223b0eb1fa650ef2cb4309073c" Nov 26 07:46:32 crc kubenswrapper[4492]: E1126 07:46:32.777417 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3155f143905cf5831ad6ba6ff656a193380e1e223b0eb1fa650ef2cb4309073c\": container with ID starting with 3155f143905cf5831ad6ba6ff656a193380e1e223b0eb1fa650ef2cb4309073c not found: ID does not exist" containerID="3155f143905cf5831ad6ba6ff656a193380e1e223b0eb1fa650ef2cb4309073c" Nov 26 07:46:32 crc kubenswrapper[4492]: I1126 07:46:32.777459 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3155f143905cf5831ad6ba6ff656a193380e1e223b0eb1fa650ef2cb4309073c"} err="failed to get container status \"3155f143905cf5831ad6ba6ff656a193380e1e223b0eb1fa650ef2cb4309073c\": rpc error: code = NotFound desc = could not find container \"3155f143905cf5831ad6ba6ff656a193380e1e223b0eb1fa650ef2cb4309073c\": container with ID starting with 3155f143905cf5831ad6ba6ff656a193380e1e223b0eb1fa650ef2cb4309073c not found: ID does not exist" Nov 26 07:46:34 crc kubenswrapper[4492]: I1126 07:46:34.448031 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d8647fab-9e14-4d9e-a972-aa550beec3a7" path="/var/lib/kubelet/pods/d8647fab-9e14-4d9e-a972-aa550beec3a7/volumes" Nov 26 07:47:19 crc kubenswrapper[4492]: I1126 07:47:19.441563 4492 patch_prober.go:28] interesting pod/machine-config-daemon-6blv7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 07:47:19 crc kubenswrapper[4492]: I1126 07:47:19.443070 4492 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 07:47:49 crc kubenswrapper[4492]: I1126 07:47:49.441350 4492 patch_prober.go:28] interesting pod/machine-config-daemon-6blv7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 07:47:49 crc kubenswrapper[4492]: I1126 07:47:49.442030 4492 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 07:48:18 crc kubenswrapper[4492]: I1126 07:48:18.208882 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-f72rb"] Nov 26 07:48:18 crc kubenswrapper[4492]: E1126 07:48:18.211136 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8647fab-9e14-4d9e-a972-aa550beec3a7" containerName="extract-content" Nov 26 07:48:18 crc kubenswrapper[4492]: I1126 07:48:18.211164 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8647fab-9e14-4d9e-a972-aa550beec3a7" containerName="extract-content" Nov 26 07:48:18 crc kubenswrapper[4492]: E1126 07:48:18.211248 4492 cpu_manager.go:410] "RemoveStaleState: 
removing container" podUID="d8647fab-9e14-4d9e-a972-aa550beec3a7" containerName="extract-utilities" Nov 26 07:48:18 crc kubenswrapper[4492]: I1126 07:48:18.211258 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8647fab-9e14-4d9e-a972-aa550beec3a7" containerName="extract-utilities" Nov 26 07:48:18 crc kubenswrapper[4492]: E1126 07:48:18.211298 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8647fab-9e14-4d9e-a972-aa550beec3a7" containerName="registry-server" Nov 26 07:48:18 crc kubenswrapper[4492]: I1126 07:48:18.211305 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8647fab-9e14-4d9e-a972-aa550beec3a7" containerName="registry-server" Nov 26 07:48:18 crc kubenswrapper[4492]: I1126 07:48:18.211900 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="d8647fab-9e14-4d9e-a972-aa550beec3a7" containerName="registry-server" Nov 26 07:48:18 crc kubenswrapper[4492]: I1126 07:48:18.214688 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-f72rb" Nov 26 07:48:18 crc kubenswrapper[4492]: I1126 07:48:18.231394 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-f72rb"] Nov 26 07:48:18 crc kubenswrapper[4492]: I1126 07:48:18.251640 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a69445af-2adb-454a-bb79-59c253c669a9-utilities\") pod \"certified-operators-f72rb\" (UID: \"a69445af-2adb-454a-bb79-59c253c669a9\") " pod="openshift-marketplace/certified-operators-f72rb" Nov 26 07:48:18 crc kubenswrapper[4492]: I1126 07:48:18.251848 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a69445af-2adb-454a-bb79-59c253c669a9-catalog-content\") pod \"certified-operators-f72rb\" (UID: \"a69445af-2adb-454a-bb79-59c253c669a9\") " pod="openshift-marketplace/certified-operators-f72rb" Nov 26 07:48:18 crc kubenswrapper[4492]: I1126 07:48:18.251939 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sfbjp\" (UniqueName: \"kubernetes.io/projected/a69445af-2adb-454a-bb79-59c253c669a9-kube-api-access-sfbjp\") pod \"certified-operators-f72rb\" (UID: \"a69445af-2adb-454a-bb79-59c253c669a9\") " pod="openshift-marketplace/certified-operators-f72rb" Nov 26 07:48:18 crc kubenswrapper[4492]: I1126 07:48:18.354150 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a69445af-2adb-454a-bb79-59c253c669a9-utilities\") pod \"certified-operators-f72rb\" (UID: \"a69445af-2adb-454a-bb79-59c253c669a9\") " pod="openshift-marketplace/certified-operators-f72rb" Nov 26 07:48:18 crc kubenswrapper[4492]: I1126 07:48:18.354785 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a69445af-2adb-454a-bb79-59c253c669a9-catalog-content\") pod \"certified-operators-f72rb\" (UID: \"a69445af-2adb-454a-bb79-59c253c669a9\") " pod="openshift-marketplace/certified-operators-f72rb" Nov 26 07:48:18 crc kubenswrapper[4492]: I1126 07:48:18.355135 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sfbjp\" (UniqueName: 
\"kubernetes.io/projected/a69445af-2adb-454a-bb79-59c253c669a9-kube-api-access-sfbjp\") pod \"certified-operators-f72rb\" (UID: \"a69445af-2adb-454a-bb79-59c253c669a9\") " pod="openshift-marketplace/certified-operators-f72rb" Nov 26 07:48:18 crc kubenswrapper[4492]: I1126 07:48:18.354689 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a69445af-2adb-454a-bb79-59c253c669a9-utilities\") pod \"certified-operators-f72rb\" (UID: \"a69445af-2adb-454a-bb79-59c253c669a9\") " pod="openshift-marketplace/certified-operators-f72rb" Nov 26 07:48:18 crc kubenswrapper[4492]: I1126 07:48:18.355074 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a69445af-2adb-454a-bb79-59c253c669a9-catalog-content\") pod \"certified-operators-f72rb\" (UID: \"a69445af-2adb-454a-bb79-59c253c669a9\") " pod="openshift-marketplace/certified-operators-f72rb" Nov 26 07:48:18 crc kubenswrapper[4492]: I1126 07:48:18.382133 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sfbjp\" (UniqueName: \"kubernetes.io/projected/a69445af-2adb-454a-bb79-59c253c669a9-kube-api-access-sfbjp\") pod \"certified-operators-f72rb\" (UID: \"a69445af-2adb-454a-bb79-59c253c669a9\") " pod="openshift-marketplace/certified-operators-f72rb" Nov 26 07:48:18 crc kubenswrapper[4492]: I1126 07:48:18.535581 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-f72rb" Nov 26 07:48:19 crc kubenswrapper[4492]: I1126 07:48:19.110088 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-f72rb"] Nov 26 07:48:19 crc kubenswrapper[4492]: I1126 07:48:19.441876 4492 patch_prober.go:28] interesting pod/machine-config-daemon-6blv7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 07:48:19 crc kubenswrapper[4492]: I1126 07:48:19.441949 4492 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 07:48:19 crc kubenswrapper[4492]: I1126 07:48:19.442008 4492 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" Nov 26 07:48:19 crc kubenswrapper[4492]: I1126 07:48:19.442979 4492 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"99084e163dffbfd460c50e193e52b41c0d10ca6289f7305e6fd05dca2e26f9c3"} pod="openshift-machine-config-operator/machine-config-daemon-6blv7" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 07:48:19 crc kubenswrapper[4492]: I1126 07:48:19.443033 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" containerID="cri-o://99084e163dffbfd460c50e193e52b41c0d10ca6289f7305e6fd05dca2e26f9c3" gracePeriod=600 Nov 26 
07:48:19 crc kubenswrapper[4492]: E1126 07:48:19.573505 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 07:48:19 crc kubenswrapper[4492]: I1126 07:48:19.710277 4492 generic.go:334] "Generic (PLEG): container finished" podID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerID="99084e163dffbfd460c50e193e52b41c0d10ca6289f7305e6fd05dca2e26f9c3" exitCode=0 Nov 26 07:48:19 crc kubenswrapper[4492]: I1126 07:48:19.710356 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" event={"ID":"04bf18ad-d2a1-4b30-a3fa-2b6247363c82","Type":"ContainerDied","Data":"99084e163dffbfd460c50e193e52b41c0d10ca6289f7305e6fd05dca2e26f9c3"} Nov 26 07:48:19 crc kubenswrapper[4492]: I1126 07:48:19.710668 4492 scope.go:117] "RemoveContainer" containerID="ff9d85070d91579120549cbc57175c28badf619aa6c5006efb62e2b7a647e0df" Nov 26 07:48:19 crc kubenswrapper[4492]: I1126 07:48:19.711383 4492 scope.go:117] "RemoveContainer" containerID="99084e163dffbfd460c50e193e52b41c0d10ca6289f7305e6fd05dca2e26f9c3" Nov 26 07:48:19 crc kubenswrapper[4492]: E1126 07:48:19.711835 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 07:48:19 crc kubenswrapper[4492]: I1126 07:48:19.715960 4492 generic.go:334] "Generic (PLEG): container finished" podID="a69445af-2adb-454a-bb79-59c253c669a9" containerID="7f296e04057a056d64fc320ed611ad5284ffec482a67ffb2b58ab7d32561e441" exitCode=0 Nov 26 07:48:19 crc kubenswrapper[4492]: I1126 07:48:19.715997 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-f72rb" event={"ID":"a69445af-2adb-454a-bb79-59c253c669a9","Type":"ContainerDied","Data":"7f296e04057a056d64fc320ed611ad5284ffec482a67ffb2b58ab7d32561e441"} Nov 26 07:48:19 crc kubenswrapper[4492]: I1126 07:48:19.716042 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-f72rb" event={"ID":"a69445af-2adb-454a-bb79-59c253c669a9","Type":"ContainerStarted","Data":"51380de17abe4d4a122a897cffe52de104006c92088f2b3e16ea05032d44ff74"} Nov 26 07:48:19 crc kubenswrapper[4492]: I1126 07:48:19.722947 4492 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 26 07:48:20 crc kubenswrapper[4492]: I1126 07:48:20.729377 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-f72rb" event={"ID":"a69445af-2adb-454a-bb79-59c253c669a9","Type":"ContainerStarted","Data":"a3391b2fcc4535fdf4a7509e7b80e8b3cd6bfc4bf2fe0f1cdda36e871f68a3c2"} Nov 26 07:48:21 crc kubenswrapper[4492]: I1126 07:48:21.739931 4492 generic.go:334] "Generic (PLEG): container finished" podID="a69445af-2adb-454a-bb79-59c253c669a9" 
containerID="a3391b2fcc4535fdf4a7509e7b80e8b3cd6bfc4bf2fe0f1cdda36e871f68a3c2" exitCode=0 Nov 26 07:48:21 crc kubenswrapper[4492]: I1126 07:48:21.740026 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-f72rb" event={"ID":"a69445af-2adb-454a-bb79-59c253c669a9","Type":"ContainerDied","Data":"a3391b2fcc4535fdf4a7509e7b80e8b3cd6bfc4bf2fe0f1cdda36e871f68a3c2"} Nov 26 07:48:22 crc kubenswrapper[4492]: I1126 07:48:22.753334 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-f72rb" event={"ID":"a69445af-2adb-454a-bb79-59c253c669a9","Type":"ContainerStarted","Data":"382f917915bbf88ff7a2c5db27f2f57909e12f133e89b6efd3a792c8baec512b"} Nov 26 07:48:22 crc kubenswrapper[4492]: I1126 07:48:22.778545 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-f72rb" podStartSLOduration=2.260645293 podStartE2EDuration="4.776703726s" podCreationTimestamp="2025-11-26 07:48:18 +0000 UTC" firstStartedPulling="2025-11-26 07:48:19.722701253 +0000 UTC m=+3595.606589552" lastFinishedPulling="2025-11-26 07:48:22.238759687 +0000 UTC m=+3598.122647985" observedRunningTime="2025-11-26 07:48:22.773485304 +0000 UTC m=+3598.657373602" watchObservedRunningTime="2025-11-26 07:48:22.776703726 +0000 UTC m=+3598.660592024" Nov 26 07:48:28 crc kubenswrapper[4492]: I1126 07:48:28.535913 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-f72rb" Nov 26 07:48:28 crc kubenswrapper[4492]: I1126 07:48:28.536529 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-f72rb" Nov 26 07:48:28 crc kubenswrapper[4492]: I1126 07:48:28.575486 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-f72rb" Nov 26 07:48:28 crc kubenswrapper[4492]: I1126 07:48:28.855317 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-f72rb" Nov 26 07:48:28 crc kubenswrapper[4492]: I1126 07:48:28.917924 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-f72rb"] Nov 26 07:48:30 crc kubenswrapper[4492]: I1126 07:48:30.840607 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-f72rb" podUID="a69445af-2adb-454a-bb79-59c253c669a9" containerName="registry-server" containerID="cri-o://382f917915bbf88ff7a2c5db27f2f57909e12f133e89b6efd3a792c8baec512b" gracePeriod=2 Nov 26 07:48:31 crc kubenswrapper[4492]: I1126 07:48:31.364726 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-f72rb" Nov 26 07:48:31 crc kubenswrapper[4492]: I1126 07:48:31.530870 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a69445af-2adb-454a-bb79-59c253c669a9-catalog-content\") pod \"a69445af-2adb-454a-bb79-59c253c669a9\" (UID: \"a69445af-2adb-454a-bb79-59c253c669a9\") " Nov 26 07:48:31 crc kubenswrapper[4492]: I1126 07:48:31.531034 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a69445af-2adb-454a-bb79-59c253c669a9-utilities\") pod \"a69445af-2adb-454a-bb79-59c253c669a9\" (UID: \"a69445af-2adb-454a-bb79-59c253c669a9\") " Nov 26 07:48:31 crc kubenswrapper[4492]: I1126 07:48:31.531248 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sfbjp\" (UniqueName: \"kubernetes.io/projected/a69445af-2adb-454a-bb79-59c253c669a9-kube-api-access-sfbjp\") pod \"a69445af-2adb-454a-bb79-59c253c669a9\" (UID: \"a69445af-2adb-454a-bb79-59c253c669a9\") " Nov 26 07:48:31 crc kubenswrapper[4492]: I1126 07:48:31.531918 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a69445af-2adb-454a-bb79-59c253c669a9-utilities" (OuterVolumeSpecName: "utilities") pod "a69445af-2adb-454a-bb79-59c253c669a9" (UID: "a69445af-2adb-454a-bb79-59c253c669a9"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:48:31 crc kubenswrapper[4492]: I1126 07:48:31.540084 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a69445af-2adb-454a-bb79-59c253c669a9-kube-api-access-sfbjp" (OuterVolumeSpecName: "kube-api-access-sfbjp") pod "a69445af-2adb-454a-bb79-59c253c669a9" (UID: "a69445af-2adb-454a-bb79-59c253c669a9"). InnerVolumeSpecName "kube-api-access-sfbjp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:48:31 crc kubenswrapper[4492]: I1126 07:48:31.569582 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a69445af-2adb-454a-bb79-59c253c669a9-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a69445af-2adb-454a-bb79-59c253c669a9" (UID: "a69445af-2adb-454a-bb79-59c253c669a9"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:48:31 crc kubenswrapper[4492]: I1126 07:48:31.633646 4492 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a69445af-2adb-454a-bb79-59c253c669a9-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 07:48:31 crc kubenswrapper[4492]: I1126 07:48:31.633676 4492 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a69445af-2adb-454a-bb79-59c253c669a9-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 07:48:31 crc kubenswrapper[4492]: I1126 07:48:31.633687 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sfbjp\" (UniqueName: \"kubernetes.io/projected/a69445af-2adb-454a-bb79-59c253c669a9-kube-api-access-sfbjp\") on node \"crc\" DevicePath \"\"" Nov 26 07:48:31 crc kubenswrapper[4492]: I1126 07:48:31.850151 4492 generic.go:334] "Generic (PLEG): container finished" podID="a69445af-2adb-454a-bb79-59c253c669a9" containerID="382f917915bbf88ff7a2c5db27f2f57909e12f133e89b6efd3a792c8baec512b" exitCode=0 Nov 26 07:48:31 crc kubenswrapper[4492]: I1126 07:48:31.850222 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-f72rb" event={"ID":"a69445af-2adb-454a-bb79-59c253c669a9","Type":"ContainerDied","Data":"382f917915bbf88ff7a2c5db27f2f57909e12f133e89b6efd3a792c8baec512b"} Nov 26 07:48:31 crc kubenswrapper[4492]: I1126 07:48:31.850247 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-f72rb" Nov 26 07:48:31 crc kubenswrapper[4492]: I1126 07:48:31.850275 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-f72rb" event={"ID":"a69445af-2adb-454a-bb79-59c253c669a9","Type":"ContainerDied","Data":"51380de17abe4d4a122a897cffe52de104006c92088f2b3e16ea05032d44ff74"} Nov 26 07:48:31 crc kubenswrapper[4492]: I1126 07:48:31.850297 4492 scope.go:117] "RemoveContainer" containerID="382f917915bbf88ff7a2c5db27f2f57909e12f133e89b6efd3a792c8baec512b" Nov 26 07:48:31 crc kubenswrapper[4492]: I1126 07:48:31.876187 4492 scope.go:117] "RemoveContainer" containerID="a3391b2fcc4535fdf4a7509e7b80e8b3cd6bfc4bf2fe0f1cdda36e871f68a3c2" Nov 26 07:48:31 crc kubenswrapper[4492]: I1126 07:48:31.878276 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-f72rb"] Nov 26 07:48:31 crc kubenswrapper[4492]: I1126 07:48:31.886538 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-f72rb"] Nov 26 07:48:31 crc kubenswrapper[4492]: I1126 07:48:31.895997 4492 scope.go:117] "RemoveContainer" containerID="7f296e04057a056d64fc320ed611ad5284ffec482a67ffb2b58ab7d32561e441" Nov 26 07:48:31 crc kubenswrapper[4492]: I1126 07:48:31.944501 4492 scope.go:117] "RemoveContainer" containerID="382f917915bbf88ff7a2c5db27f2f57909e12f133e89b6efd3a792c8baec512b" Nov 26 07:48:31 crc kubenswrapper[4492]: E1126 07:48:31.945279 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"382f917915bbf88ff7a2c5db27f2f57909e12f133e89b6efd3a792c8baec512b\": container with ID starting with 382f917915bbf88ff7a2c5db27f2f57909e12f133e89b6efd3a792c8baec512b not found: ID does not exist" containerID="382f917915bbf88ff7a2c5db27f2f57909e12f133e89b6efd3a792c8baec512b" Nov 26 07:48:31 crc kubenswrapper[4492]: I1126 07:48:31.945325 
4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"382f917915bbf88ff7a2c5db27f2f57909e12f133e89b6efd3a792c8baec512b"} err="failed to get container status \"382f917915bbf88ff7a2c5db27f2f57909e12f133e89b6efd3a792c8baec512b\": rpc error: code = NotFound desc = could not find container \"382f917915bbf88ff7a2c5db27f2f57909e12f133e89b6efd3a792c8baec512b\": container with ID starting with 382f917915bbf88ff7a2c5db27f2f57909e12f133e89b6efd3a792c8baec512b not found: ID does not exist" Nov 26 07:48:31 crc kubenswrapper[4492]: I1126 07:48:31.945358 4492 scope.go:117] "RemoveContainer" containerID="a3391b2fcc4535fdf4a7509e7b80e8b3cd6bfc4bf2fe0f1cdda36e871f68a3c2" Nov 26 07:48:31 crc kubenswrapper[4492]: E1126 07:48:31.945736 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a3391b2fcc4535fdf4a7509e7b80e8b3cd6bfc4bf2fe0f1cdda36e871f68a3c2\": container with ID starting with a3391b2fcc4535fdf4a7509e7b80e8b3cd6bfc4bf2fe0f1cdda36e871f68a3c2 not found: ID does not exist" containerID="a3391b2fcc4535fdf4a7509e7b80e8b3cd6bfc4bf2fe0f1cdda36e871f68a3c2" Nov 26 07:48:31 crc kubenswrapper[4492]: I1126 07:48:31.945774 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a3391b2fcc4535fdf4a7509e7b80e8b3cd6bfc4bf2fe0f1cdda36e871f68a3c2"} err="failed to get container status \"a3391b2fcc4535fdf4a7509e7b80e8b3cd6bfc4bf2fe0f1cdda36e871f68a3c2\": rpc error: code = NotFound desc = could not find container \"a3391b2fcc4535fdf4a7509e7b80e8b3cd6bfc4bf2fe0f1cdda36e871f68a3c2\": container with ID starting with a3391b2fcc4535fdf4a7509e7b80e8b3cd6bfc4bf2fe0f1cdda36e871f68a3c2 not found: ID does not exist" Nov 26 07:48:31 crc kubenswrapper[4492]: I1126 07:48:31.945796 4492 scope.go:117] "RemoveContainer" containerID="7f296e04057a056d64fc320ed611ad5284ffec482a67ffb2b58ab7d32561e441" Nov 26 07:48:31 crc kubenswrapper[4492]: E1126 07:48:31.946281 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7f296e04057a056d64fc320ed611ad5284ffec482a67ffb2b58ab7d32561e441\": container with ID starting with 7f296e04057a056d64fc320ed611ad5284ffec482a67ffb2b58ab7d32561e441 not found: ID does not exist" containerID="7f296e04057a056d64fc320ed611ad5284ffec482a67ffb2b58ab7d32561e441" Nov 26 07:48:31 crc kubenswrapper[4492]: I1126 07:48:31.946312 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7f296e04057a056d64fc320ed611ad5284ffec482a67ffb2b58ab7d32561e441"} err="failed to get container status \"7f296e04057a056d64fc320ed611ad5284ffec482a67ffb2b58ab7d32561e441\": rpc error: code = NotFound desc = could not find container \"7f296e04057a056d64fc320ed611ad5284ffec482a67ffb2b58ab7d32561e441\": container with ID starting with 7f296e04057a056d64fc320ed611ad5284ffec482a67ffb2b58ab7d32561e441 not found: ID does not exist" Nov 26 07:48:32 crc kubenswrapper[4492]: I1126 07:48:32.451984 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a69445af-2adb-454a-bb79-59c253c669a9" path="/var/lib/kubelet/pods/a69445af-2adb-454a-bb79-59c253c669a9/volumes" Nov 26 07:48:35 crc kubenswrapper[4492]: I1126 07:48:35.439552 4492 scope.go:117] "RemoveContainer" containerID="99084e163dffbfd460c50e193e52b41c0d10ca6289f7305e6fd05dca2e26f9c3" Nov 26 07:48:35 crc kubenswrapper[4492]: E1126 07:48:35.440208 4492 pod_workers.go:1301] "Error syncing pod, skipping" 
err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 07:48:50 crc kubenswrapper[4492]: I1126 07:48:50.439306 4492 scope.go:117] "RemoveContainer" containerID="99084e163dffbfd460c50e193e52b41c0d10ca6289f7305e6fd05dca2e26f9c3" Nov 26 07:48:50 crc kubenswrapper[4492]: E1126 07:48:50.440465 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 07:49:04 crc kubenswrapper[4492]: I1126 07:49:04.445994 4492 scope.go:117] "RemoveContainer" containerID="99084e163dffbfd460c50e193e52b41c0d10ca6289f7305e6fd05dca2e26f9c3" Nov 26 07:49:04 crc kubenswrapper[4492]: E1126 07:49:04.447103 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 07:49:18 crc kubenswrapper[4492]: I1126 07:49:18.438780 4492 scope.go:117] "RemoveContainer" containerID="99084e163dffbfd460c50e193e52b41c0d10ca6289f7305e6fd05dca2e26f9c3" Nov 26 07:49:18 crc kubenswrapper[4492]: E1126 07:49:18.439733 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 07:49:33 crc kubenswrapper[4492]: I1126 07:49:33.438241 4492 scope.go:117] "RemoveContainer" containerID="99084e163dffbfd460c50e193e52b41c0d10ca6289f7305e6fd05dca2e26f9c3" Nov 26 07:49:33 crc kubenswrapper[4492]: E1126 07:49:33.439281 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 07:49:47 crc kubenswrapper[4492]: I1126 07:49:47.438778 4492 scope.go:117] "RemoveContainer" containerID="99084e163dffbfd460c50e193e52b41c0d10ca6289f7305e6fd05dca2e26f9c3" Nov 26 07:49:47 crc kubenswrapper[4492]: E1126 07:49:47.440709 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 07:49:59 crc kubenswrapper[4492]: I1126 07:49:59.439299 4492 scope.go:117] "RemoveContainer" containerID="99084e163dffbfd460c50e193e52b41c0d10ca6289f7305e6fd05dca2e26f9c3" Nov 26 07:49:59 crc kubenswrapper[4492]: E1126 07:49:59.440375 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 07:50:14 crc kubenswrapper[4492]: I1126 07:50:14.444951 4492 scope.go:117] "RemoveContainer" containerID="99084e163dffbfd460c50e193e52b41c0d10ca6289f7305e6fd05dca2e26f9c3" Nov 26 07:50:14 crc kubenswrapper[4492]: E1126 07:50:14.446437 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 07:50:18 crc kubenswrapper[4492]: I1126 07:50:18.671830 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-kcpn5"] Nov 26 07:50:18 crc kubenswrapper[4492]: E1126 07:50:18.675998 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a69445af-2adb-454a-bb79-59c253c669a9" containerName="extract-content" Nov 26 07:50:18 crc kubenswrapper[4492]: I1126 07:50:18.676026 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="a69445af-2adb-454a-bb79-59c253c669a9" containerName="extract-content" Nov 26 07:50:18 crc kubenswrapper[4492]: E1126 07:50:18.676049 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a69445af-2adb-454a-bb79-59c253c669a9" containerName="registry-server" Nov 26 07:50:18 crc kubenswrapper[4492]: I1126 07:50:18.676058 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="a69445af-2adb-454a-bb79-59c253c669a9" containerName="registry-server" Nov 26 07:50:18 crc kubenswrapper[4492]: E1126 07:50:18.676085 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a69445af-2adb-454a-bb79-59c253c669a9" containerName="extract-utilities" Nov 26 07:50:18 crc kubenswrapper[4492]: I1126 07:50:18.676094 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="a69445af-2adb-454a-bb79-59c253c669a9" containerName="extract-utilities" Nov 26 07:50:18 crc kubenswrapper[4492]: I1126 07:50:18.676378 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="a69445af-2adb-454a-bb79-59c253c669a9" containerName="registry-server" Nov 26 07:50:18 crc kubenswrapper[4492]: I1126 07:50:18.678337 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-kcpn5" Nov 26 07:50:18 crc kubenswrapper[4492]: I1126 07:50:18.694843 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-kcpn5"] Nov 26 07:50:18 crc kubenswrapper[4492]: I1126 07:50:18.869226 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eac0c1d9-8199-44a8-a2cc-0b1a5e19b20f-utilities\") pod \"redhat-operators-kcpn5\" (UID: \"eac0c1d9-8199-44a8-a2cc-0b1a5e19b20f\") " pod="openshift-marketplace/redhat-operators-kcpn5" Nov 26 07:50:18 crc kubenswrapper[4492]: I1126 07:50:18.869295 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gjv8d\" (UniqueName: \"kubernetes.io/projected/eac0c1d9-8199-44a8-a2cc-0b1a5e19b20f-kube-api-access-gjv8d\") pod \"redhat-operators-kcpn5\" (UID: \"eac0c1d9-8199-44a8-a2cc-0b1a5e19b20f\") " pod="openshift-marketplace/redhat-operators-kcpn5" Nov 26 07:50:18 crc kubenswrapper[4492]: I1126 07:50:18.869336 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eac0c1d9-8199-44a8-a2cc-0b1a5e19b20f-catalog-content\") pod \"redhat-operators-kcpn5\" (UID: \"eac0c1d9-8199-44a8-a2cc-0b1a5e19b20f\") " pod="openshift-marketplace/redhat-operators-kcpn5" Nov 26 07:50:18 crc kubenswrapper[4492]: I1126 07:50:18.972543 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eac0c1d9-8199-44a8-a2cc-0b1a5e19b20f-utilities\") pod \"redhat-operators-kcpn5\" (UID: \"eac0c1d9-8199-44a8-a2cc-0b1a5e19b20f\") " pod="openshift-marketplace/redhat-operators-kcpn5" Nov 26 07:50:18 crc kubenswrapper[4492]: I1126 07:50:18.972605 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gjv8d\" (UniqueName: \"kubernetes.io/projected/eac0c1d9-8199-44a8-a2cc-0b1a5e19b20f-kube-api-access-gjv8d\") pod \"redhat-operators-kcpn5\" (UID: \"eac0c1d9-8199-44a8-a2cc-0b1a5e19b20f\") " pod="openshift-marketplace/redhat-operators-kcpn5" Nov 26 07:50:18 crc kubenswrapper[4492]: I1126 07:50:18.972646 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eac0c1d9-8199-44a8-a2cc-0b1a5e19b20f-catalog-content\") pod \"redhat-operators-kcpn5\" (UID: \"eac0c1d9-8199-44a8-a2cc-0b1a5e19b20f\") " pod="openshift-marketplace/redhat-operators-kcpn5" Nov 26 07:50:18 crc kubenswrapper[4492]: I1126 07:50:18.973065 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eac0c1d9-8199-44a8-a2cc-0b1a5e19b20f-utilities\") pod \"redhat-operators-kcpn5\" (UID: \"eac0c1d9-8199-44a8-a2cc-0b1a5e19b20f\") " pod="openshift-marketplace/redhat-operators-kcpn5" Nov 26 07:50:18 crc kubenswrapper[4492]: I1126 07:50:18.973123 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eac0c1d9-8199-44a8-a2cc-0b1a5e19b20f-catalog-content\") pod \"redhat-operators-kcpn5\" (UID: \"eac0c1d9-8199-44a8-a2cc-0b1a5e19b20f\") " pod="openshift-marketplace/redhat-operators-kcpn5" Nov 26 07:50:19 crc kubenswrapper[4492]: I1126 07:50:18.995938 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-gjv8d\" (UniqueName: \"kubernetes.io/projected/eac0c1d9-8199-44a8-a2cc-0b1a5e19b20f-kube-api-access-gjv8d\") pod \"redhat-operators-kcpn5\" (UID: \"eac0c1d9-8199-44a8-a2cc-0b1a5e19b20f\") " pod="openshift-marketplace/redhat-operators-kcpn5" Nov 26 07:50:19 crc kubenswrapper[4492]: I1126 07:50:19.007317 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-kcpn5" Nov 26 07:50:19 crc kubenswrapper[4492]: I1126 07:50:19.487787 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-kcpn5"] Nov 26 07:50:19 crc kubenswrapper[4492]: I1126 07:50:19.882961 4492 generic.go:334] "Generic (PLEG): container finished" podID="eac0c1d9-8199-44a8-a2cc-0b1a5e19b20f" containerID="8f69be2139f3e72822f95dd6b1cc94888cda5681c8e9fc635aa5b5f088e9ee95" exitCode=0 Nov 26 07:50:19 crc kubenswrapper[4492]: I1126 07:50:19.883298 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kcpn5" event={"ID":"eac0c1d9-8199-44a8-a2cc-0b1a5e19b20f","Type":"ContainerDied","Data":"8f69be2139f3e72822f95dd6b1cc94888cda5681c8e9fc635aa5b5f088e9ee95"} Nov 26 07:50:19 crc kubenswrapper[4492]: I1126 07:50:19.883797 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kcpn5" event={"ID":"eac0c1d9-8199-44a8-a2cc-0b1a5e19b20f","Type":"ContainerStarted","Data":"c507179cc0bf7535540c09db0e78065bec5145be82117b190be9b65c904082b1"} Nov 26 07:50:20 crc kubenswrapper[4492]: I1126 07:50:20.894558 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kcpn5" event={"ID":"eac0c1d9-8199-44a8-a2cc-0b1a5e19b20f","Type":"ContainerStarted","Data":"71bf0fe74f67dae938a45fab5743ffb53cafbda157c398a1e1c9f31d086bb6d6"} Nov 26 07:50:22 crc kubenswrapper[4492]: I1126 07:50:22.918767 4492 generic.go:334] "Generic (PLEG): container finished" podID="eac0c1d9-8199-44a8-a2cc-0b1a5e19b20f" containerID="71bf0fe74f67dae938a45fab5743ffb53cafbda157c398a1e1c9f31d086bb6d6" exitCode=0 Nov 26 07:50:22 crc kubenswrapper[4492]: I1126 07:50:22.918878 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kcpn5" event={"ID":"eac0c1d9-8199-44a8-a2cc-0b1a5e19b20f","Type":"ContainerDied","Data":"71bf0fe74f67dae938a45fab5743ffb53cafbda157c398a1e1c9f31d086bb6d6"} Nov 26 07:50:23 crc kubenswrapper[4492]: I1126 07:50:23.931443 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kcpn5" event={"ID":"eac0c1d9-8199-44a8-a2cc-0b1a5e19b20f","Type":"ContainerStarted","Data":"952096a952a478ec379265fc81355ef19f349970b00ccb8d18885c221a87ce4b"} Nov 26 07:50:23 crc kubenswrapper[4492]: I1126 07:50:23.952451 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-kcpn5" podStartSLOduration=2.46973174 podStartE2EDuration="5.952420068s" podCreationTimestamp="2025-11-26 07:50:18 +0000 UTC" firstStartedPulling="2025-11-26 07:50:19.890875318 +0000 UTC m=+3715.774763615" lastFinishedPulling="2025-11-26 07:50:23.373563645 +0000 UTC m=+3719.257451943" observedRunningTime="2025-11-26 07:50:23.950202548 +0000 UTC m=+3719.834090846" watchObservedRunningTime="2025-11-26 07:50:23.952420068 +0000 UTC m=+3719.836308366" Nov 26 07:50:29 crc kubenswrapper[4492]: I1126 07:50:29.007841 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-kcpn5" Nov 
26 07:50:29 crc kubenswrapper[4492]: I1126 07:50:29.008373 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-kcpn5" Nov 26 07:50:29 crc kubenswrapper[4492]: I1126 07:50:29.438903 4492 scope.go:117] "RemoveContainer" containerID="99084e163dffbfd460c50e193e52b41c0d10ca6289f7305e6fd05dca2e26f9c3" Nov 26 07:50:29 crc kubenswrapper[4492]: E1126 07:50:29.439627 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 07:50:30 crc kubenswrapper[4492]: I1126 07:50:30.046101 4492 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-kcpn5" podUID="eac0c1d9-8199-44a8-a2cc-0b1a5e19b20f" containerName="registry-server" probeResult="failure" output=< Nov 26 07:50:30 crc kubenswrapper[4492]: timeout: failed to connect service ":50051" within 1s Nov 26 07:50:30 crc kubenswrapper[4492]: > Nov 26 07:50:39 crc kubenswrapper[4492]: I1126 07:50:39.048276 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-kcpn5" Nov 26 07:50:39 crc kubenswrapper[4492]: I1126 07:50:39.094673 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-kcpn5" Nov 26 07:50:39 crc kubenswrapper[4492]: I1126 07:50:39.289456 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-kcpn5"] Nov 26 07:50:40 crc kubenswrapper[4492]: I1126 07:50:40.085038 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-kcpn5" podUID="eac0c1d9-8199-44a8-a2cc-0b1a5e19b20f" containerName="registry-server" containerID="cri-o://952096a952a478ec379265fc81355ef19f349970b00ccb8d18885c221a87ce4b" gracePeriod=2 Nov 26 07:50:40 crc kubenswrapper[4492]: I1126 07:50:40.439068 4492 scope.go:117] "RemoveContainer" containerID="99084e163dffbfd460c50e193e52b41c0d10ca6289f7305e6fd05dca2e26f9c3" Nov 26 07:50:40 crc kubenswrapper[4492]: E1126 07:50:40.439599 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 07:50:40 crc kubenswrapper[4492]: I1126 07:50:40.744570 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-kcpn5" Nov 26 07:50:40 crc kubenswrapper[4492]: I1126 07:50:40.839479 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/eac0c1d9-8199-44a8-a2cc-0b1a5e19b20f-utilities" (OuterVolumeSpecName: "utilities") pod "eac0c1d9-8199-44a8-a2cc-0b1a5e19b20f" (UID: "eac0c1d9-8199-44a8-a2cc-0b1a5e19b20f"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:50:40 crc kubenswrapper[4492]: I1126 07:50:40.839642 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eac0c1d9-8199-44a8-a2cc-0b1a5e19b20f-utilities\") pod \"eac0c1d9-8199-44a8-a2cc-0b1a5e19b20f\" (UID: \"eac0c1d9-8199-44a8-a2cc-0b1a5e19b20f\") " Nov 26 07:50:40 crc kubenswrapper[4492]: I1126 07:50:40.839757 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gjv8d\" (UniqueName: \"kubernetes.io/projected/eac0c1d9-8199-44a8-a2cc-0b1a5e19b20f-kube-api-access-gjv8d\") pod \"eac0c1d9-8199-44a8-a2cc-0b1a5e19b20f\" (UID: \"eac0c1d9-8199-44a8-a2cc-0b1a5e19b20f\") " Nov 26 07:50:40 crc kubenswrapper[4492]: I1126 07:50:40.840041 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eac0c1d9-8199-44a8-a2cc-0b1a5e19b20f-catalog-content\") pod \"eac0c1d9-8199-44a8-a2cc-0b1a5e19b20f\" (UID: \"eac0c1d9-8199-44a8-a2cc-0b1a5e19b20f\") " Nov 26 07:50:40 crc kubenswrapper[4492]: I1126 07:50:40.840860 4492 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eac0c1d9-8199-44a8-a2cc-0b1a5e19b20f-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 07:50:40 crc kubenswrapper[4492]: I1126 07:50:40.849814 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/eac0c1d9-8199-44a8-a2cc-0b1a5e19b20f-kube-api-access-gjv8d" (OuterVolumeSpecName: "kube-api-access-gjv8d") pod "eac0c1d9-8199-44a8-a2cc-0b1a5e19b20f" (UID: "eac0c1d9-8199-44a8-a2cc-0b1a5e19b20f"). InnerVolumeSpecName "kube-api-access-gjv8d". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:50:40 crc kubenswrapper[4492]: I1126 07:50:40.908584 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/eac0c1d9-8199-44a8-a2cc-0b1a5e19b20f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "eac0c1d9-8199-44a8-a2cc-0b1a5e19b20f" (UID: "eac0c1d9-8199-44a8-a2cc-0b1a5e19b20f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:50:40 crc kubenswrapper[4492]: I1126 07:50:40.945498 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gjv8d\" (UniqueName: \"kubernetes.io/projected/eac0c1d9-8199-44a8-a2cc-0b1a5e19b20f-kube-api-access-gjv8d\") on node \"crc\" DevicePath \"\"" Nov 26 07:50:40 crc kubenswrapper[4492]: I1126 07:50:40.945540 4492 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eac0c1d9-8199-44a8-a2cc-0b1a5e19b20f-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 07:50:41 crc kubenswrapper[4492]: I1126 07:50:41.094911 4492 generic.go:334] "Generic (PLEG): container finished" podID="eac0c1d9-8199-44a8-a2cc-0b1a5e19b20f" containerID="952096a952a478ec379265fc81355ef19f349970b00ccb8d18885c221a87ce4b" exitCode=0 Nov 26 07:50:41 crc kubenswrapper[4492]: I1126 07:50:41.094977 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kcpn5" event={"ID":"eac0c1d9-8199-44a8-a2cc-0b1a5e19b20f","Type":"ContainerDied","Data":"952096a952a478ec379265fc81355ef19f349970b00ccb8d18885c221a87ce4b"} Nov 26 07:50:41 crc kubenswrapper[4492]: I1126 07:50:41.094999 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-kcpn5" Nov 26 07:50:41 crc kubenswrapper[4492]: I1126 07:50:41.095013 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kcpn5" event={"ID":"eac0c1d9-8199-44a8-a2cc-0b1a5e19b20f","Type":"ContainerDied","Data":"c507179cc0bf7535540c09db0e78065bec5145be82117b190be9b65c904082b1"} Nov 26 07:50:41 crc kubenswrapper[4492]: I1126 07:50:41.095033 4492 scope.go:117] "RemoveContainer" containerID="952096a952a478ec379265fc81355ef19f349970b00ccb8d18885c221a87ce4b" Nov 26 07:50:41 crc kubenswrapper[4492]: I1126 07:50:41.137788 4492 scope.go:117] "RemoveContainer" containerID="71bf0fe74f67dae938a45fab5743ffb53cafbda157c398a1e1c9f31d086bb6d6" Nov 26 07:50:41 crc kubenswrapper[4492]: I1126 07:50:41.138812 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-kcpn5"] Nov 26 07:50:41 crc kubenswrapper[4492]: I1126 07:50:41.146417 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-kcpn5"] Nov 26 07:50:41 crc kubenswrapper[4492]: I1126 07:50:41.160013 4492 scope.go:117] "RemoveContainer" containerID="8f69be2139f3e72822f95dd6b1cc94888cda5681c8e9fc635aa5b5f088e9ee95" Nov 26 07:50:41 crc kubenswrapper[4492]: I1126 07:50:41.198528 4492 scope.go:117] "RemoveContainer" containerID="952096a952a478ec379265fc81355ef19f349970b00ccb8d18885c221a87ce4b" Nov 26 07:50:41 crc kubenswrapper[4492]: E1126 07:50:41.199046 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"952096a952a478ec379265fc81355ef19f349970b00ccb8d18885c221a87ce4b\": container with ID starting with 952096a952a478ec379265fc81355ef19f349970b00ccb8d18885c221a87ce4b not found: ID does not exist" containerID="952096a952a478ec379265fc81355ef19f349970b00ccb8d18885c221a87ce4b" Nov 26 07:50:41 crc kubenswrapper[4492]: I1126 07:50:41.199105 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"952096a952a478ec379265fc81355ef19f349970b00ccb8d18885c221a87ce4b"} err="failed to get container status \"952096a952a478ec379265fc81355ef19f349970b00ccb8d18885c221a87ce4b\": rpc error: code = NotFound desc = could not find container \"952096a952a478ec379265fc81355ef19f349970b00ccb8d18885c221a87ce4b\": container with ID starting with 952096a952a478ec379265fc81355ef19f349970b00ccb8d18885c221a87ce4b not found: ID does not exist" Nov 26 07:50:41 crc kubenswrapper[4492]: I1126 07:50:41.199142 4492 scope.go:117] "RemoveContainer" containerID="71bf0fe74f67dae938a45fab5743ffb53cafbda157c398a1e1c9f31d086bb6d6" Nov 26 07:50:41 crc kubenswrapper[4492]: E1126 07:50:41.199638 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"71bf0fe74f67dae938a45fab5743ffb53cafbda157c398a1e1c9f31d086bb6d6\": container with ID starting with 71bf0fe74f67dae938a45fab5743ffb53cafbda157c398a1e1c9f31d086bb6d6 not found: ID does not exist" containerID="71bf0fe74f67dae938a45fab5743ffb53cafbda157c398a1e1c9f31d086bb6d6" Nov 26 07:50:41 crc kubenswrapper[4492]: I1126 07:50:41.199670 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"71bf0fe74f67dae938a45fab5743ffb53cafbda157c398a1e1c9f31d086bb6d6"} err="failed to get container status \"71bf0fe74f67dae938a45fab5743ffb53cafbda157c398a1e1c9f31d086bb6d6\": rpc error: code = NotFound desc = could not find container 
\"71bf0fe74f67dae938a45fab5743ffb53cafbda157c398a1e1c9f31d086bb6d6\": container with ID starting with 71bf0fe74f67dae938a45fab5743ffb53cafbda157c398a1e1c9f31d086bb6d6 not found: ID does not exist" Nov 26 07:50:41 crc kubenswrapper[4492]: I1126 07:50:41.199698 4492 scope.go:117] "RemoveContainer" containerID="8f69be2139f3e72822f95dd6b1cc94888cda5681c8e9fc635aa5b5f088e9ee95" Nov 26 07:50:41 crc kubenswrapper[4492]: E1126 07:50:41.199970 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8f69be2139f3e72822f95dd6b1cc94888cda5681c8e9fc635aa5b5f088e9ee95\": container with ID starting with 8f69be2139f3e72822f95dd6b1cc94888cda5681c8e9fc635aa5b5f088e9ee95 not found: ID does not exist" containerID="8f69be2139f3e72822f95dd6b1cc94888cda5681c8e9fc635aa5b5f088e9ee95" Nov 26 07:50:41 crc kubenswrapper[4492]: I1126 07:50:41.200006 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8f69be2139f3e72822f95dd6b1cc94888cda5681c8e9fc635aa5b5f088e9ee95"} err="failed to get container status \"8f69be2139f3e72822f95dd6b1cc94888cda5681c8e9fc635aa5b5f088e9ee95\": rpc error: code = NotFound desc = could not find container \"8f69be2139f3e72822f95dd6b1cc94888cda5681c8e9fc635aa5b5f088e9ee95\": container with ID starting with 8f69be2139f3e72822f95dd6b1cc94888cda5681c8e9fc635aa5b5f088e9ee95 not found: ID does not exist" Nov 26 07:50:42 crc kubenswrapper[4492]: I1126 07:50:42.448213 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="eac0c1d9-8199-44a8-a2cc-0b1a5e19b20f" path="/var/lib/kubelet/pods/eac0c1d9-8199-44a8-a2cc-0b1a5e19b20f/volumes" Nov 26 07:50:53 crc kubenswrapper[4492]: I1126 07:50:53.439155 4492 scope.go:117] "RemoveContainer" containerID="99084e163dffbfd460c50e193e52b41c0d10ca6289f7305e6fd05dca2e26f9c3" Nov 26 07:50:53 crc kubenswrapper[4492]: E1126 07:50:53.439988 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 07:51:04 crc kubenswrapper[4492]: I1126 07:51:04.447952 4492 scope.go:117] "RemoveContainer" containerID="99084e163dffbfd460c50e193e52b41c0d10ca6289f7305e6fd05dca2e26f9c3" Nov 26 07:51:04 crc kubenswrapper[4492]: E1126 07:51:04.449163 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 07:51:17 crc kubenswrapper[4492]: I1126 07:51:17.439345 4492 scope.go:117] "RemoveContainer" containerID="99084e163dffbfd460c50e193e52b41c0d10ca6289f7305e6fd05dca2e26f9c3" Nov 26 07:51:17 crc kubenswrapper[4492]: E1126 07:51:17.440308 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 07:51:30 crc kubenswrapper[4492]: I1126 07:51:30.439347 4492 scope.go:117] "RemoveContainer" containerID="99084e163dffbfd460c50e193e52b41c0d10ca6289f7305e6fd05dca2e26f9c3" Nov 26 07:51:30 crc kubenswrapper[4492]: E1126 07:51:30.440322 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 07:51:43 crc kubenswrapper[4492]: I1126 07:51:43.438570 4492 scope.go:117] "RemoveContainer" containerID="99084e163dffbfd460c50e193e52b41c0d10ca6289f7305e6fd05dca2e26f9c3" Nov 26 07:51:43 crc kubenswrapper[4492]: E1126 07:51:43.441004 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 07:51:54 crc kubenswrapper[4492]: I1126 07:51:54.444980 4492 scope.go:117] "RemoveContainer" containerID="99084e163dffbfd460c50e193e52b41c0d10ca6289f7305e6fd05dca2e26f9c3" Nov 26 07:51:54 crc kubenswrapper[4492]: E1126 07:51:54.445842 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 07:52:07 crc kubenswrapper[4492]: I1126 07:52:07.438999 4492 scope.go:117] "RemoveContainer" containerID="99084e163dffbfd460c50e193e52b41c0d10ca6289f7305e6fd05dca2e26f9c3" Nov 26 07:52:07 crc kubenswrapper[4492]: E1126 07:52:07.439987 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 07:52:22 crc kubenswrapper[4492]: I1126 07:52:22.439203 4492 scope.go:117] "RemoveContainer" containerID="99084e163dffbfd460c50e193e52b41c0d10ca6289f7305e6fd05dca2e26f9c3" Nov 26 07:52:22 crc kubenswrapper[4492]: E1126 07:52:22.440182 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" 
podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 07:52:35 crc kubenswrapper[4492]: I1126 07:52:35.439442 4492 scope.go:117] "RemoveContainer" containerID="99084e163dffbfd460c50e193e52b41c0d10ca6289f7305e6fd05dca2e26f9c3" Nov 26 07:52:35 crc kubenswrapper[4492]: E1126 07:52:35.440167 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 07:52:48 crc kubenswrapper[4492]: I1126 07:52:48.439329 4492 scope.go:117] "RemoveContainer" containerID="99084e163dffbfd460c50e193e52b41c0d10ca6289f7305e6fd05dca2e26f9c3" Nov 26 07:52:48 crc kubenswrapper[4492]: E1126 07:52:48.440041 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 07:53:01 crc kubenswrapper[4492]: I1126 07:53:01.439290 4492 scope.go:117] "RemoveContainer" containerID="99084e163dffbfd460c50e193e52b41c0d10ca6289f7305e6fd05dca2e26f9c3" Nov 26 07:53:01 crc kubenswrapper[4492]: E1126 07:53:01.440167 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 07:53:16 crc kubenswrapper[4492]: I1126 07:53:16.439229 4492 scope.go:117] "RemoveContainer" containerID="99084e163dffbfd460c50e193e52b41c0d10ca6289f7305e6fd05dca2e26f9c3" Nov 26 07:53:16 crc kubenswrapper[4492]: E1126 07:53:16.440233 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 07:53:27 crc kubenswrapper[4492]: I1126 07:53:27.439137 4492 scope.go:117] "RemoveContainer" containerID="99084e163dffbfd460c50e193e52b41c0d10ca6289f7305e6fd05dca2e26f9c3" Nov 26 07:53:27 crc kubenswrapper[4492]: I1126 07:53:27.762142 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" event={"ID":"04bf18ad-d2a1-4b30-a3fa-2b6247363c82","Type":"ContainerStarted","Data":"1302d86044c6280198ffa29c8834eac5e4e2b2f2d92d6e57ffc98d40f7490351"} Nov 26 07:53:37 crc kubenswrapper[4492]: I1126 07:53:37.431433 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-28hgv"] Nov 26 07:53:37 crc kubenswrapper[4492]: E1126 07:53:37.434604 4492 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="eac0c1d9-8199-44a8-a2cc-0b1a5e19b20f" containerName="extract-content" Nov 26 07:53:37 crc kubenswrapper[4492]: I1126 07:53:37.434663 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="eac0c1d9-8199-44a8-a2cc-0b1a5e19b20f" containerName="extract-content" Nov 26 07:53:37 crc kubenswrapper[4492]: E1126 07:53:37.434716 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eac0c1d9-8199-44a8-a2cc-0b1a5e19b20f" containerName="extract-utilities" Nov 26 07:53:37 crc kubenswrapper[4492]: I1126 07:53:37.434726 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="eac0c1d9-8199-44a8-a2cc-0b1a5e19b20f" containerName="extract-utilities" Nov 26 07:53:37 crc kubenswrapper[4492]: E1126 07:53:37.434747 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eac0c1d9-8199-44a8-a2cc-0b1a5e19b20f" containerName="registry-server" Nov 26 07:53:37 crc kubenswrapper[4492]: I1126 07:53:37.434753 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="eac0c1d9-8199-44a8-a2cc-0b1a5e19b20f" containerName="registry-server" Nov 26 07:53:37 crc kubenswrapper[4492]: I1126 07:53:37.435604 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="eac0c1d9-8199-44a8-a2cc-0b1a5e19b20f" containerName="registry-server" Nov 26 07:53:37 crc kubenswrapper[4492]: I1126 07:53:37.439770 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-28hgv" Nov 26 07:53:37 crc kubenswrapper[4492]: I1126 07:53:37.456220 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-28hgv"] Nov 26 07:53:37 crc kubenswrapper[4492]: I1126 07:53:37.555544 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f1b009d7-7bdf-47c5-8075-9b7ac5cad5e9-utilities\") pod \"community-operators-28hgv\" (UID: \"f1b009d7-7bdf-47c5-8075-9b7ac5cad5e9\") " pod="openshift-marketplace/community-operators-28hgv" Nov 26 07:53:37 crc kubenswrapper[4492]: I1126 07:53:37.556396 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-br42m\" (UniqueName: \"kubernetes.io/projected/f1b009d7-7bdf-47c5-8075-9b7ac5cad5e9-kube-api-access-br42m\") pod \"community-operators-28hgv\" (UID: \"f1b009d7-7bdf-47c5-8075-9b7ac5cad5e9\") " pod="openshift-marketplace/community-operators-28hgv" Nov 26 07:53:37 crc kubenswrapper[4492]: I1126 07:53:37.556633 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f1b009d7-7bdf-47c5-8075-9b7ac5cad5e9-catalog-content\") pod \"community-operators-28hgv\" (UID: \"f1b009d7-7bdf-47c5-8075-9b7ac5cad5e9\") " pod="openshift-marketplace/community-operators-28hgv" Nov 26 07:53:37 crc kubenswrapper[4492]: I1126 07:53:37.659861 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f1b009d7-7bdf-47c5-8075-9b7ac5cad5e9-utilities\") pod \"community-operators-28hgv\" (UID: \"f1b009d7-7bdf-47c5-8075-9b7ac5cad5e9\") " pod="openshift-marketplace/community-operators-28hgv" Nov 26 07:53:37 crc kubenswrapper[4492]: I1126 07:53:37.660136 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-br42m\" (UniqueName: \"kubernetes.io/projected/f1b009d7-7bdf-47c5-8075-9b7ac5cad5e9-kube-api-access-br42m\") pod 
\"community-operators-28hgv\" (UID: \"f1b009d7-7bdf-47c5-8075-9b7ac5cad5e9\") " pod="openshift-marketplace/community-operators-28hgv" Nov 26 07:53:37 crc kubenswrapper[4492]: I1126 07:53:37.660233 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f1b009d7-7bdf-47c5-8075-9b7ac5cad5e9-catalog-content\") pod \"community-operators-28hgv\" (UID: \"f1b009d7-7bdf-47c5-8075-9b7ac5cad5e9\") " pod="openshift-marketplace/community-operators-28hgv" Nov 26 07:53:37 crc kubenswrapper[4492]: I1126 07:53:37.661836 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f1b009d7-7bdf-47c5-8075-9b7ac5cad5e9-utilities\") pod \"community-operators-28hgv\" (UID: \"f1b009d7-7bdf-47c5-8075-9b7ac5cad5e9\") " pod="openshift-marketplace/community-operators-28hgv" Nov 26 07:53:37 crc kubenswrapper[4492]: I1126 07:53:37.662398 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f1b009d7-7bdf-47c5-8075-9b7ac5cad5e9-catalog-content\") pod \"community-operators-28hgv\" (UID: \"f1b009d7-7bdf-47c5-8075-9b7ac5cad5e9\") " pod="openshift-marketplace/community-operators-28hgv" Nov 26 07:53:37 crc kubenswrapper[4492]: I1126 07:53:37.692728 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-br42m\" (UniqueName: \"kubernetes.io/projected/f1b009d7-7bdf-47c5-8075-9b7ac5cad5e9-kube-api-access-br42m\") pod \"community-operators-28hgv\" (UID: \"f1b009d7-7bdf-47c5-8075-9b7ac5cad5e9\") " pod="openshift-marketplace/community-operators-28hgv" Nov 26 07:53:37 crc kubenswrapper[4492]: I1126 07:53:37.763057 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-28hgv" Nov 26 07:53:38 crc kubenswrapper[4492]: I1126 07:53:38.507946 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-28hgv"] Nov 26 07:53:38 crc kubenswrapper[4492]: W1126 07:53:38.524618 4492 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf1b009d7_7bdf_47c5_8075_9b7ac5cad5e9.slice/crio-a3cd977bd3a7740496b93c8d35bead595602b3691358fa9e629f53c697cdb1e9 WatchSource:0}: Error finding container a3cd977bd3a7740496b93c8d35bead595602b3691358fa9e629f53c697cdb1e9: Status 404 returned error can't find the container with id a3cd977bd3a7740496b93c8d35bead595602b3691358fa9e629f53c697cdb1e9 Nov 26 07:53:38 crc kubenswrapper[4492]: I1126 07:53:38.872961 4492 generic.go:334] "Generic (PLEG): container finished" podID="f1b009d7-7bdf-47c5-8075-9b7ac5cad5e9" containerID="e398591c4db3b8dbfe7df59ba68efbb55a954068d1f16eb71c0e91ea91c3845f" exitCode=0 Nov 26 07:53:38 crc kubenswrapper[4492]: I1126 07:53:38.873029 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-28hgv" event={"ID":"f1b009d7-7bdf-47c5-8075-9b7ac5cad5e9","Type":"ContainerDied","Data":"e398591c4db3b8dbfe7df59ba68efbb55a954068d1f16eb71c0e91ea91c3845f"} Nov 26 07:53:38 crc kubenswrapper[4492]: I1126 07:53:38.873064 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-28hgv" event={"ID":"f1b009d7-7bdf-47c5-8075-9b7ac5cad5e9","Type":"ContainerStarted","Data":"a3cd977bd3a7740496b93c8d35bead595602b3691358fa9e629f53c697cdb1e9"} Nov 26 07:53:38 crc kubenswrapper[4492]: I1126 07:53:38.878454 4492 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 26 07:53:39 crc kubenswrapper[4492]: I1126 07:53:39.886773 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-28hgv" event={"ID":"f1b009d7-7bdf-47c5-8075-9b7ac5cad5e9","Type":"ContainerStarted","Data":"d85aa361c9d3d880799c4d6a5b70f14bd4f3f425339a432cefb1d4b4a9b43bfb"} Nov 26 07:53:40 crc kubenswrapper[4492]: I1126 07:53:40.899896 4492 generic.go:334] "Generic (PLEG): container finished" podID="f1b009d7-7bdf-47c5-8075-9b7ac5cad5e9" containerID="d85aa361c9d3d880799c4d6a5b70f14bd4f3f425339a432cefb1d4b4a9b43bfb" exitCode=0 Nov 26 07:53:40 crc kubenswrapper[4492]: I1126 07:53:40.900006 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-28hgv" event={"ID":"f1b009d7-7bdf-47c5-8075-9b7ac5cad5e9","Type":"ContainerDied","Data":"d85aa361c9d3d880799c4d6a5b70f14bd4f3f425339a432cefb1d4b4a9b43bfb"} Nov 26 07:53:41 crc kubenswrapper[4492]: I1126 07:53:41.915268 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-28hgv" event={"ID":"f1b009d7-7bdf-47c5-8075-9b7ac5cad5e9","Type":"ContainerStarted","Data":"bb5e7ac1d442c36c70cbe6b52398e058b0b5fe59960ba25e07fcd98b5da720d7"} Nov 26 07:53:41 crc kubenswrapper[4492]: I1126 07:53:41.950121 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-28hgv" podStartSLOduration=2.442331216 podStartE2EDuration="4.945284301s" podCreationTimestamp="2025-11-26 07:53:37 +0000 UTC" firstStartedPulling="2025-11-26 07:53:38.875958955 +0000 UTC m=+3914.759847253" lastFinishedPulling="2025-11-26 07:53:41.378912041 +0000 UTC m=+3917.262800338" 
observedRunningTime="2025-11-26 07:53:41.936847737 +0000 UTC m=+3917.820736035" watchObservedRunningTime="2025-11-26 07:53:41.945284301 +0000 UTC m=+3917.829172599" Nov 26 07:53:47 crc kubenswrapper[4492]: I1126 07:53:47.763862 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-28hgv" Nov 26 07:53:47 crc kubenswrapper[4492]: I1126 07:53:47.764640 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-28hgv" Nov 26 07:53:47 crc kubenswrapper[4492]: I1126 07:53:47.807717 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-28hgv" Nov 26 07:53:48 crc kubenswrapper[4492]: I1126 07:53:48.020074 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-28hgv" Nov 26 07:53:48 crc kubenswrapper[4492]: I1126 07:53:48.070825 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-28hgv"] Nov 26 07:53:49 crc kubenswrapper[4492]: I1126 07:53:49.990334 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-28hgv" podUID="f1b009d7-7bdf-47c5-8075-9b7ac5cad5e9" containerName="registry-server" containerID="cri-o://bb5e7ac1d442c36c70cbe6b52398e058b0b5fe59960ba25e07fcd98b5da720d7" gracePeriod=2 Nov 26 07:53:50 crc kubenswrapper[4492]: I1126 07:53:50.606701 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-28hgv" Nov 26 07:53:50 crc kubenswrapper[4492]: I1126 07:53:50.714258 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-br42m\" (UniqueName: \"kubernetes.io/projected/f1b009d7-7bdf-47c5-8075-9b7ac5cad5e9-kube-api-access-br42m\") pod \"f1b009d7-7bdf-47c5-8075-9b7ac5cad5e9\" (UID: \"f1b009d7-7bdf-47c5-8075-9b7ac5cad5e9\") " Nov 26 07:53:50 crc kubenswrapper[4492]: I1126 07:53:50.714631 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f1b009d7-7bdf-47c5-8075-9b7ac5cad5e9-catalog-content\") pod \"f1b009d7-7bdf-47c5-8075-9b7ac5cad5e9\" (UID: \"f1b009d7-7bdf-47c5-8075-9b7ac5cad5e9\") " Nov 26 07:53:50 crc kubenswrapper[4492]: I1126 07:53:50.714717 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f1b009d7-7bdf-47c5-8075-9b7ac5cad5e9-utilities\") pod \"f1b009d7-7bdf-47c5-8075-9b7ac5cad5e9\" (UID: \"f1b009d7-7bdf-47c5-8075-9b7ac5cad5e9\") " Nov 26 07:53:50 crc kubenswrapper[4492]: I1126 07:53:50.719579 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f1b009d7-7bdf-47c5-8075-9b7ac5cad5e9-utilities" (OuterVolumeSpecName: "utilities") pod "f1b009d7-7bdf-47c5-8075-9b7ac5cad5e9" (UID: "f1b009d7-7bdf-47c5-8075-9b7ac5cad5e9"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:53:50 crc kubenswrapper[4492]: I1126 07:53:50.749894 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f1b009d7-7bdf-47c5-8075-9b7ac5cad5e9-kube-api-access-br42m" (OuterVolumeSpecName: "kube-api-access-br42m") pod "f1b009d7-7bdf-47c5-8075-9b7ac5cad5e9" (UID: "f1b009d7-7bdf-47c5-8075-9b7ac5cad5e9"). 
InnerVolumeSpecName "kube-api-access-br42m". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:53:50 crc kubenswrapper[4492]: I1126 07:53:50.778277 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f1b009d7-7bdf-47c5-8075-9b7ac5cad5e9-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f1b009d7-7bdf-47c5-8075-9b7ac5cad5e9" (UID: "f1b009d7-7bdf-47c5-8075-9b7ac5cad5e9"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:53:50 crc kubenswrapper[4492]: I1126 07:53:50.817468 4492 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f1b009d7-7bdf-47c5-8075-9b7ac5cad5e9-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 07:53:50 crc kubenswrapper[4492]: I1126 07:53:50.817496 4492 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f1b009d7-7bdf-47c5-8075-9b7ac5cad5e9-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 07:53:50 crc kubenswrapper[4492]: I1126 07:53:50.817509 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-br42m\" (UniqueName: \"kubernetes.io/projected/f1b009d7-7bdf-47c5-8075-9b7ac5cad5e9-kube-api-access-br42m\") on node \"crc\" DevicePath \"\"" Nov 26 07:53:51 crc kubenswrapper[4492]: I1126 07:53:51.004041 4492 generic.go:334] "Generic (PLEG): container finished" podID="f1b009d7-7bdf-47c5-8075-9b7ac5cad5e9" containerID="bb5e7ac1d442c36c70cbe6b52398e058b0b5fe59960ba25e07fcd98b5da720d7" exitCode=0 Nov 26 07:53:51 crc kubenswrapper[4492]: I1126 07:53:51.004109 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-28hgv" Nov 26 07:53:51 crc kubenswrapper[4492]: I1126 07:53:51.004113 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-28hgv" event={"ID":"f1b009d7-7bdf-47c5-8075-9b7ac5cad5e9","Type":"ContainerDied","Data":"bb5e7ac1d442c36c70cbe6b52398e058b0b5fe59960ba25e07fcd98b5da720d7"} Nov 26 07:53:51 crc kubenswrapper[4492]: I1126 07:53:51.004580 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-28hgv" event={"ID":"f1b009d7-7bdf-47c5-8075-9b7ac5cad5e9","Type":"ContainerDied","Data":"a3cd977bd3a7740496b93c8d35bead595602b3691358fa9e629f53c697cdb1e9"} Nov 26 07:53:51 crc kubenswrapper[4492]: I1126 07:53:51.004614 4492 scope.go:117] "RemoveContainer" containerID="bb5e7ac1d442c36c70cbe6b52398e058b0b5fe59960ba25e07fcd98b5da720d7" Nov 26 07:53:51 crc kubenswrapper[4492]: I1126 07:53:51.041618 4492 scope.go:117] "RemoveContainer" containerID="d85aa361c9d3d880799c4d6a5b70f14bd4f3f425339a432cefb1d4b4a9b43bfb" Nov 26 07:53:51 crc kubenswrapper[4492]: I1126 07:53:51.058001 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-28hgv"] Nov 26 07:53:51 crc kubenswrapper[4492]: I1126 07:53:51.062295 4492 scope.go:117] "RemoveContainer" containerID="e398591c4db3b8dbfe7df59ba68efbb55a954068d1f16eb71c0e91ea91c3845f" Nov 26 07:53:51 crc kubenswrapper[4492]: I1126 07:53:51.064866 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-28hgv"] Nov 26 07:53:51 crc kubenswrapper[4492]: I1126 07:53:51.098419 4492 scope.go:117] "RemoveContainer" containerID="bb5e7ac1d442c36c70cbe6b52398e058b0b5fe59960ba25e07fcd98b5da720d7" Nov 26 07:53:51 crc 
kubenswrapper[4492]: E1126 07:53:51.100003 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bb5e7ac1d442c36c70cbe6b52398e058b0b5fe59960ba25e07fcd98b5da720d7\": container with ID starting with bb5e7ac1d442c36c70cbe6b52398e058b0b5fe59960ba25e07fcd98b5da720d7 not found: ID does not exist" containerID="bb5e7ac1d442c36c70cbe6b52398e058b0b5fe59960ba25e07fcd98b5da720d7" Nov 26 07:53:51 crc kubenswrapper[4492]: I1126 07:53:51.100714 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bb5e7ac1d442c36c70cbe6b52398e058b0b5fe59960ba25e07fcd98b5da720d7"} err="failed to get container status \"bb5e7ac1d442c36c70cbe6b52398e058b0b5fe59960ba25e07fcd98b5da720d7\": rpc error: code = NotFound desc = could not find container \"bb5e7ac1d442c36c70cbe6b52398e058b0b5fe59960ba25e07fcd98b5da720d7\": container with ID starting with bb5e7ac1d442c36c70cbe6b52398e058b0b5fe59960ba25e07fcd98b5da720d7 not found: ID does not exist" Nov 26 07:53:51 crc kubenswrapper[4492]: I1126 07:53:51.100757 4492 scope.go:117] "RemoveContainer" containerID="d85aa361c9d3d880799c4d6a5b70f14bd4f3f425339a432cefb1d4b4a9b43bfb" Nov 26 07:53:51 crc kubenswrapper[4492]: E1126 07:53:51.101160 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d85aa361c9d3d880799c4d6a5b70f14bd4f3f425339a432cefb1d4b4a9b43bfb\": container with ID starting with d85aa361c9d3d880799c4d6a5b70f14bd4f3f425339a432cefb1d4b4a9b43bfb not found: ID does not exist" containerID="d85aa361c9d3d880799c4d6a5b70f14bd4f3f425339a432cefb1d4b4a9b43bfb" Nov 26 07:53:51 crc kubenswrapper[4492]: I1126 07:53:51.101208 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d85aa361c9d3d880799c4d6a5b70f14bd4f3f425339a432cefb1d4b4a9b43bfb"} err="failed to get container status \"d85aa361c9d3d880799c4d6a5b70f14bd4f3f425339a432cefb1d4b4a9b43bfb\": rpc error: code = NotFound desc = could not find container \"d85aa361c9d3d880799c4d6a5b70f14bd4f3f425339a432cefb1d4b4a9b43bfb\": container with ID starting with d85aa361c9d3d880799c4d6a5b70f14bd4f3f425339a432cefb1d4b4a9b43bfb not found: ID does not exist" Nov 26 07:53:51 crc kubenswrapper[4492]: I1126 07:53:51.101236 4492 scope.go:117] "RemoveContainer" containerID="e398591c4db3b8dbfe7df59ba68efbb55a954068d1f16eb71c0e91ea91c3845f" Nov 26 07:53:51 crc kubenswrapper[4492]: E1126 07:53:51.102156 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e398591c4db3b8dbfe7df59ba68efbb55a954068d1f16eb71c0e91ea91c3845f\": container with ID starting with e398591c4db3b8dbfe7df59ba68efbb55a954068d1f16eb71c0e91ea91c3845f not found: ID does not exist" containerID="e398591c4db3b8dbfe7df59ba68efbb55a954068d1f16eb71c0e91ea91c3845f" Nov 26 07:53:51 crc kubenswrapper[4492]: I1126 07:53:51.102211 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e398591c4db3b8dbfe7df59ba68efbb55a954068d1f16eb71c0e91ea91c3845f"} err="failed to get container status \"e398591c4db3b8dbfe7df59ba68efbb55a954068d1f16eb71c0e91ea91c3845f\": rpc error: code = NotFound desc = could not find container \"e398591c4db3b8dbfe7df59ba68efbb55a954068d1f16eb71c0e91ea91c3845f\": container with ID starting with e398591c4db3b8dbfe7df59ba68efbb55a954068d1f16eb71c0e91ea91c3845f not found: ID does not exist" Nov 26 07:53:52 crc kubenswrapper[4492]: 
I1126 07:53:52.453005 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f1b009d7-7bdf-47c5-8075-9b7ac5cad5e9" path="/var/lib/kubelet/pods/f1b009d7-7bdf-47c5-8075-9b7ac5cad5e9/volumes" Nov 26 07:55:49 crc kubenswrapper[4492]: I1126 07:55:49.443752 4492 patch_prober.go:28] interesting pod/machine-config-daemon-6blv7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 07:55:49 crc kubenswrapper[4492]: I1126 07:55:49.446608 4492 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 07:56:19 crc kubenswrapper[4492]: I1126 07:56:19.441437 4492 patch_prober.go:28] interesting pod/machine-config-daemon-6blv7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 07:56:19 crc kubenswrapper[4492]: I1126 07:56:19.443046 4492 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 07:56:28 crc kubenswrapper[4492]: I1126 07:56:28.213122 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-bnr2t"] Nov 26 07:56:28 crc kubenswrapper[4492]: E1126 07:56:28.215273 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f1b009d7-7bdf-47c5-8075-9b7ac5cad5e9" containerName="registry-server" Nov 26 07:56:28 crc kubenswrapper[4492]: I1126 07:56:28.215304 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="f1b009d7-7bdf-47c5-8075-9b7ac5cad5e9" containerName="registry-server" Nov 26 07:56:28 crc kubenswrapper[4492]: E1126 07:56:28.215485 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f1b009d7-7bdf-47c5-8075-9b7ac5cad5e9" containerName="extract-content" Nov 26 07:56:28 crc kubenswrapper[4492]: I1126 07:56:28.215521 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="f1b009d7-7bdf-47c5-8075-9b7ac5cad5e9" containerName="extract-content" Nov 26 07:56:28 crc kubenswrapper[4492]: E1126 07:56:28.215561 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f1b009d7-7bdf-47c5-8075-9b7ac5cad5e9" containerName="extract-utilities" Nov 26 07:56:28 crc kubenswrapper[4492]: I1126 07:56:28.215571 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="f1b009d7-7bdf-47c5-8075-9b7ac5cad5e9" containerName="extract-utilities" Nov 26 07:56:28 crc kubenswrapper[4492]: I1126 07:56:28.216817 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="f1b009d7-7bdf-47c5-8075-9b7ac5cad5e9" containerName="registry-server" Nov 26 07:56:28 crc kubenswrapper[4492]: I1126 07:56:28.222262 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bnr2t" Nov 26 07:56:28 crc kubenswrapper[4492]: I1126 07:56:28.268698 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mprqd\" (UniqueName: \"kubernetes.io/projected/6d29de84-1bd8-4c4f-b3d8-ff1df67beb29-kube-api-access-mprqd\") pod \"redhat-marketplace-bnr2t\" (UID: \"6d29de84-1bd8-4c4f-b3d8-ff1df67beb29\") " pod="openshift-marketplace/redhat-marketplace-bnr2t" Nov 26 07:56:28 crc kubenswrapper[4492]: I1126 07:56:28.268850 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6d29de84-1bd8-4c4f-b3d8-ff1df67beb29-utilities\") pod \"redhat-marketplace-bnr2t\" (UID: \"6d29de84-1bd8-4c4f-b3d8-ff1df67beb29\") " pod="openshift-marketplace/redhat-marketplace-bnr2t" Nov 26 07:56:28 crc kubenswrapper[4492]: I1126 07:56:28.268947 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6d29de84-1bd8-4c4f-b3d8-ff1df67beb29-catalog-content\") pod \"redhat-marketplace-bnr2t\" (UID: \"6d29de84-1bd8-4c4f-b3d8-ff1df67beb29\") " pod="openshift-marketplace/redhat-marketplace-bnr2t" Nov 26 07:56:28 crc kubenswrapper[4492]: I1126 07:56:28.288010 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-bnr2t"] Nov 26 07:56:28 crc kubenswrapper[4492]: I1126 07:56:28.372355 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6d29de84-1bd8-4c4f-b3d8-ff1df67beb29-catalog-content\") pod \"redhat-marketplace-bnr2t\" (UID: \"6d29de84-1bd8-4c4f-b3d8-ff1df67beb29\") " pod="openshift-marketplace/redhat-marketplace-bnr2t" Nov 26 07:56:28 crc kubenswrapper[4492]: I1126 07:56:28.372717 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mprqd\" (UniqueName: \"kubernetes.io/projected/6d29de84-1bd8-4c4f-b3d8-ff1df67beb29-kube-api-access-mprqd\") pod \"redhat-marketplace-bnr2t\" (UID: \"6d29de84-1bd8-4c4f-b3d8-ff1df67beb29\") " pod="openshift-marketplace/redhat-marketplace-bnr2t" Nov 26 07:56:28 crc kubenswrapper[4492]: I1126 07:56:28.372837 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6d29de84-1bd8-4c4f-b3d8-ff1df67beb29-utilities\") pod \"redhat-marketplace-bnr2t\" (UID: \"6d29de84-1bd8-4c4f-b3d8-ff1df67beb29\") " pod="openshift-marketplace/redhat-marketplace-bnr2t" Nov 26 07:56:28 crc kubenswrapper[4492]: I1126 07:56:28.376799 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6d29de84-1bd8-4c4f-b3d8-ff1df67beb29-catalog-content\") pod \"redhat-marketplace-bnr2t\" (UID: \"6d29de84-1bd8-4c4f-b3d8-ff1df67beb29\") " pod="openshift-marketplace/redhat-marketplace-bnr2t" Nov 26 07:56:28 crc kubenswrapper[4492]: I1126 07:56:28.377553 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6d29de84-1bd8-4c4f-b3d8-ff1df67beb29-utilities\") pod \"redhat-marketplace-bnr2t\" (UID: \"6d29de84-1bd8-4c4f-b3d8-ff1df67beb29\") " pod="openshift-marketplace/redhat-marketplace-bnr2t" Nov 26 07:56:28 crc kubenswrapper[4492]: I1126 07:56:28.697704 4492 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-mprqd\" (UniqueName: \"kubernetes.io/projected/6d29de84-1bd8-4c4f-b3d8-ff1df67beb29-kube-api-access-mprqd\") pod \"redhat-marketplace-bnr2t\" (UID: \"6d29de84-1bd8-4c4f-b3d8-ff1df67beb29\") " pod="openshift-marketplace/redhat-marketplace-bnr2t" Nov 26 07:56:28 crc kubenswrapper[4492]: I1126 07:56:28.853125 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bnr2t" Nov 26 07:56:29 crc kubenswrapper[4492]: I1126 07:56:29.730740 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-bnr2t"] Nov 26 07:56:30 crc kubenswrapper[4492]: I1126 07:56:30.725650 4492 generic.go:334] "Generic (PLEG): container finished" podID="6d29de84-1bd8-4c4f-b3d8-ff1df67beb29" containerID="4fbd77eda6231ac05c5fe5df0dd0179689ab9baf2d4bbf2080505daa7c4fc56d" exitCode=0 Nov 26 07:56:30 crc kubenswrapper[4492]: I1126 07:56:30.726721 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bnr2t" event={"ID":"6d29de84-1bd8-4c4f-b3d8-ff1df67beb29","Type":"ContainerDied","Data":"4fbd77eda6231ac05c5fe5df0dd0179689ab9baf2d4bbf2080505daa7c4fc56d"} Nov 26 07:56:30 crc kubenswrapper[4492]: I1126 07:56:30.726986 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bnr2t" event={"ID":"6d29de84-1bd8-4c4f-b3d8-ff1df67beb29","Type":"ContainerStarted","Data":"f315047fca91ecae53e93bd6861cc8273c4af49556e1a673f93ee9b69801b013"} Nov 26 07:56:32 crc kubenswrapper[4492]: I1126 07:56:32.758323 4492 generic.go:334] "Generic (PLEG): container finished" podID="6d29de84-1bd8-4c4f-b3d8-ff1df67beb29" containerID="139b5717cc780db169cd7659ee3f51a835c963894170e6523043a807b1f314c8" exitCode=0 Nov 26 07:56:32 crc kubenswrapper[4492]: I1126 07:56:32.758610 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bnr2t" event={"ID":"6d29de84-1bd8-4c4f-b3d8-ff1df67beb29","Type":"ContainerDied","Data":"139b5717cc780db169cd7659ee3f51a835c963894170e6523043a807b1f314c8"} Nov 26 07:56:33 crc kubenswrapper[4492]: I1126 07:56:33.775444 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bnr2t" event={"ID":"6d29de84-1bd8-4c4f-b3d8-ff1df67beb29","Type":"ContainerStarted","Data":"a9aadde3a91684b4077730fcd8f8ff58855e03d42a0099e3aa4c15ab3191103f"} Nov 26 07:56:33 crc kubenswrapper[4492]: I1126 07:56:33.804209 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-bnr2t" podStartSLOduration=3.2868843930000002 podStartE2EDuration="5.80236783s" podCreationTimestamp="2025-11-26 07:56:28 +0000 UTC" firstStartedPulling="2025-11-26 07:56:30.756256393 +0000 UTC m=+4086.640144690" lastFinishedPulling="2025-11-26 07:56:33.271739829 +0000 UTC m=+4089.155628127" observedRunningTime="2025-11-26 07:56:33.793073064 +0000 UTC m=+4089.676970098" watchObservedRunningTime="2025-11-26 07:56:33.80236783 +0000 UTC m=+4089.686256128" Nov 26 07:56:38 crc kubenswrapper[4492]: I1126 07:56:38.854193 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-bnr2t" Nov 26 07:56:38 crc kubenswrapper[4492]: I1126 07:56:38.856315 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-bnr2t" Nov 26 07:56:38 crc kubenswrapper[4492]: I1126 07:56:38.919076 4492 kubelet.go:2542] 
"SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-bnr2t" Nov 26 07:56:39 crc kubenswrapper[4492]: I1126 07:56:39.883544 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-bnr2t" Nov 26 07:56:39 crc kubenswrapper[4492]: I1126 07:56:39.937289 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-bnr2t"] Nov 26 07:56:41 crc kubenswrapper[4492]: I1126 07:56:41.854694 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-bnr2t" podUID="6d29de84-1bd8-4c4f-b3d8-ff1df67beb29" containerName="registry-server" containerID="cri-o://a9aadde3a91684b4077730fcd8f8ff58855e03d42a0099e3aa4c15ab3191103f" gracePeriod=2 Nov 26 07:56:42 crc kubenswrapper[4492]: I1126 07:56:42.441615 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bnr2t" Nov 26 07:56:42 crc kubenswrapper[4492]: I1126 07:56:42.517700 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6d29de84-1bd8-4c4f-b3d8-ff1df67beb29-utilities\") pod \"6d29de84-1bd8-4c4f-b3d8-ff1df67beb29\" (UID: \"6d29de84-1bd8-4c4f-b3d8-ff1df67beb29\") " Nov 26 07:56:42 crc kubenswrapper[4492]: I1126 07:56:42.517902 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mprqd\" (UniqueName: \"kubernetes.io/projected/6d29de84-1bd8-4c4f-b3d8-ff1df67beb29-kube-api-access-mprqd\") pod \"6d29de84-1bd8-4c4f-b3d8-ff1df67beb29\" (UID: \"6d29de84-1bd8-4c4f-b3d8-ff1df67beb29\") " Nov 26 07:56:42 crc kubenswrapper[4492]: I1126 07:56:42.517996 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6d29de84-1bd8-4c4f-b3d8-ff1df67beb29-catalog-content\") pod \"6d29de84-1bd8-4c4f-b3d8-ff1df67beb29\" (UID: \"6d29de84-1bd8-4c4f-b3d8-ff1df67beb29\") " Nov 26 07:56:42 crc kubenswrapper[4492]: I1126 07:56:42.520389 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6d29de84-1bd8-4c4f-b3d8-ff1df67beb29-utilities" (OuterVolumeSpecName: "utilities") pod "6d29de84-1bd8-4c4f-b3d8-ff1df67beb29" (UID: "6d29de84-1bd8-4c4f-b3d8-ff1df67beb29"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:56:42 crc kubenswrapper[4492]: I1126 07:56:42.534205 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6d29de84-1bd8-4c4f-b3d8-ff1df67beb29-kube-api-access-mprqd" (OuterVolumeSpecName: "kube-api-access-mprqd") pod "6d29de84-1bd8-4c4f-b3d8-ff1df67beb29" (UID: "6d29de84-1bd8-4c4f-b3d8-ff1df67beb29"). InnerVolumeSpecName "kube-api-access-mprqd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:56:42 crc kubenswrapper[4492]: I1126 07:56:42.539223 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6d29de84-1bd8-4c4f-b3d8-ff1df67beb29-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6d29de84-1bd8-4c4f-b3d8-ff1df67beb29" (UID: "6d29de84-1bd8-4c4f-b3d8-ff1df67beb29"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:56:42 crc kubenswrapper[4492]: I1126 07:56:42.621410 4492 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6d29de84-1bd8-4c4f-b3d8-ff1df67beb29-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 07:56:42 crc kubenswrapper[4492]: I1126 07:56:42.621750 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mprqd\" (UniqueName: \"kubernetes.io/projected/6d29de84-1bd8-4c4f-b3d8-ff1df67beb29-kube-api-access-mprqd\") on node \"crc\" DevicePath \"\"" Nov 26 07:56:42 crc kubenswrapper[4492]: I1126 07:56:42.621768 4492 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6d29de84-1bd8-4c4f-b3d8-ff1df67beb29-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 07:56:42 crc kubenswrapper[4492]: I1126 07:56:42.868140 4492 generic.go:334] "Generic (PLEG): container finished" podID="6d29de84-1bd8-4c4f-b3d8-ff1df67beb29" containerID="a9aadde3a91684b4077730fcd8f8ff58855e03d42a0099e3aa4c15ab3191103f" exitCode=0 Nov 26 07:56:42 crc kubenswrapper[4492]: I1126 07:56:42.868226 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bnr2t" event={"ID":"6d29de84-1bd8-4c4f-b3d8-ff1df67beb29","Type":"ContainerDied","Data":"a9aadde3a91684b4077730fcd8f8ff58855e03d42a0099e3aa4c15ab3191103f"} Nov 26 07:56:42 crc kubenswrapper[4492]: I1126 07:56:42.868268 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bnr2t" Nov 26 07:56:42 crc kubenswrapper[4492]: I1126 07:56:42.868304 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bnr2t" event={"ID":"6d29de84-1bd8-4c4f-b3d8-ff1df67beb29","Type":"ContainerDied","Data":"f315047fca91ecae53e93bd6861cc8273c4af49556e1a673f93ee9b69801b013"} Nov 26 07:56:42 crc kubenswrapper[4492]: I1126 07:56:42.868674 4492 scope.go:117] "RemoveContainer" containerID="a9aadde3a91684b4077730fcd8f8ff58855e03d42a0099e3aa4c15ab3191103f" Nov 26 07:56:42 crc kubenswrapper[4492]: I1126 07:56:42.925949 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-bnr2t"] Nov 26 07:56:42 crc kubenswrapper[4492]: I1126 07:56:42.936595 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-bnr2t"] Nov 26 07:56:42 crc kubenswrapper[4492]: I1126 07:56:42.936827 4492 scope.go:117] "RemoveContainer" containerID="139b5717cc780db169cd7659ee3f51a835c963894170e6523043a807b1f314c8" Nov 26 07:56:42 crc kubenswrapper[4492]: I1126 07:56:42.957167 4492 scope.go:117] "RemoveContainer" containerID="4fbd77eda6231ac05c5fe5df0dd0179689ab9baf2d4bbf2080505daa7c4fc56d" Nov 26 07:56:42 crc kubenswrapper[4492]: I1126 07:56:42.993860 4492 scope.go:117] "RemoveContainer" containerID="a9aadde3a91684b4077730fcd8f8ff58855e03d42a0099e3aa4c15ab3191103f" Nov 26 07:56:42 crc kubenswrapper[4492]: E1126 07:56:42.995609 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a9aadde3a91684b4077730fcd8f8ff58855e03d42a0099e3aa4c15ab3191103f\": container with ID starting with a9aadde3a91684b4077730fcd8f8ff58855e03d42a0099e3aa4c15ab3191103f not found: ID does not exist" containerID="a9aadde3a91684b4077730fcd8f8ff58855e03d42a0099e3aa4c15ab3191103f" Nov 26 07:56:42 crc kubenswrapper[4492]: I1126 07:56:42.996096 4492 
Nov 26 07:56:42 crc kubenswrapper[4492]: I1126 07:56:42.996096 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a9aadde3a91684b4077730fcd8f8ff58855e03d42a0099e3aa4c15ab3191103f"} err="failed to get container status \"a9aadde3a91684b4077730fcd8f8ff58855e03d42a0099e3aa4c15ab3191103f\": rpc error: code = NotFound desc = could not find container \"a9aadde3a91684b4077730fcd8f8ff58855e03d42a0099e3aa4c15ab3191103f\": container with ID starting with a9aadde3a91684b4077730fcd8f8ff58855e03d42a0099e3aa4c15ab3191103f not found: ID does not exist"
Nov 26 07:56:42 crc kubenswrapper[4492]: I1126 07:56:42.996149 4492 scope.go:117] "RemoveContainer" containerID="139b5717cc780db169cd7659ee3f51a835c963894170e6523043a807b1f314c8"
Nov 26 07:56:42 crc kubenswrapper[4492]: E1126 07:56:42.996552 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"139b5717cc780db169cd7659ee3f51a835c963894170e6523043a807b1f314c8\": container with ID starting with 139b5717cc780db169cd7659ee3f51a835c963894170e6523043a807b1f314c8 not found: ID does not exist" containerID="139b5717cc780db169cd7659ee3f51a835c963894170e6523043a807b1f314c8"
Nov 26 07:56:42 crc kubenswrapper[4492]: I1126 07:56:42.996581 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"139b5717cc780db169cd7659ee3f51a835c963894170e6523043a807b1f314c8"} err="failed to get container status \"139b5717cc780db169cd7659ee3f51a835c963894170e6523043a807b1f314c8\": rpc error: code = NotFound desc = could not find container \"139b5717cc780db169cd7659ee3f51a835c963894170e6523043a807b1f314c8\": container with ID starting with 139b5717cc780db169cd7659ee3f51a835c963894170e6523043a807b1f314c8 not found: ID does not exist"
Nov 26 07:56:42 crc kubenswrapper[4492]: I1126 07:56:42.996608 4492 scope.go:117] "RemoveContainer" containerID="4fbd77eda6231ac05c5fe5df0dd0179689ab9baf2d4bbf2080505daa7c4fc56d"
Nov 26 07:56:42 crc kubenswrapper[4492]: E1126 07:56:42.997061 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4fbd77eda6231ac05c5fe5df0dd0179689ab9baf2d4bbf2080505daa7c4fc56d\": container with ID starting with 4fbd77eda6231ac05c5fe5df0dd0179689ab9baf2d4bbf2080505daa7c4fc56d not found: ID does not exist" containerID="4fbd77eda6231ac05c5fe5df0dd0179689ab9baf2d4bbf2080505daa7c4fc56d"
Nov 26 07:56:42 crc kubenswrapper[4492]: I1126 07:56:42.997096 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4fbd77eda6231ac05c5fe5df0dd0179689ab9baf2d4bbf2080505daa7c4fc56d"} err="failed to get container status \"4fbd77eda6231ac05c5fe5df0dd0179689ab9baf2d4bbf2080505daa7c4fc56d\": rpc error: code = NotFound desc = could not find container \"4fbd77eda6231ac05c5fe5df0dd0179689ab9baf2d4bbf2080505daa7c4fc56d\": container with ID starting with 4fbd77eda6231ac05c5fe5df0dd0179689ab9baf2d4bbf2080505daa7c4fc56d not found: ID does not exist"
Nov 26 07:56:44 crc kubenswrapper[4492]: I1126 07:56:44.450754 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6d29de84-1bd8-4c4f-b3d8-ff1df67beb29" path="/var/lib/kubelet/pods/6d29de84-1bd8-4c4f-b3d8-ff1df67beb29/volumes"
Nov 26 07:56:49 crc kubenswrapper[4492]: I1126 07:56:49.445696 4492 patch_prober.go:28] interesting pod/machine-config-daemon-6blv7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 26 07:56:49 crc kubenswrapper[4492]: I1126 07:56:49.446428 4492 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 26 07:56:49 crc kubenswrapper[4492]: I1126 07:56:49.446497 4492 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-6blv7"
Nov 26 07:56:49 crc kubenswrapper[4492]: I1126 07:56:49.447947 4492 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"1302d86044c6280198ffa29c8834eac5e4e2b2f2d92d6e57ffc98d40f7490351"} pod="openshift-machine-config-operator/machine-config-daemon-6blv7" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 26 07:56:49 crc kubenswrapper[4492]: I1126 07:56:49.448008 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" containerID="cri-o://1302d86044c6280198ffa29c8834eac5e4e2b2f2d92d6e57ffc98d40f7490351" gracePeriod=600
Nov 26 07:56:49 crc kubenswrapper[4492]: I1126 07:56:49.959924 4492 generic.go:334] "Generic (PLEG): container finished" podID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerID="1302d86044c6280198ffa29c8834eac5e4e2b2f2d92d6e57ffc98d40f7490351" exitCode=0
Nov 26 07:56:49 crc kubenswrapper[4492]: I1126 07:56:49.959977 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" event={"ID":"04bf18ad-d2a1-4b30-a3fa-2b6247363c82","Type":"ContainerDied","Data":"1302d86044c6280198ffa29c8834eac5e4e2b2f2d92d6e57ffc98d40f7490351"}
Nov 26 07:56:49 crc kubenswrapper[4492]: I1126 07:56:49.960707 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" event={"ID":"04bf18ad-d2a1-4b30-a3fa-2b6247363c82","Type":"ContainerStarted","Data":"662914ce15995ceb9487d63c3784ecf253e21638f72690c752d5cfd0b226bddb"}
Nov 26 07:56:49 crc kubenswrapper[4492]: I1126 07:56:49.960790 4492 scope.go:117] "RemoveContainer" containerID="99084e163dffbfd460c50e193e52b41c0d10ca6289f7305e6fd05dca2e26f9c3"
Nov 26 07:58:39 crc kubenswrapper[4492]: I1126 07:58:39.232477 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-5n6ws"]
Nov 26 07:58:39 crc kubenswrapper[4492]: E1126 07:58:39.238142 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6d29de84-1bd8-4c4f-b3d8-ff1df67beb29" containerName="extract-content"
Nov 26 07:58:39 crc kubenswrapper[4492]: I1126 07:58:39.238230 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="6d29de84-1bd8-4c4f-b3d8-ff1df67beb29" containerName="extract-content"
Nov 26 07:58:39 crc kubenswrapper[4492]: E1126 07:58:39.238263 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6d29de84-1bd8-4c4f-b3d8-ff1df67beb29" containerName="extract-utilities"
containerName="extract-utilities" Nov 26 07:58:39 crc kubenswrapper[4492]: E1126 07:58:39.239773 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6d29de84-1bd8-4c4f-b3d8-ff1df67beb29" containerName="registry-server" Nov 26 07:58:39 crc kubenswrapper[4492]: I1126 07:58:39.239805 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="6d29de84-1bd8-4c4f-b3d8-ff1df67beb29" containerName="registry-server" Nov 26 07:58:39 crc kubenswrapper[4492]: I1126 07:58:39.241105 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="6d29de84-1bd8-4c4f-b3d8-ff1df67beb29" containerName="registry-server" Nov 26 07:58:39 crc kubenswrapper[4492]: I1126 07:58:39.252016 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-5n6ws" Nov 26 07:58:39 crc kubenswrapper[4492]: I1126 07:58:39.261944 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-5n6ws"] Nov 26 07:58:39 crc kubenswrapper[4492]: I1126 07:58:39.285970 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/214df8f7-cc90-4aec-8b0a-b146dba0db81-catalog-content\") pod \"certified-operators-5n6ws\" (UID: \"214df8f7-cc90-4aec-8b0a-b146dba0db81\") " pod="openshift-marketplace/certified-operators-5n6ws" Nov 26 07:58:39 crc kubenswrapper[4492]: I1126 07:58:39.286500 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/214df8f7-cc90-4aec-8b0a-b146dba0db81-utilities\") pod \"certified-operators-5n6ws\" (UID: \"214df8f7-cc90-4aec-8b0a-b146dba0db81\") " pod="openshift-marketplace/certified-operators-5n6ws" Nov 26 07:58:39 crc kubenswrapper[4492]: I1126 07:58:39.286626 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rvj9k\" (UniqueName: \"kubernetes.io/projected/214df8f7-cc90-4aec-8b0a-b146dba0db81-kube-api-access-rvj9k\") pod \"certified-operators-5n6ws\" (UID: \"214df8f7-cc90-4aec-8b0a-b146dba0db81\") " pod="openshift-marketplace/certified-operators-5n6ws" Nov 26 07:58:39 crc kubenswrapper[4492]: I1126 07:58:39.388035 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/214df8f7-cc90-4aec-8b0a-b146dba0db81-catalog-content\") pod \"certified-operators-5n6ws\" (UID: \"214df8f7-cc90-4aec-8b0a-b146dba0db81\") " pod="openshift-marketplace/certified-operators-5n6ws" Nov 26 07:58:39 crc kubenswrapper[4492]: I1126 07:58:39.388376 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/214df8f7-cc90-4aec-8b0a-b146dba0db81-utilities\") pod \"certified-operators-5n6ws\" (UID: \"214df8f7-cc90-4aec-8b0a-b146dba0db81\") " pod="openshift-marketplace/certified-operators-5n6ws" Nov 26 07:58:39 crc kubenswrapper[4492]: I1126 07:58:39.388490 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rvj9k\" (UniqueName: \"kubernetes.io/projected/214df8f7-cc90-4aec-8b0a-b146dba0db81-kube-api-access-rvj9k\") pod \"certified-operators-5n6ws\" (UID: \"214df8f7-cc90-4aec-8b0a-b146dba0db81\") " pod="openshift-marketplace/certified-operators-5n6ws" Nov 26 07:58:39 crc kubenswrapper[4492]: I1126 07:58:39.388508 4492 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/214df8f7-cc90-4aec-8b0a-b146dba0db81-catalog-content\") pod \"certified-operators-5n6ws\" (UID: \"214df8f7-cc90-4aec-8b0a-b146dba0db81\") " pod="openshift-marketplace/certified-operators-5n6ws" Nov 26 07:58:39 crc kubenswrapper[4492]: I1126 07:58:39.388725 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/214df8f7-cc90-4aec-8b0a-b146dba0db81-utilities\") pod \"certified-operators-5n6ws\" (UID: \"214df8f7-cc90-4aec-8b0a-b146dba0db81\") " pod="openshift-marketplace/certified-operators-5n6ws" Nov 26 07:58:39 crc kubenswrapper[4492]: I1126 07:58:39.592392 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rvj9k\" (UniqueName: \"kubernetes.io/projected/214df8f7-cc90-4aec-8b0a-b146dba0db81-kube-api-access-rvj9k\") pod \"certified-operators-5n6ws\" (UID: \"214df8f7-cc90-4aec-8b0a-b146dba0db81\") " pod="openshift-marketplace/certified-operators-5n6ws" Nov 26 07:58:39 crc kubenswrapper[4492]: I1126 07:58:39.618545 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-5n6ws" Nov 26 07:58:40 crc kubenswrapper[4492]: I1126 07:58:40.232502 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-5n6ws"] Nov 26 07:58:41 crc kubenswrapper[4492]: I1126 07:58:41.028701 4492 generic.go:334] "Generic (PLEG): container finished" podID="214df8f7-cc90-4aec-8b0a-b146dba0db81" containerID="720a47d0b6c8d4c910ae4c688867eb831a92ce697d4afd461332bd6b235a6c59" exitCode=0 Nov 26 07:58:41 crc kubenswrapper[4492]: I1126 07:58:41.029469 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5n6ws" event={"ID":"214df8f7-cc90-4aec-8b0a-b146dba0db81","Type":"ContainerDied","Data":"720a47d0b6c8d4c910ae4c688867eb831a92ce697d4afd461332bd6b235a6c59"} Nov 26 07:58:41 crc kubenswrapper[4492]: I1126 07:58:41.029517 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5n6ws" event={"ID":"214df8f7-cc90-4aec-8b0a-b146dba0db81","Type":"ContainerStarted","Data":"1161f54b0ab688ba4cab7f6978eb996cfda7bbda2b98d55a4b5be610abffa35c"} Nov 26 07:58:41 crc kubenswrapper[4492]: I1126 07:58:41.033160 4492 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 26 07:58:42 crc kubenswrapper[4492]: I1126 07:58:42.042191 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5n6ws" event={"ID":"214df8f7-cc90-4aec-8b0a-b146dba0db81","Type":"ContainerStarted","Data":"cdb6456e0e9dcae8fad56116c88a13290b2b4533f0de7f3d773b94cf2fa90118"} Nov 26 07:58:43 crc kubenswrapper[4492]: I1126 07:58:43.057588 4492 generic.go:334] "Generic (PLEG): container finished" podID="214df8f7-cc90-4aec-8b0a-b146dba0db81" containerID="cdb6456e0e9dcae8fad56116c88a13290b2b4533f0de7f3d773b94cf2fa90118" exitCode=0 Nov 26 07:58:43 crc kubenswrapper[4492]: I1126 07:58:43.057672 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5n6ws" event={"ID":"214df8f7-cc90-4aec-8b0a-b146dba0db81","Type":"ContainerDied","Data":"cdb6456e0e9dcae8fad56116c88a13290b2b4533f0de7f3d773b94cf2fa90118"} Nov 26 07:58:44 crc kubenswrapper[4492]: I1126 07:58:44.067923 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/certified-operators-5n6ws" event={"ID":"214df8f7-cc90-4aec-8b0a-b146dba0db81","Type":"ContainerStarted","Data":"5ec63e0227a2f048b7fc1be7939924f9c20ffed29020a949392d83e1a11b05b4"} Nov 26 07:58:44 crc kubenswrapper[4492]: I1126 07:58:44.091325 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-5n6ws" podStartSLOduration=2.605380901 podStartE2EDuration="5.091307275s" podCreationTimestamp="2025-11-26 07:58:39 +0000 UTC" firstStartedPulling="2025-11-26 07:58:41.032304426 +0000 UTC m=+4216.916192723" lastFinishedPulling="2025-11-26 07:58:43.518230799 +0000 UTC m=+4219.402119097" observedRunningTime="2025-11-26 07:58:44.08892179 +0000 UTC m=+4219.972810098" watchObservedRunningTime="2025-11-26 07:58:44.091307275 +0000 UTC m=+4219.975195573" Nov 26 07:58:49 crc kubenswrapper[4492]: I1126 07:58:49.441589 4492 patch_prober.go:28] interesting pod/machine-config-daemon-6blv7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 07:58:49 crc kubenswrapper[4492]: I1126 07:58:49.442276 4492 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 07:58:49 crc kubenswrapper[4492]: I1126 07:58:49.619011 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-5n6ws" Nov 26 07:58:49 crc kubenswrapper[4492]: I1126 07:58:49.619057 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-5n6ws" Nov 26 07:58:49 crc kubenswrapper[4492]: I1126 07:58:49.660672 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-5n6ws" Nov 26 07:58:50 crc kubenswrapper[4492]: I1126 07:58:50.448763 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-5n6ws" Nov 26 07:58:50 crc kubenswrapper[4492]: I1126 07:58:50.521303 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-5n6ws"] Nov 26 07:58:52 crc kubenswrapper[4492]: I1126 07:58:52.134904 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-5n6ws" podUID="214df8f7-cc90-4aec-8b0a-b146dba0db81" containerName="registry-server" containerID="cri-o://5ec63e0227a2f048b7fc1be7939924f9c20ffed29020a949392d83e1a11b05b4" gracePeriod=2 Nov 26 07:58:52 crc kubenswrapper[4492]: I1126 07:58:52.704846 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-5n6ws" Nov 26 07:58:52 crc kubenswrapper[4492]: I1126 07:58:52.814580 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/214df8f7-cc90-4aec-8b0a-b146dba0db81-catalog-content\") pod \"214df8f7-cc90-4aec-8b0a-b146dba0db81\" (UID: \"214df8f7-cc90-4aec-8b0a-b146dba0db81\") " Nov 26 07:58:52 crc kubenswrapper[4492]: I1126 07:58:52.814668 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rvj9k\" (UniqueName: \"kubernetes.io/projected/214df8f7-cc90-4aec-8b0a-b146dba0db81-kube-api-access-rvj9k\") pod \"214df8f7-cc90-4aec-8b0a-b146dba0db81\" (UID: \"214df8f7-cc90-4aec-8b0a-b146dba0db81\") " Nov 26 07:58:52 crc kubenswrapper[4492]: I1126 07:58:52.814757 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/214df8f7-cc90-4aec-8b0a-b146dba0db81-utilities\") pod \"214df8f7-cc90-4aec-8b0a-b146dba0db81\" (UID: \"214df8f7-cc90-4aec-8b0a-b146dba0db81\") " Nov 26 07:58:52 crc kubenswrapper[4492]: I1126 07:58:52.815650 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/214df8f7-cc90-4aec-8b0a-b146dba0db81-utilities" (OuterVolumeSpecName: "utilities") pod "214df8f7-cc90-4aec-8b0a-b146dba0db81" (UID: "214df8f7-cc90-4aec-8b0a-b146dba0db81"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:58:52 crc kubenswrapper[4492]: I1126 07:58:52.824472 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/214df8f7-cc90-4aec-8b0a-b146dba0db81-kube-api-access-rvj9k" (OuterVolumeSpecName: "kube-api-access-rvj9k") pod "214df8f7-cc90-4aec-8b0a-b146dba0db81" (UID: "214df8f7-cc90-4aec-8b0a-b146dba0db81"). InnerVolumeSpecName "kube-api-access-rvj9k". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:58:52 crc kubenswrapper[4492]: I1126 07:58:52.858460 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/214df8f7-cc90-4aec-8b0a-b146dba0db81-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "214df8f7-cc90-4aec-8b0a-b146dba0db81" (UID: "214df8f7-cc90-4aec-8b0a-b146dba0db81"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:58:52 crc kubenswrapper[4492]: I1126 07:58:52.918149 4492 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/214df8f7-cc90-4aec-8b0a-b146dba0db81-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 07:58:52 crc kubenswrapper[4492]: I1126 07:58:52.918204 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rvj9k\" (UniqueName: \"kubernetes.io/projected/214df8f7-cc90-4aec-8b0a-b146dba0db81-kube-api-access-rvj9k\") on node \"crc\" DevicePath \"\"" Nov 26 07:58:52 crc kubenswrapper[4492]: I1126 07:58:52.918219 4492 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/214df8f7-cc90-4aec-8b0a-b146dba0db81-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 07:58:53 crc kubenswrapper[4492]: I1126 07:58:53.148334 4492 generic.go:334] "Generic (PLEG): container finished" podID="214df8f7-cc90-4aec-8b0a-b146dba0db81" containerID="5ec63e0227a2f048b7fc1be7939924f9c20ffed29020a949392d83e1a11b05b4" exitCode=0 Nov 26 07:58:53 crc kubenswrapper[4492]: I1126 07:58:53.148387 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5n6ws" event={"ID":"214df8f7-cc90-4aec-8b0a-b146dba0db81","Type":"ContainerDied","Data":"5ec63e0227a2f048b7fc1be7939924f9c20ffed29020a949392d83e1a11b05b4"} Nov 26 07:58:53 crc kubenswrapper[4492]: I1126 07:58:53.148423 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5n6ws" event={"ID":"214df8f7-cc90-4aec-8b0a-b146dba0db81","Type":"ContainerDied","Data":"1161f54b0ab688ba4cab7f6978eb996cfda7bbda2b98d55a4b5be610abffa35c"} Nov 26 07:58:53 crc kubenswrapper[4492]: I1126 07:58:53.148443 4492 scope.go:117] "RemoveContainer" containerID="5ec63e0227a2f048b7fc1be7939924f9c20ffed29020a949392d83e1a11b05b4" Nov 26 07:58:53 crc kubenswrapper[4492]: I1126 07:58:53.148584 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-5n6ws" Nov 26 07:58:53 crc kubenswrapper[4492]: I1126 07:58:53.183829 4492 scope.go:117] "RemoveContainer" containerID="cdb6456e0e9dcae8fad56116c88a13290b2b4533f0de7f3d773b94cf2fa90118" Nov 26 07:58:53 crc kubenswrapper[4492]: I1126 07:58:53.187370 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-5n6ws"] Nov 26 07:58:53 crc kubenswrapper[4492]: I1126 07:58:53.198274 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-5n6ws"] Nov 26 07:58:53 crc kubenswrapper[4492]: I1126 07:58:53.210912 4492 scope.go:117] "RemoveContainer" containerID="720a47d0b6c8d4c910ae4c688867eb831a92ce697d4afd461332bd6b235a6c59" Nov 26 07:58:53 crc kubenswrapper[4492]: I1126 07:58:53.242049 4492 scope.go:117] "RemoveContainer" containerID="5ec63e0227a2f048b7fc1be7939924f9c20ffed29020a949392d83e1a11b05b4" Nov 26 07:58:53 crc kubenswrapper[4492]: E1126 07:58:53.242499 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5ec63e0227a2f048b7fc1be7939924f9c20ffed29020a949392d83e1a11b05b4\": container with ID starting with 5ec63e0227a2f048b7fc1be7939924f9c20ffed29020a949392d83e1a11b05b4 not found: ID does not exist" containerID="5ec63e0227a2f048b7fc1be7939924f9c20ffed29020a949392d83e1a11b05b4" Nov 26 07:58:53 crc kubenswrapper[4492]: I1126 07:58:53.242566 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5ec63e0227a2f048b7fc1be7939924f9c20ffed29020a949392d83e1a11b05b4"} err="failed to get container status \"5ec63e0227a2f048b7fc1be7939924f9c20ffed29020a949392d83e1a11b05b4\": rpc error: code = NotFound desc = could not find container \"5ec63e0227a2f048b7fc1be7939924f9c20ffed29020a949392d83e1a11b05b4\": container with ID starting with 5ec63e0227a2f048b7fc1be7939924f9c20ffed29020a949392d83e1a11b05b4 not found: ID does not exist" Nov 26 07:58:53 crc kubenswrapper[4492]: I1126 07:58:53.242621 4492 scope.go:117] "RemoveContainer" containerID="cdb6456e0e9dcae8fad56116c88a13290b2b4533f0de7f3d773b94cf2fa90118" Nov 26 07:58:53 crc kubenswrapper[4492]: E1126 07:58:53.242998 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cdb6456e0e9dcae8fad56116c88a13290b2b4533f0de7f3d773b94cf2fa90118\": container with ID starting with cdb6456e0e9dcae8fad56116c88a13290b2b4533f0de7f3d773b94cf2fa90118 not found: ID does not exist" containerID="cdb6456e0e9dcae8fad56116c88a13290b2b4533f0de7f3d773b94cf2fa90118" Nov 26 07:58:53 crc kubenswrapper[4492]: I1126 07:58:53.243036 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cdb6456e0e9dcae8fad56116c88a13290b2b4533f0de7f3d773b94cf2fa90118"} err="failed to get container status \"cdb6456e0e9dcae8fad56116c88a13290b2b4533f0de7f3d773b94cf2fa90118\": rpc error: code = NotFound desc = could not find container \"cdb6456e0e9dcae8fad56116c88a13290b2b4533f0de7f3d773b94cf2fa90118\": container with ID starting with cdb6456e0e9dcae8fad56116c88a13290b2b4533f0de7f3d773b94cf2fa90118 not found: ID does not exist" Nov 26 07:58:53 crc kubenswrapper[4492]: I1126 07:58:53.243060 4492 scope.go:117] "RemoveContainer" containerID="720a47d0b6c8d4c910ae4c688867eb831a92ce697d4afd461332bd6b235a6c59" Nov 26 07:58:53 crc kubenswrapper[4492]: E1126 07:58:53.243390 4492 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"720a47d0b6c8d4c910ae4c688867eb831a92ce697d4afd461332bd6b235a6c59\": container with ID starting with 720a47d0b6c8d4c910ae4c688867eb831a92ce697d4afd461332bd6b235a6c59 not found: ID does not exist" containerID="720a47d0b6c8d4c910ae4c688867eb831a92ce697d4afd461332bd6b235a6c59" Nov 26 07:58:53 crc kubenswrapper[4492]: I1126 07:58:53.243424 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"720a47d0b6c8d4c910ae4c688867eb831a92ce697d4afd461332bd6b235a6c59"} err="failed to get container status \"720a47d0b6c8d4c910ae4c688867eb831a92ce697d4afd461332bd6b235a6c59\": rpc error: code = NotFound desc = could not find container \"720a47d0b6c8d4c910ae4c688867eb831a92ce697d4afd461332bd6b235a6c59\": container with ID starting with 720a47d0b6c8d4c910ae4c688867eb831a92ce697d4afd461332bd6b235a6c59 not found: ID does not exist" Nov 26 07:58:54 crc kubenswrapper[4492]: I1126 07:58:54.447159 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="214df8f7-cc90-4aec-8b0a-b146dba0db81" path="/var/lib/kubelet/pods/214df8f7-cc90-4aec-8b0a-b146dba0db81/volumes" Nov 26 07:59:19 crc kubenswrapper[4492]: I1126 07:59:19.441553 4492 patch_prober.go:28] interesting pod/machine-config-daemon-6blv7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 07:59:19 crc kubenswrapper[4492]: I1126 07:59:19.442068 4492 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 07:59:49 crc kubenswrapper[4492]: I1126 07:59:49.441391 4492 patch_prober.go:28] interesting pod/machine-config-daemon-6blv7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 07:59:49 crc kubenswrapper[4492]: I1126 07:59:49.441782 4492 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 07:59:49 crc kubenswrapper[4492]: I1126 07:59:49.441817 4492 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" Nov 26 07:59:49 crc kubenswrapper[4492]: I1126 07:59:49.442191 4492 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"662914ce15995ceb9487d63c3784ecf253e21638f72690c752d5cfd0b226bddb"} pod="openshift-machine-config-operator/machine-config-daemon-6blv7" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 07:59:49 crc kubenswrapper[4492]: I1126 07:59:49.442245 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" 
podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" containerID="cri-o://662914ce15995ceb9487d63c3784ecf253e21638f72690c752d5cfd0b226bddb" gracePeriod=600 Nov 26 07:59:49 crc kubenswrapper[4492]: E1126 07:59:49.556602 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 07:59:49 crc kubenswrapper[4492]: I1126 07:59:49.574765 4492 generic.go:334] "Generic (PLEG): container finished" podID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerID="662914ce15995ceb9487d63c3784ecf253e21638f72690c752d5cfd0b226bddb" exitCode=0 Nov 26 07:59:49 crc kubenswrapper[4492]: I1126 07:59:49.574807 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" event={"ID":"04bf18ad-d2a1-4b30-a3fa-2b6247363c82","Type":"ContainerDied","Data":"662914ce15995ceb9487d63c3784ecf253e21638f72690c752d5cfd0b226bddb"} Nov 26 07:59:49 crc kubenswrapper[4492]: I1126 07:59:49.574846 4492 scope.go:117] "RemoveContainer" containerID="1302d86044c6280198ffa29c8834eac5e4e2b2f2d92d6e57ffc98d40f7490351" Nov 26 07:59:49 crc kubenswrapper[4492]: I1126 07:59:49.575425 4492 scope.go:117] "RemoveContainer" containerID="662914ce15995ceb9487d63c3784ecf253e21638f72690c752d5cfd0b226bddb" Nov 26 07:59:49 crc kubenswrapper[4492]: E1126 07:59:49.575658 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:00:00 crc kubenswrapper[4492]: I1126 08:00:00.199732 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402400-sxl86"] Nov 26 08:00:00 crc kubenswrapper[4492]: E1126 08:00:00.200459 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="214df8f7-cc90-4aec-8b0a-b146dba0db81" containerName="registry-server" Nov 26 08:00:00 crc kubenswrapper[4492]: I1126 08:00:00.200472 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="214df8f7-cc90-4aec-8b0a-b146dba0db81" containerName="registry-server" Nov 26 08:00:00 crc kubenswrapper[4492]: E1126 08:00:00.200493 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="214df8f7-cc90-4aec-8b0a-b146dba0db81" containerName="extract-content" Nov 26 08:00:00 crc kubenswrapper[4492]: I1126 08:00:00.200498 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="214df8f7-cc90-4aec-8b0a-b146dba0db81" containerName="extract-content" Nov 26 08:00:00 crc kubenswrapper[4492]: E1126 08:00:00.200522 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="214df8f7-cc90-4aec-8b0a-b146dba0db81" containerName="extract-utilities" Nov 26 08:00:00 crc kubenswrapper[4492]: I1126 08:00:00.200528 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="214df8f7-cc90-4aec-8b0a-b146dba0db81" containerName="extract-utilities" Nov 26 08:00:00 crc kubenswrapper[4492]: I1126 
Nov 26 08:00:00 crc kubenswrapper[4492]: I1126 08:00:00.200724 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="214df8f7-cc90-4aec-8b0a-b146dba0db81" containerName="registry-server"
Nov 26 08:00:00 crc kubenswrapper[4492]: I1126 08:00:00.201403 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402400-sxl86"
Nov 26 08:00:00 crc kubenswrapper[4492]: I1126 08:00:00.207470 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402400-sxl86"]
Nov 26 08:00:00 crc kubenswrapper[4492]: I1126 08:00:00.212867 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Nov 26 08:00:00 crc kubenswrapper[4492]: I1126 08:00:00.212872 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Nov 26 08:00:00 crc kubenswrapper[4492]: I1126 08:00:00.235585 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vp7mj\" (UniqueName: \"kubernetes.io/projected/649543fe-36bb-4582-877f-2dc13813134b-kube-api-access-vp7mj\") pod \"collect-profiles-29402400-sxl86\" (UID: \"649543fe-36bb-4582-877f-2dc13813134b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402400-sxl86"
Nov 26 08:00:00 crc kubenswrapper[4492]: I1126 08:00:00.235991 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/649543fe-36bb-4582-877f-2dc13813134b-config-volume\") pod \"collect-profiles-29402400-sxl86\" (UID: \"649543fe-36bb-4582-877f-2dc13813134b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402400-sxl86"
Nov 26 08:00:00 crc kubenswrapper[4492]: I1126 08:00:00.236151 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/649543fe-36bb-4582-877f-2dc13813134b-secret-volume\") pod \"collect-profiles-29402400-sxl86\" (UID: \"649543fe-36bb-4582-877f-2dc13813134b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402400-sxl86"
Nov 26 08:00:00 crc kubenswrapper[4492]: I1126 08:00:00.337147 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/649543fe-36bb-4582-877f-2dc13813134b-config-volume\") pod \"collect-profiles-29402400-sxl86\" (UID: \"649543fe-36bb-4582-877f-2dc13813134b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402400-sxl86"
Nov 26 08:00:00 crc kubenswrapper[4492]: I1126 08:00:00.337247 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/649543fe-36bb-4582-877f-2dc13813134b-secret-volume\") pod \"collect-profiles-29402400-sxl86\" (UID: \"649543fe-36bb-4582-877f-2dc13813134b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402400-sxl86"
Nov 26 08:00:00 crc kubenswrapper[4492]: I1126 08:00:00.337292 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vp7mj\" (UniqueName: \"kubernetes.io/projected/649543fe-36bb-4582-877f-2dc13813134b-kube-api-access-vp7mj\") pod \"collect-profiles-29402400-sxl86\" (UID: \"649543fe-36bb-4582-877f-2dc13813134b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402400-sxl86"
Nov 26 08:00:00 crc kubenswrapper[4492]: I1126 08:00:00.338197 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/649543fe-36bb-4582-877f-2dc13813134b-config-volume\") pod \"collect-profiles-29402400-sxl86\" (UID: \"649543fe-36bb-4582-877f-2dc13813134b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402400-sxl86"
Nov 26 08:00:00 crc kubenswrapper[4492]: I1126 08:00:00.342466 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/649543fe-36bb-4582-877f-2dc13813134b-secret-volume\") pod \"collect-profiles-29402400-sxl86\" (UID: \"649543fe-36bb-4582-877f-2dc13813134b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402400-sxl86"
Nov 26 08:00:00 crc kubenswrapper[4492]: I1126 08:00:00.351350 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vp7mj\" (UniqueName: \"kubernetes.io/projected/649543fe-36bb-4582-877f-2dc13813134b-kube-api-access-vp7mj\") pod \"collect-profiles-29402400-sxl86\" (UID: \"649543fe-36bb-4582-877f-2dc13813134b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402400-sxl86"
Nov 26 08:00:00 crc kubenswrapper[4492]: I1126 08:00:00.528819 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402400-sxl86"
Nov 26 08:00:00 crc kubenswrapper[4492]: I1126 08:00:00.935427 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402400-sxl86"]
Nov 26 08:00:01 crc kubenswrapper[4492]: I1126 08:00:01.671819 4492 generic.go:334] "Generic (PLEG): container finished" podID="649543fe-36bb-4582-877f-2dc13813134b" containerID="72695bb6b59cf0a7b4f51323b6e0bb759dd02db52676d8a218e363c0f918a1ca" exitCode=0
Nov 26 08:00:01 crc kubenswrapper[4492]: I1126 08:00:01.672031 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402400-sxl86" event={"ID":"649543fe-36bb-4582-877f-2dc13813134b","Type":"ContainerDied","Data":"72695bb6b59cf0a7b4f51323b6e0bb759dd02db52676d8a218e363c0f918a1ca"}
Nov 26 08:00:01 crc kubenswrapper[4492]: I1126 08:00:01.672058 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402400-sxl86" event={"ID":"649543fe-36bb-4582-877f-2dc13813134b","Type":"ContainerStarted","Data":"962e42f596aafe1ed745eed0157bfaa75ec4594d8b31f039e86d50d02d192d00"}
Nov 26 08:00:02 crc kubenswrapper[4492]: I1126 08:00:02.438864 4492 scope.go:117] "RemoveContainer" containerID="662914ce15995ceb9487d63c3784ecf253e21638f72690c752d5cfd0b226bddb"
Nov 26 08:00:02 crc kubenswrapper[4492]: E1126 08:00:02.439451 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82"
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402400-sxl86" Nov 26 08:00:03 crc kubenswrapper[4492]: I1126 08:00:03.084254 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vp7mj\" (UniqueName: \"kubernetes.io/projected/649543fe-36bb-4582-877f-2dc13813134b-kube-api-access-vp7mj\") pod \"649543fe-36bb-4582-877f-2dc13813134b\" (UID: \"649543fe-36bb-4582-877f-2dc13813134b\") " Nov 26 08:00:03 crc kubenswrapper[4492]: I1126 08:00:03.084408 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/649543fe-36bb-4582-877f-2dc13813134b-secret-volume\") pod \"649543fe-36bb-4582-877f-2dc13813134b\" (UID: \"649543fe-36bb-4582-877f-2dc13813134b\") " Nov 26 08:00:03 crc kubenswrapper[4492]: I1126 08:00:03.085084 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/649543fe-36bb-4582-877f-2dc13813134b-config-volume\") pod \"649543fe-36bb-4582-877f-2dc13813134b\" (UID: \"649543fe-36bb-4582-877f-2dc13813134b\") " Nov 26 08:00:03 crc kubenswrapper[4492]: I1126 08:00:03.085570 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/649543fe-36bb-4582-877f-2dc13813134b-config-volume" (OuterVolumeSpecName: "config-volume") pod "649543fe-36bb-4582-877f-2dc13813134b" (UID: "649543fe-36bb-4582-877f-2dc13813134b"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 08:00:03 crc kubenswrapper[4492]: I1126 08:00:03.089166 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/649543fe-36bb-4582-877f-2dc13813134b-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "649543fe-36bb-4582-877f-2dc13813134b" (UID: "649543fe-36bb-4582-877f-2dc13813134b"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:00:03 crc kubenswrapper[4492]: I1126 08:00:03.089357 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/649543fe-36bb-4582-877f-2dc13813134b-kube-api-access-vp7mj" (OuterVolumeSpecName: "kube-api-access-vp7mj") pod "649543fe-36bb-4582-877f-2dc13813134b" (UID: "649543fe-36bb-4582-877f-2dc13813134b"). InnerVolumeSpecName "kube-api-access-vp7mj". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:00:03 crc kubenswrapper[4492]: I1126 08:00:03.191776 4492 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/649543fe-36bb-4582-877f-2dc13813134b-config-volume\") on node \"crc\" DevicePath \"\"" Nov 26 08:00:03 crc kubenswrapper[4492]: I1126 08:00:03.191954 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vp7mj\" (UniqueName: \"kubernetes.io/projected/649543fe-36bb-4582-877f-2dc13813134b-kube-api-access-vp7mj\") on node \"crc\" DevicePath \"\"" Nov 26 08:00:03 crc kubenswrapper[4492]: I1126 08:00:03.192031 4492 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/649543fe-36bb-4582-877f-2dc13813134b-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 26 08:00:03 crc kubenswrapper[4492]: I1126 08:00:03.687049 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402400-sxl86" event={"ID":"649543fe-36bb-4582-877f-2dc13813134b","Type":"ContainerDied","Data":"962e42f596aafe1ed745eed0157bfaa75ec4594d8b31f039e86d50d02d192d00"} Nov 26 08:00:03 crc kubenswrapper[4492]: I1126 08:00:03.687099 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402400-sxl86" Nov 26 08:00:03 crc kubenswrapper[4492]: I1126 08:00:03.687293 4492 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="962e42f596aafe1ed745eed0157bfaa75ec4594d8b31f039e86d50d02d192d00" Nov 26 08:00:04 crc kubenswrapper[4492]: I1126 08:00:04.105509 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402355-dtvcz"] Nov 26 08:00:04 crc kubenswrapper[4492]: I1126 08:00:04.113349 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402355-dtvcz"] Nov 26 08:00:04 crc kubenswrapper[4492]: I1126 08:00:04.450215 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4cd3ee71-cc07-4cbb-93aa-f732ffa793ca" path="/var/lib/kubelet/pods/4cd3ee71-cc07-4cbb-93aa-f732ffa793ca/volumes" Nov 26 08:00:14 crc kubenswrapper[4492]: I1126 08:00:14.444920 4492 scope.go:117] "RemoveContainer" containerID="662914ce15995ceb9487d63c3784ecf253e21638f72690c752d5cfd0b226bddb" Nov 26 08:00:14 crc kubenswrapper[4492]: E1126 08:00:14.446288 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:00:29 crc kubenswrapper[4492]: I1126 08:00:29.438582 4492 scope.go:117] "RemoveContainer" containerID="662914ce15995ceb9487d63c3784ecf253e21638f72690c752d5cfd0b226bddb" Nov 26 08:00:29 crc kubenswrapper[4492]: E1126 08:00:29.439363 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:00:33 crc kubenswrapper[4492]: I1126 08:00:33.710203 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-65sb7"] Nov 26 08:00:33 crc kubenswrapper[4492]: E1126 08:00:33.710950 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="649543fe-36bb-4582-877f-2dc13813134b" containerName="collect-profiles" Nov 26 08:00:33 crc kubenswrapper[4492]: I1126 08:00:33.710965 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="649543fe-36bb-4582-877f-2dc13813134b" containerName="collect-profiles" Nov 26 08:00:33 crc kubenswrapper[4492]: I1126 08:00:33.711145 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="649543fe-36bb-4582-877f-2dc13813134b" containerName="collect-profiles" Nov 26 08:00:33 crc kubenswrapper[4492]: I1126 08:00:33.712457 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-65sb7" Nov 26 08:00:33 crc kubenswrapper[4492]: I1126 08:00:33.720387 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-65sb7"] Nov 26 08:00:33 crc kubenswrapper[4492]: I1126 08:00:33.867448 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dbfebb13-8fa0-4856-935d-51e14bfaea7f-catalog-content\") pod \"redhat-operators-65sb7\" (UID: \"dbfebb13-8fa0-4856-935d-51e14bfaea7f\") " pod="openshift-marketplace/redhat-operators-65sb7" Nov 26 08:00:33 crc kubenswrapper[4492]: I1126 08:00:33.867770 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dk9qv\" (UniqueName: \"kubernetes.io/projected/dbfebb13-8fa0-4856-935d-51e14bfaea7f-kube-api-access-dk9qv\") pod \"redhat-operators-65sb7\" (UID: \"dbfebb13-8fa0-4856-935d-51e14bfaea7f\") " pod="openshift-marketplace/redhat-operators-65sb7" Nov 26 08:00:33 crc kubenswrapper[4492]: I1126 08:00:33.867797 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dbfebb13-8fa0-4856-935d-51e14bfaea7f-utilities\") pod \"redhat-operators-65sb7\" (UID: \"dbfebb13-8fa0-4856-935d-51e14bfaea7f\") " pod="openshift-marketplace/redhat-operators-65sb7" Nov 26 08:00:33 crc kubenswrapper[4492]: I1126 08:00:33.968966 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dbfebb13-8fa0-4856-935d-51e14bfaea7f-catalog-content\") pod \"redhat-operators-65sb7\" (UID: \"dbfebb13-8fa0-4856-935d-51e14bfaea7f\") " pod="openshift-marketplace/redhat-operators-65sb7" Nov 26 08:00:33 crc kubenswrapper[4492]: I1126 08:00:33.969397 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dbfebb13-8fa0-4856-935d-51e14bfaea7f-catalog-content\") pod \"redhat-operators-65sb7\" (UID: \"dbfebb13-8fa0-4856-935d-51e14bfaea7f\") " pod="openshift-marketplace/redhat-operators-65sb7" Nov 26 08:00:33 crc kubenswrapper[4492]: I1126 08:00:33.969515 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dk9qv\" (UniqueName: \"kubernetes.io/projected/dbfebb13-8fa0-4856-935d-51e14bfaea7f-kube-api-access-dk9qv\") pod \"redhat-operators-65sb7\" 
(UID: \"dbfebb13-8fa0-4856-935d-51e14bfaea7f\") " pod="openshift-marketplace/redhat-operators-65sb7" Nov 26 08:00:33 crc kubenswrapper[4492]: I1126 08:00:33.969545 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dbfebb13-8fa0-4856-935d-51e14bfaea7f-utilities\") pod \"redhat-operators-65sb7\" (UID: \"dbfebb13-8fa0-4856-935d-51e14bfaea7f\") " pod="openshift-marketplace/redhat-operators-65sb7" Nov 26 08:00:33 crc kubenswrapper[4492]: I1126 08:00:33.969842 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dbfebb13-8fa0-4856-935d-51e14bfaea7f-utilities\") pod \"redhat-operators-65sb7\" (UID: \"dbfebb13-8fa0-4856-935d-51e14bfaea7f\") " pod="openshift-marketplace/redhat-operators-65sb7" Nov 26 08:00:33 crc kubenswrapper[4492]: I1126 08:00:33.992285 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dk9qv\" (UniqueName: \"kubernetes.io/projected/dbfebb13-8fa0-4856-935d-51e14bfaea7f-kube-api-access-dk9qv\") pod \"redhat-operators-65sb7\" (UID: \"dbfebb13-8fa0-4856-935d-51e14bfaea7f\") " pod="openshift-marketplace/redhat-operators-65sb7" Nov 26 08:00:34 crc kubenswrapper[4492]: I1126 08:00:34.036187 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-65sb7" Nov 26 08:00:34 crc kubenswrapper[4492]: I1126 08:00:34.463144 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-65sb7"] Nov 26 08:00:34 crc kubenswrapper[4492]: I1126 08:00:34.988282 4492 generic.go:334] "Generic (PLEG): container finished" podID="dbfebb13-8fa0-4856-935d-51e14bfaea7f" containerID="30d9c1e85f41fb61f4862eb4a6115391ec6a7b8609d7a8f1c4e21342ed1e9d03" exitCode=0 Nov 26 08:00:34 crc kubenswrapper[4492]: I1126 08:00:34.988325 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-65sb7" event={"ID":"dbfebb13-8fa0-4856-935d-51e14bfaea7f","Type":"ContainerDied","Data":"30d9c1e85f41fb61f4862eb4a6115391ec6a7b8609d7a8f1c4e21342ed1e9d03"} Nov 26 08:00:34 crc kubenswrapper[4492]: I1126 08:00:34.988350 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-65sb7" event={"ID":"dbfebb13-8fa0-4856-935d-51e14bfaea7f","Type":"ContainerStarted","Data":"123981abcfd3cc47ed144b869ba252e67a88155e11c4bb12905994b1d928b838"} Nov 26 08:00:37 crc kubenswrapper[4492]: I1126 08:00:37.009930 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-65sb7" event={"ID":"dbfebb13-8fa0-4856-935d-51e14bfaea7f","Type":"ContainerStarted","Data":"0974dbaffe78a5ebdeea9b37d2cf67f6e60eb7e52133ba98d626e57374ace588"} Nov 26 08:00:39 crc kubenswrapper[4492]: I1126 08:00:39.028862 4492 generic.go:334] "Generic (PLEG): container finished" podID="dbfebb13-8fa0-4856-935d-51e14bfaea7f" containerID="0974dbaffe78a5ebdeea9b37d2cf67f6e60eb7e52133ba98d626e57374ace588" exitCode=0 Nov 26 08:00:39 crc kubenswrapper[4492]: I1126 08:00:39.029220 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-65sb7" event={"ID":"dbfebb13-8fa0-4856-935d-51e14bfaea7f","Type":"ContainerDied","Data":"0974dbaffe78a5ebdeea9b37d2cf67f6e60eb7e52133ba98d626e57374ace588"} Nov 26 08:00:40 crc kubenswrapper[4492]: I1126 08:00:40.042919 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/redhat-operators-65sb7" event={"ID":"dbfebb13-8fa0-4856-935d-51e14bfaea7f","Type":"ContainerStarted","Data":"01f251aca854bea63e86c76baa364af0ff6c12358c6c340c9e80015f1cc92bd8"} Nov 26 08:00:40 crc kubenswrapper[4492]: I1126 08:00:40.071338 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-65sb7" podStartSLOduration=2.443758814 podStartE2EDuration="7.071312293s" podCreationTimestamp="2025-11-26 08:00:33 +0000 UTC" firstStartedPulling="2025-11-26 08:00:34.989869455 +0000 UTC m=+4330.873757753" lastFinishedPulling="2025-11-26 08:00:39.617422934 +0000 UTC m=+4335.501311232" observedRunningTime="2025-11-26 08:00:40.061381881 +0000 UTC m=+4335.945270179" watchObservedRunningTime="2025-11-26 08:00:40.071312293 +0000 UTC m=+4335.955200591" Nov 26 08:00:44 crc kubenswrapper[4492]: I1126 08:00:44.037376 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-65sb7" Nov 26 08:00:44 crc kubenswrapper[4492]: I1126 08:00:44.038134 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-65sb7" Nov 26 08:00:44 crc kubenswrapper[4492]: I1126 08:00:44.445343 4492 scope.go:117] "RemoveContainer" containerID="662914ce15995ceb9487d63c3784ecf253e21638f72690c752d5cfd0b226bddb" Nov 26 08:00:44 crc kubenswrapper[4492]: E1126 08:00:44.445681 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:00:45 crc kubenswrapper[4492]: I1126 08:00:45.094121 4492 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-65sb7" podUID="dbfebb13-8fa0-4856-935d-51e14bfaea7f" containerName="registry-server" probeResult="failure" output=< Nov 26 08:00:45 crc kubenswrapper[4492]: timeout: failed to connect service ":50051" within 1s Nov 26 08:00:45 crc kubenswrapper[4492]: > Nov 26 08:00:54 crc kubenswrapper[4492]: I1126 08:00:54.077885 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-65sb7" Nov 26 08:00:54 crc kubenswrapper[4492]: I1126 08:00:54.125639 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-65sb7" Nov 26 08:00:54 crc kubenswrapper[4492]: I1126 08:00:54.324125 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-65sb7"] Nov 26 08:00:55 crc kubenswrapper[4492]: I1126 08:00:55.178046 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-65sb7" podUID="dbfebb13-8fa0-4856-935d-51e14bfaea7f" containerName="registry-server" containerID="cri-o://01f251aca854bea63e86c76baa364af0ff6c12358c6c340c9e80015f1cc92bd8" gracePeriod=2 Nov 26 08:00:55 crc kubenswrapper[4492]: I1126 08:00:55.694564 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-65sb7" Nov 26 08:00:55 crc kubenswrapper[4492]: I1126 08:00:55.856308 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dk9qv\" (UniqueName: \"kubernetes.io/projected/dbfebb13-8fa0-4856-935d-51e14bfaea7f-kube-api-access-dk9qv\") pod \"dbfebb13-8fa0-4856-935d-51e14bfaea7f\" (UID: \"dbfebb13-8fa0-4856-935d-51e14bfaea7f\") " Nov 26 08:00:55 crc kubenswrapper[4492]: I1126 08:00:55.856365 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dbfebb13-8fa0-4856-935d-51e14bfaea7f-utilities\") pod \"dbfebb13-8fa0-4856-935d-51e14bfaea7f\" (UID: \"dbfebb13-8fa0-4856-935d-51e14bfaea7f\") " Nov 26 08:00:55 crc kubenswrapper[4492]: I1126 08:00:55.856432 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dbfebb13-8fa0-4856-935d-51e14bfaea7f-catalog-content\") pod \"dbfebb13-8fa0-4856-935d-51e14bfaea7f\" (UID: \"dbfebb13-8fa0-4856-935d-51e14bfaea7f\") " Nov 26 08:00:55 crc kubenswrapper[4492]: I1126 08:00:55.857456 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dbfebb13-8fa0-4856-935d-51e14bfaea7f-utilities" (OuterVolumeSpecName: "utilities") pod "dbfebb13-8fa0-4856-935d-51e14bfaea7f" (UID: "dbfebb13-8fa0-4856-935d-51e14bfaea7f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 08:00:55 crc kubenswrapper[4492]: I1126 08:00:55.872932 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dbfebb13-8fa0-4856-935d-51e14bfaea7f-kube-api-access-dk9qv" (OuterVolumeSpecName: "kube-api-access-dk9qv") pod "dbfebb13-8fa0-4856-935d-51e14bfaea7f" (UID: "dbfebb13-8fa0-4856-935d-51e14bfaea7f"). InnerVolumeSpecName "kube-api-access-dk9qv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:00:55 crc kubenswrapper[4492]: I1126 08:00:55.923924 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dbfebb13-8fa0-4856-935d-51e14bfaea7f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "dbfebb13-8fa0-4856-935d-51e14bfaea7f" (UID: "dbfebb13-8fa0-4856-935d-51e14bfaea7f"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 08:00:55 crc kubenswrapper[4492]: I1126 08:00:55.959562 4492 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dbfebb13-8fa0-4856-935d-51e14bfaea7f-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 08:00:55 crc kubenswrapper[4492]: I1126 08:00:55.959599 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dk9qv\" (UniqueName: \"kubernetes.io/projected/dbfebb13-8fa0-4856-935d-51e14bfaea7f-kube-api-access-dk9qv\") on node \"crc\" DevicePath \"\"" Nov 26 08:00:55 crc kubenswrapper[4492]: I1126 08:00:55.959612 4492 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dbfebb13-8fa0-4856-935d-51e14bfaea7f-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 08:00:56 crc kubenswrapper[4492]: I1126 08:00:56.192420 4492 generic.go:334] "Generic (PLEG): container finished" podID="dbfebb13-8fa0-4856-935d-51e14bfaea7f" containerID="01f251aca854bea63e86c76baa364af0ff6c12358c6c340c9e80015f1cc92bd8" exitCode=0 Nov 26 08:00:56 crc kubenswrapper[4492]: I1126 08:00:56.192684 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-65sb7" event={"ID":"dbfebb13-8fa0-4856-935d-51e14bfaea7f","Type":"ContainerDied","Data":"01f251aca854bea63e86c76baa364af0ff6c12358c6c340c9e80015f1cc92bd8"} Nov 26 08:00:56 crc kubenswrapper[4492]: I1126 08:00:56.192847 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-65sb7" Nov 26 08:00:56 crc kubenswrapper[4492]: I1126 08:00:56.192916 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-65sb7" event={"ID":"dbfebb13-8fa0-4856-935d-51e14bfaea7f","Type":"ContainerDied","Data":"123981abcfd3cc47ed144b869ba252e67a88155e11c4bb12905994b1d928b838"} Nov 26 08:00:56 crc kubenswrapper[4492]: I1126 08:00:56.192957 4492 scope.go:117] "RemoveContainer" containerID="01f251aca854bea63e86c76baa364af0ff6c12358c6c340c9e80015f1cc92bd8" Nov 26 08:00:56 crc kubenswrapper[4492]: I1126 08:00:56.224032 4492 scope.go:117] "RemoveContainer" containerID="0974dbaffe78a5ebdeea9b37d2cf67f6e60eb7e52133ba98d626e57374ace588" Nov 26 08:00:56 crc kubenswrapper[4492]: I1126 08:00:56.239597 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-65sb7"] Nov 26 08:00:56 crc kubenswrapper[4492]: I1126 08:00:56.245899 4492 scope.go:117] "RemoveContainer" containerID="30d9c1e85f41fb61f4862eb4a6115391ec6a7b8609d7a8f1c4e21342ed1e9d03" Nov 26 08:00:56 crc kubenswrapper[4492]: I1126 08:00:56.246588 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-65sb7"] Nov 26 08:00:56 crc kubenswrapper[4492]: I1126 08:00:56.279213 4492 scope.go:117] "RemoveContainer" containerID="01f251aca854bea63e86c76baa364af0ff6c12358c6c340c9e80015f1cc92bd8" Nov 26 08:00:56 crc kubenswrapper[4492]: E1126 08:00:56.281256 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"01f251aca854bea63e86c76baa364af0ff6c12358c6c340c9e80015f1cc92bd8\": container with ID starting with 01f251aca854bea63e86c76baa364af0ff6c12358c6c340c9e80015f1cc92bd8 not found: ID does not exist" containerID="01f251aca854bea63e86c76baa364af0ff6c12358c6c340c9e80015f1cc92bd8" Nov 26 08:00:56 crc kubenswrapper[4492]: I1126 08:00:56.281312 4492 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"01f251aca854bea63e86c76baa364af0ff6c12358c6c340c9e80015f1cc92bd8"} err="failed to get container status \"01f251aca854bea63e86c76baa364af0ff6c12358c6c340c9e80015f1cc92bd8\": rpc error: code = NotFound desc = could not find container \"01f251aca854bea63e86c76baa364af0ff6c12358c6c340c9e80015f1cc92bd8\": container with ID starting with 01f251aca854bea63e86c76baa364af0ff6c12358c6c340c9e80015f1cc92bd8 not found: ID does not exist" Nov 26 08:00:56 crc kubenswrapper[4492]: I1126 08:00:56.281348 4492 scope.go:117] "RemoveContainer" containerID="0974dbaffe78a5ebdeea9b37d2cf67f6e60eb7e52133ba98d626e57374ace588" Nov 26 08:00:56 crc kubenswrapper[4492]: E1126 08:00:56.281719 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0974dbaffe78a5ebdeea9b37d2cf67f6e60eb7e52133ba98d626e57374ace588\": container with ID starting with 0974dbaffe78a5ebdeea9b37d2cf67f6e60eb7e52133ba98d626e57374ace588 not found: ID does not exist" containerID="0974dbaffe78a5ebdeea9b37d2cf67f6e60eb7e52133ba98d626e57374ace588" Nov 26 08:00:56 crc kubenswrapper[4492]: I1126 08:00:56.281779 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0974dbaffe78a5ebdeea9b37d2cf67f6e60eb7e52133ba98d626e57374ace588"} err="failed to get container status \"0974dbaffe78a5ebdeea9b37d2cf67f6e60eb7e52133ba98d626e57374ace588\": rpc error: code = NotFound desc = could not find container \"0974dbaffe78a5ebdeea9b37d2cf67f6e60eb7e52133ba98d626e57374ace588\": container with ID starting with 0974dbaffe78a5ebdeea9b37d2cf67f6e60eb7e52133ba98d626e57374ace588 not found: ID does not exist" Nov 26 08:00:56 crc kubenswrapper[4492]: I1126 08:00:56.281820 4492 scope.go:117] "RemoveContainer" containerID="30d9c1e85f41fb61f4862eb4a6115391ec6a7b8609d7a8f1c4e21342ed1e9d03" Nov 26 08:00:56 crc kubenswrapper[4492]: E1126 08:00:56.282150 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"30d9c1e85f41fb61f4862eb4a6115391ec6a7b8609d7a8f1c4e21342ed1e9d03\": container with ID starting with 30d9c1e85f41fb61f4862eb4a6115391ec6a7b8609d7a8f1c4e21342ed1e9d03 not found: ID does not exist" containerID="30d9c1e85f41fb61f4862eb4a6115391ec6a7b8609d7a8f1c4e21342ed1e9d03" Nov 26 08:00:56 crc kubenswrapper[4492]: I1126 08:00:56.282208 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"30d9c1e85f41fb61f4862eb4a6115391ec6a7b8609d7a8f1c4e21342ed1e9d03"} err="failed to get container status \"30d9c1e85f41fb61f4862eb4a6115391ec6a7b8609d7a8f1c4e21342ed1e9d03\": rpc error: code = NotFound desc = could not find container \"30d9c1e85f41fb61f4862eb4a6115391ec6a7b8609d7a8f1c4e21342ed1e9d03\": container with ID starting with 30d9c1e85f41fb61f4862eb4a6115391ec6a7b8609d7a8f1c4e21342ed1e9d03 not found: ID does not exist" Nov 26 08:00:56 crc kubenswrapper[4492]: I1126 08:00:56.439103 4492 scope.go:117] "RemoveContainer" containerID="662914ce15995ceb9487d63c3784ecf253e21638f72690c752d5cfd0b226bddb" Nov 26 08:00:56 crc kubenswrapper[4492]: E1126 08:00:56.439721 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:00:56 crc kubenswrapper[4492]: I1126 08:00:56.453378 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dbfebb13-8fa0-4856-935d-51e14bfaea7f" path="/var/lib/kubelet/pods/dbfebb13-8fa0-4856-935d-51e14bfaea7f/volumes" Nov 26 08:01:00 crc kubenswrapper[4492]: I1126 08:01:00.167024 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-cron-29402401-lmsm6"] Nov 26 08:01:00 crc kubenswrapper[4492]: E1126 08:01:00.167998 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dbfebb13-8fa0-4856-935d-51e14bfaea7f" containerName="extract-utilities" Nov 26 08:01:00 crc kubenswrapper[4492]: I1126 08:01:00.170219 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="dbfebb13-8fa0-4856-935d-51e14bfaea7f" containerName="extract-utilities" Nov 26 08:01:00 crc kubenswrapper[4492]: E1126 08:01:00.170267 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dbfebb13-8fa0-4856-935d-51e14bfaea7f" containerName="extract-content" Nov 26 08:01:00 crc kubenswrapper[4492]: I1126 08:01:00.170277 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="dbfebb13-8fa0-4856-935d-51e14bfaea7f" containerName="extract-content" Nov 26 08:01:00 crc kubenswrapper[4492]: E1126 08:01:00.170311 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dbfebb13-8fa0-4856-935d-51e14bfaea7f" containerName="registry-server" Nov 26 08:01:00 crc kubenswrapper[4492]: I1126 08:01:00.170317 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="dbfebb13-8fa0-4856-935d-51e14bfaea7f" containerName="registry-server" Nov 26 08:01:00 crc kubenswrapper[4492]: I1126 08:01:00.170553 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="dbfebb13-8fa0-4856-935d-51e14bfaea7f" containerName="registry-server" Nov 26 08:01:00 crc kubenswrapper[4492]: I1126 08:01:00.171230 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29402401-lmsm6" Nov 26 08:01:00 crc kubenswrapper[4492]: I1126 08:01:00.192310 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29402401-lmsm6"] Nov 26 08:01:00 crc kubenswrapper[4492]: I1126 08:01:00.252229 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1e964dc0-0cf3-4c83-b4a8-2294c5bc77cf-config-data\") pod \"keystone-cron-29402401-lmsm6\" (UID: \"1e964dc0-0cf3-4c83-b4a8-2294c5bc77cf\") " pod="openstack/keystone-cron-29402401-lmsm6" Nov 26 08:01:00 crc kubenswrapper[4492]: I1126 08:01:00.252305 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1e964dc0-0cf3-4c83-b4a8-2294c5bc77cf-combined-ca-bundle\") pod \"keystone-cron-29402401-lmsm6\" (UID: \"1e964dc0-0cf3-4c83-b4a8-2294c5bc77cf\") " pod="openstack/keystone-cron-29402401-lmsm6" Nov 26 08:01:00 crc kubenswrapper[4492]: I1126 08:01:00.252365 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wmzlr\" (UniqueName: \"kubernetes.io/projected/1e964dc0-0cf3-4c83-b4a8-2294c5bc77cf-kube-api-access-wmzlr\") pod \"keystone-cron-29402401-lmsm6\" (UID: \"1e964dc0-0cf3-4c83-b4a8-2294c5bc77cf\") " pod="openstack/keystone-cron-29402401-lmsm6" Nov 26 08:01:00 crc kubenswrapper[4492]: I1126 08:01:00.252381 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/1e964dc0-0cf3-4c83-b4a8-2294c5bc77cf-fernet-keys\") pod \"keystone-cron-29402401-lmsm6\" (UID: \"1e964dc0-0cf3-4c83-b4a8-2294c5bc77cf\") " pod="openstack/keystone-cron-29402401-lmsm6" Nov 26 08:01:00 crc kubenswrapper[4492]: I1126 08:01:00.353923 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1e964dc0-0cf3-4c83-b4a8-2294c5bc77cf-config-data\") pod \"keystone-cron-29402401-lmsm6\" (UID: \"1e964dc0-0cf3-4c83-b4a8-2294c5bc77cf\") " pod="openstack/keystone-cron-29402401-lmsm6" Nov 26 08:01:00 crc kubenswrapper[4492]: I1126 08:01:00.354006 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1e964dc0-0cf3-4c83-b4a8-2294c5bc77cf-combined-ca-bundle\") pod \"keystone-cron-29402401-lmsm6\" (UID: \"1e964dc0-0cf3-4c83-b4a8-2294c5bc77cf\") " pod="openstack/keystone-cron-29402401-lmsm6" Nov 26 08:01:00 crc kubenswrapper[4492]: I1126 08:01:00.354091 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wmzlr\" (UniqueName: \"kubernetes.io/projected/1e964dc0-0cf3-4c83-b4a8-2294c5bc77cf-kube-api-access-wmzlr\") pod \"keystone-cron-29402401-lmsm6\" (UID: \"1e964dc0-0cf3-4c83-b4a8-2294c5bc77cf\") " pod="openstack/keystone-cron-29402401-lmsm6" Nov 26 08:01:00 crc kubenswrapper[4492]: I1126 08:01:00.354110 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/1e964dc0-0cf3-4c83-b4a8-2294c5bc77cf-fernet-keys\") pod \"keystone-cron-29402401-lmsm6\" (UID: \"1e964dc0-0cf3-4c83-b4a8-2294c5bc77cf\") " pod="openstack/keystone-cron-29402401-lmsm6" Nov 26 08:01:00 crc kubenswrapper[4492]: I1126 08:01:00.362045 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1e964dc0-0cf3-4c83-b4a8-2294c5bc77cf-combined-ca-bundle\") pod \"keystone-cron-29402401-lmsm6\" (UID: \"1e964dc0-0cf3-4c83-b4a8-2294c5bc77cf\") " pod="openstack/keystone-cron-29402401-lmsm6" Nov 26 08:01:00 crc kubenswrapper[4492]: I1126 08:01:00.362854 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/1e964dc0-0cf3-4c83-b4a8-2294c5bc77cf-fernet-keys\") pod \"keystone-cron-29402401-lmsm6\" (UID: \"1e964dc0-0cf3-4c83-b4a8-2294c5bc77cf\") " pod="openstack/keystone-cron-29402401-lmsm6" Nov 26 08:01:00 crc kubenswrapper[4492]: I1126 08:01:00.363911 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1e964dc0-0cf3-4c83-b4a8-2294c5bc77cf-config-data\") pod \"keystone-cron-29402401-lmsm6\" (UID: \"1e964dc0-0cf3-4c83-b4a8-2294c5bc77cf\") " pod="openstack/keystone-cron-29402401-lmsm6" Nov 26 08:01:00 crc kubenswrapper[4492]: I1126 08:01:00.368733 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wmzlr\" (UniqueName: \"kubernetes.io/projected/1e964dc0-0cf3-4c83-b4a8-2294c5bc77cf-kube-api-access-wmzlr\") pod \"keystone-cron-29402401-lmsm6\" (UID: \"1e964dc0-0cf3-4c83-b4a8-2294c5bc77cf\") " pod="openstack/keystone-cron-29402401-lmsm6" Nov 26 08:01:00 crc kubenswrapper[4492]: I1126 08:01:00.468901 4492 scope.go:117] "RemoveContainer" containerID="0057352d6d6f7dc49a3a43dc4de6c73d7915f2cc7e378ea0cdab4c0254d2338a" Nov 26 08:01:00 crc kubenswrapper[4492]: I1126 08:01:00.492226 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29402401-lmsm6" Nov 26 08:01:00 crc kubenswrapper[4492]: I1126 08:01:00.942218 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29402401-lmsm6"] Nov 26 08:01:01 crc kubenswrapper[4492]: I1126 08:01:01.230935 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29402401-lmsm6" event={"ID":"1e964dc0-0cf3-4c83-b4a8-2294c5bc77cf","Type":"ContainerStarted","Data":"d6fd24c74055c19062cf61c917a09af6aac0e0449f9673e97d0b7ddbf5bf7ba8"} Nov 26 08:01:01 crc kubenswrapper[4492]: I1126 08:01:01.231252 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29402401-lmsm6" event={"ID":"1e964dc0-0cf3-4c83-b4a8-2294c5bc77cf","Type":"ContainerStarted","Data":"5bfd51a4f0841737689c855de0c94d769848ce49502b13dfdb2f43072600cd1b"} Nov 26 08:01:01 crc kubenswrapper[4492]: I1126 08:01:01.246963 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-cron-29402401-lmsm6" podStartSLOduration=1.24694606 podStartE2EDuration="1.24694606s" podCreationTimestamp="2025-11-26 08:01:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 08:01:01.244012744 +0000 UTC m=+4357.127901043" watchObservedRunningTime="2025-11-26 08:01:01.24694606 +0000 UTC m=+4357.130834358" Nov 26 08:01:04 crc kubenswrapper[4492]: I1126 08:01:04.253003 4492 generic.go:334] "Generic (PLEG): container finished" podID="1e964dc0-0cf3-4c83-b4a8-2294c5bc77cf" containerID="d6fd24c74055c19062cf61c917a09af6aac0e0449f9673e97d0b7ddbf5bf7ba8" exitCode=0 Nov 26 08:01:04 crc kubenswrapper[4492]: I1126 08:01:04.253117 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29402401-lmsm6" 
event={"ID":"1e964dc0-0cf3-4c83-b4a8-2294c5bc77cf","Type":"ContainerDied","Data":"d6fd24c74055c19062cf61c917a09af6aac0e0449f9673e97d0b7ddbf5bf7ba8"} Nov 26 08:01:05 crc kubenswrapper[4492]: I1126 08:01:05.529410 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29402401-lmsm6" Nov 26 08:01:05 crc kubenswrapper[4492]: I1126 08:01:05.660443 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1e964dc0-0cf3-4c83-b4a8-2294c5bc77cf-combined-ca-bundle\") pod \"1e964dc0-0cf3-4c83-b4a8-2294c5bc77cf\" (UID: \"1e964dc0-0cf3-4c83-b4a8-2294c5bc77cf\") " Nov 26 08:01:05 crc kubenswrapper[4492]: I1126 08:01:05.660488 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1e964dc0-0cf3-4c83-b4a8-2294c5bc77cf-config-data\") pod \"1e964dc0-0cf3-4c83-b4a8-2294c5bc77cf\" (UID: \"1e964dc0-0cf3-4c83-b4a8-2294c5bc77cf\") " Nov 26 08:01:05 crc kubenswrapper[4492]: I1126 08:01:05.660509 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/1e964dc0-0cf3-4c83-b4a8-2294c5bc77cf-fernet-keys\") pod \"1e964dc0-0cf3-4c83-b4a8-2294c5bc77cf\" (UID: \"1e964dc0-0cf3-4c83-b4a8-2294c5bc77cf\") " Nov 26 08:01:05 crc kubenswrapper[4492]: I1126 08:01:05.660641 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wmzlr\" (UniqueName: \"kubernetes.io/projected/1e964dc0-0cf3-4c83-b4a8-2294c5bc77cf-kube-api-access-wmzlr\") pod \"1e964dc0-0cf3-4c83-b4a8-2294c5bc77cf\" (UID: \"1e964dc0-0cf3-4c83-b4a8-2294c5bc77cf\") " Nov 26 08:01:05 crc kubenswrapper[4492]: I1126 08:01:05.666626 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1e964dc0-0cf3-4c83-b4a8-2294c5bc77cf-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "1e964dc0-0cf3-4c83-b4a8-2294c5bc77cf" (UID: "1e964dc0-0cf3-4c83-b4a8-2294c5bc77cf"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:01:05 crc kubenswrapper[4492]: I1126 08:01:05.666776 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1e964dc0-0cf3-4c83-b4a8-2294c5bc77cf-kube-api-access-wmzlr" (OuterVolumeSpecName: "kube-api-access-wmzlr") pod "1e964dc0-0cf3-4c83-b4a8-2294c5bc77cf" (UID: "1e964dc0-0cf3-4c83-b4a8-2294c5bc77cf"). InnerVolumeSpecName "kube-api-access-wmzlr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:01:05 crc kubenswrapper[4492]: I1126 08:01:05.686295 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1e964dc0-0cf3-4c83-b4a8-2294c5bc77cf-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1e964dc0-0cf3-4c83-b4a8-2294c5bc77cf" (UID: "1e964dc0-0cf3-4c83-b4a8-2294c5bc77cf"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:01:05 crc kubenswrapper[4492]: I1126 08:01:05.701865 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1e964dc0-0cf3-4c83-b4a8-2294c5bc77cf-config-data" (OuterVolumeSpecName: "config-data") pod "1e964dc0-0cf3-4c83-b4a8-2294c5bc77cf" (UID: "1e964dc0-0cf3-4c83-b4a8-2294c5bc77cf"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:01:05 crc kubenswrapper[4492]: I1126 08:01:05.763399 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wmzlr\" (UniqueName: \"kubernetes.io/projected/1e964dc0-0cf3-4c83-b4a8-2294c5bc77cf-kube-api-access-wmzlr\") on node \"crc\" DevicePath \"\"" Nov 26 08:01:05 crc kubenswrapper[4492]: I1126 08:01:05.763434 4492 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1e964dc0-0cf3-4c83-b4a8-2294c5bc77cf-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 08:01:05 crc kubenswrapper[4492]: I1126 08:01:05.763445 4492 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1e964dc0-0cf3-4c83-b4a8-2294c5bc77cf-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 08:01:05 crc kubenswrapper[4492]: I1126 08:01:05.763454 4492 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/1e964dc0-0cf3-4c83-b4a8-2294c5bc77cf-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 26 08:01:06 crc kubenswrapper[4492]: I1126 08:01:06.268226 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29402401-lmsm6" event={"ID":"1e964dc0-0cf3-4c83-b4a8-2294c5bc77cf","Type":"ContainerDied","Data":"5bfd51a4f0841737689c855de0c94d769848ce49502b13dfdb2f43072600cd1b"} Nov 26 08:01:06 crc kubenswrapper[4492]: I1126 08:01:06.268268 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29402401-lmsm6" Nov 26 08:01:06 crc kubenswrapper[4492]: I1126 08:01:06.268549 4492 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5bfd51a4f0841737689c855de0c94d769848ce49502b13dfdb2f43072600cd1b" Nov 26 08:01:10 crc kubenswrapper[4492]: I1126 08:01:10.438554 4492 scope.go:117] "RemoveContainer" containerID="662914ce15995ceb9487d63c3784ecf253e21638f72690c752d5cfd0b226bddb" Nov 26 08:01:10 crc kubenswrapper[4492]: E1126 08:01:10.439065 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:01:21 crc kubenswrapper[4492]: I1126 08:01:21.437834 4492 scope.go:117] "RemoveContainer" containerID="662914ce15995ceb9487d63c3784ecf253e21638f72690c752d5cfd0b226bddb" Nov 26 08:01:21 crc kubenswrapper[4492]: E1126 08:01:21.438569 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:01:35 crc kubenswrapper[4492]: I1126 08:01:35.438457 4492 scope.go:117] "RemoveContainer" containerID="662914ce15995ceb9487d63c3784ecf253e21638f72690c752d5cfd0b226bddb" Nov 26 08:01:35 crc kubenswrapper[4492]: E1126 08:01:35.439259 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" 
with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:01:50 crc kubenswrapper[4492]: I1126 08:01:50.439284 4492 scope.go:117] "RemoveContainer" containerID="662914ce15995ceb9487d63c3784ecf253e21638f72690c752d5cfd0b226bddb" Nov 26 08:01:50 crc kubenswrapper[4492]: E1126 08:01:50.439827 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:02:05 crc kubenswrapper[4492]: I1126 08:02:05.438864 4492 scope.go:117] "RemoveContainer" containerID="662914ce15995ceb9487d63c3784ecf253e21638f72690c752d5cfd0b226bddb" Nov 26 08:02:05 crc kubenswrapper[4492]: E1126 08:02:05.439618 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:02:17 crc kubenswrapper[4492]: I1126 08:02:17.438908 4492 scope.go:117] "RemoveContainer" containerID="662914ce15995ceb9487d63c3784ecf253e21638f72690c752d5cfd0b226bddb" Nov 26 08:02:17 crc kubenswrapper[4492]: E1126 08:02:17.439531 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:02:26 crc kubenswrapper[4492]: I1126 08:02:26.044092 4492 generic.go:334] "Generic (PLEG): container finished" podID="63d7860b-0c48-4075-8658-58e2567d8abf" containerID="f78b7e4b129d84da005fdbeae417db774cdf0e1b093c61453a424eaa20c49f8d" exitCode=0 Nov 26 08:02:26 crc kubenswrapper[4492]: I1126 08:02:26.044312 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest-s00-multi-thread-testing" event={"ID":"63d7860b-0c48-4075-8658-58e2567d8abf","Type":"ContainerDied","Data":"f78b7e4b129d84da005fdbeae417db774cdf0e1b093c61453a424eaa20c49f8d"} Nov 26 08:02:27 crc kubenswrapper[4492]: I1126 08:02:27.515916 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/tempest-tests-tempest-s00-multi-thread-testing" Nov 26 08:02:27 crc kubenswrapper[4492]: I1126 08:02:27.612413 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/tempest-tests-tempest-s01-single-thread-testing"] Nov 26 08:02:27 crc kubenswrapper[4492]: E1126 08:02:27.612806 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="63d7860b-0c48-4075-8658-58e2567d8abf" containerName="tempest-tests-tempest-tests-runner" Nov 26 08:02:27 crc kubenswrapper[4492]: I1126 08:02:27.612825 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="63d7860b-0c48-4075-8658-58e2567d8abf" containerName="tempest-tests-tempest-tests-runner" Nov 26 08:02:27 crc kubenswrapper[4492]: E1126 08:02:27.612856 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1e964dc0-0cf3-4c83-b4a8-2294c5bc77cf" containerName="keystone-cron" Nov 26 08:02:27 crc kubenswrapper[4492]: I1126 08:02:27.612864 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="1e964dc0-0cf3-4c83-b4a8-2294c5bc77cf" containerName="keystone-cron" Nov 26 08:02:27 crc kubenswrapper[4492]: I1126 08:02:27.613038 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="63d7860b-0c48-4075-8658-58e2567d8abf" containerName="tempest-tests-tempest-tests-runner" Nov 26 08:02:27 crc kubenswrapper[4492]: I1126 08:02:27.613057 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="1e964dc0-0cf3-4c83-b4a8-2294c5bc77cf" containerName="keystone-cron" Nov 26 08:02:27 crc kubenswrapper[4492]: I1126 08:02:27.613634 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest-s01-single-thread-testing" Nov 26 08:02:27 crc kubenswrapper[4492]: I1126 08:02:27.621386 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-custom-data-s1" Nov 26 08:02:27 crc kubenswrapper[4492]: I1126 08:02:27.621392 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-env-vars-s1" Nov 26 08:02:27 crc kubenswrapper[4492]: I1126 08:02:27.622856 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tempest-tests-tempest-s01-single-thread-testing"] Nov 26 08:02:27 crc kubenswrapper[4492]: I1126 08:02:27.643338 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/63d7860b-0c48-4075-8658-58e2567d8abf-openstack-config-secret\") pod \"63d7860b-0c48-4075-8658-58e2567d8abf\" (UID: \"63d7860b-0c48-4075-8658-58e2567d8abf\") " Nov 26 08:02:27 crc kubenswrapper[4492]: I1126 08:02:27.643442 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/63d7860b-0c48-4075-8658-58e2567d8abf-ca-certs\") pod \"63d7860b-0c48-4075-8658-58e2567d8abf\" (UID: \"63d7860b-0c48-4075-8658-58e2567d8abf\") " Nov 26 08:02:27 crc kubenswrapper[4492]: I1126 08:02:27.643569 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/63d7860b-0c48-4075-8658-58e2567d8abf-ssh-key\") pod \"63d7860b-0c48-4075-8658-58e2567d8abf\" (UID: \"63d7860b-0c48-4075-8658-58e2567d8abf\") " Nov 26 08:02:27 crc kubenswrapper[4492]: I1126 08:02:27.643657 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: 
\"kubernetes.io/configmap/63d7860b-0c48-4075-8658-58e2567d8abf-openstack-config\") pod \"63d7860b-0c48-4075-8658-58e2567d8abf\" (UID: \"63d7860b-0c48-4075-8658-58e2567d8abf\") " Nov 26 08:02:27 crc kubenswrapper[4492]: I1126 08:02:27.643685 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/63d7860b-0c48-4075-8658-58e2567d8abf-test-operator-ephemeral-temporary\") pod \"63d7860b-0c48-4075-8658-58e2567d8abf\" (UID: \"63d7860b-0c48-4075-8658-58e2567d8abf\") " Nov 26 08:02:27 crc kubenswrapper[4492]: I1126 08:02:27.643719 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-scc7s\" (UniqueName: \"kubernetes.io/projected/63d7860b-0c48-4075-8658-58e2567d8abf-kube-api-access-scc7s\") pod \"63d7860b-0c48-4075-8658-58e2567d8abf\" (UID: \"63d7860b-0c48-4075-8658-58e2567d8abf\") " Nov 26 08:02:27 crc kubenswrapper[4492]: I1126 08:02:27.643740 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-logs\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"63d7860b-0c48-4075-8658-58e2567d8abf\" (UID: \"63d7860b-0c48-4075-8658-58e2567d8abf\") " Nov 26 08:02:27 crc kubenswrapper[4492]: I1126 08:02:27.643776 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/63d7860b-0c48-4075-8658-58e2567d8abf-test-operator-ephemeral-workdir\") pod \"63d7860b-0c48-4075-8658-58e2567d8abf\" (UID: \"63d7860b-0c48-4075-8658-58e2567d8abf\") " Nov 26 08:02:27 crc kubenswrapper[4492]: I1126 08:02:27.643812 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/63d7860b-0c48-4075-8658-58e2567d8abf-config-data\") pod \"63d7860b-0c48-4075-8658-58e2567d8abf\" (UID: \"63d7860b-0c48-4075-8658-58e2567d8abf\") " Nov 26 08:02:27 crc kubenswrapper[4492]: I1126 08:02:27.645364 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/63d7860b-0c48-4075-8658-58e2567d8abf-config-data" (OuterVolumeSpecName: "config-data") pod "63d7860b-0c48-4075-8658-58e2567d8abf" (UID: "63d7860b-0c48-4075-8658-58e2567d8abf"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 08:02:27 crc kubenswrapper[4492]: I1126 08:02:27.645847 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/63d7860b-0c48-4075-8658-58e2567d8abf-test-operator-ephemeral-temporary" (OuterVolumeSpecName: "test-operator-ephemeral-temporary") pod "63d7860b-0c48-4075-8658-58e2567d8abf" (UID: "63d7860b-0c48-4075-8658-58e2567d8abf"). InnerVolumeSpecName "test-operator-ephemeral-temporary". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 08:02:27 crc kubenswrapper[4492]: I1126 08:02:27.655677 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/63d7860b-0c48-4075-8658-58e2567d8abf-kube-api-access-scc7s" (OuterVolumeSpecName: "kube-api-access-scc7s") pod "63d7860b-0c48-4075-8658-58e2567d8abf" (UID: "63d7860b-0c48-4075-8658-58e2567d8abf"). InnerVolumeSpecName "kube-api-access-scc7s". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:02:27 crc kubenswrapper[4492]: I1126 08:02:27.658779 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage12-crc" (OuterVolumeSpecName: "test-operator-logs") pod "63d7860b-0c48-4075-8658-58e2567d8abf" (UID: "63d7860b-0c48-4075-8658-58e2567d8abf"). InnerVolumeSpecName "local-storage12-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 26 08:02:27 crc kubenswrapper[4492]: I1126 08:02:27.670229 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/63d7860b-0c48-4075-8658-58e2567d8abf-ca-certs" (OuterVolumeSpecName: "ca-certs") pod "63d7860b-0c48-4075-8658-58e2567d8abf" (UID: "63d7860b-0c48-4075-8658-58e2567d8abf"). InnerVolumeSpecName "ca-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:02:27 crc kubenswrapper[4492]: I1126 08:02:27.675718 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/63d7860b-0c48-4075-8658-58e2567d8abf-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "63d7860b-0c48-4075-8658-58e2567d8abf" (UID: "63d7860b-0c48-4075-8658-58e2567d8abf"). InnerVolumeSpecName "openstack-config-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:02:27 crc kubenswrapper[4492]: I1126 08:02:27.695096 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/63d7860b-0c48-4075-8658-58e2567d8abf-test-operator-ephemeral-workdir" (OuterVolumeSpecName: "test-operator-ephemeral-workdir") pod "63d7860b-0c48-4075-8658-58e2567d8abf" (UID: "63d7860b-0c48-4075-8658-58e2567d8abf"). InnerVolumeSpecName "test-operator-ephemeral-workdir". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 08:02:27 crc kubenswrapper[4492]: I1126 08:02:27.719120 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/63d7860b-0c48-4075-8658-58e2567d8abf-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "63d7860b-0c48-4075-8658-58e2567d8abf" (UID: "63d7860b-0c48-4075-8658-58e2567d8abf"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:02:27 crc kubenswrapper[4492]: I1126 08:02:27.721093 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/63d7860b-0c48-4075-8658-58e2567d8abf-openstack-config" (OuterVolumeSpecName: "openstack-config") pod "63d7860b-0c48-4075-8658-58e2567d8abf" (UID: "63d7860b-0c48-4075-8658-58e2567d8abf"). InnerVolumeSpecName "openstack-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 08:02:27 crc kubenswrapper[4492]: I1126 08:02:27.745513 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/a7961ca9-f70f-4cb7-97c3-440a53316f29-openstack-config-secret\") pod \"tempest-tests-tempest-s01-single-thread-testing\" (UID: \"a7961ca9-f70f-4cb7-97c3-440a53316f29\") " pod="openstack/tempest-tests-tempest-s01-single-thread-testing" Nov 26 08:02:27 crc kubenswrapper[4492]: I1126 08:02:27.745739 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a7961ca9-f70f-4cb7-97c3-440a53316f29-ssh-key\") pod \"tempest-tests-tempest-s01-single-thread-testing\" (UID: \"a7961ca9-f70f-4cb7-97c3-440a53316f29\") " pod="openstack/tempest-tests-tempest-s01-single-thread-testing" Nov 26 08:02:27 crc kubenswrapper[4492]: I1126 08:02:27.745781 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/a7961ca9-f70f-4cb7-97c3-440a53316f29-openstack-config\") pod \"tempest-tests-tempest-s01-single-thread-testing\" (UID: \"a7961ca9-f70f-4cb7-97c3-440a53316f29\") " pod="openstack/tempest-tests-tempest-s01-single-thread-testing" Nov 26 08:02:27 crc kubenswrapper[4492]: I1126 08:02:27.745831 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/a7961ca9-f70f-4cb7-97c3-440a53316f29-config-data\") pod \"tempest-tests-tempest-s01-single-thread-testing\" (UID: \"a7961ca9-f70f-4cb7-97c3-440a53316f29\") " pod="openstack/tempest-tests-tempest-s01-single-thread-testing" Nov 26 08:02:27 crc kubenswrapper[4492]: I1126 08:02:27.745853 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"tempest-tests-tempest-s01-single-thread-testing\" (UID: \"a7961ca9-f70f-4cb7-97c3-440a53316f29\") " pod="openstack/tempest-tests-tempest-s01-single-thread-testing" Nov 26 08:02:27 crc kubenswrapper[4492]: I1126 08:02:27.746104 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k94mg\" (UniqueName: \"kubernetes.io/projected/a7961ca9-f70f-4cb7-97c3-440a53316f29-kube-api-access-k94mg\") pod \"tempest-tests-tempest-s01-single-thread-testing\" (UID: \"a7961ca9-f70f-4cb7-97c3-440a53316f29\") " pod="openstack/tempest-tests-tempest-s01-single-thread-testing" Nov 26 08:02:27 crc kubenswrapper[4492]: I1126 08:02:27.746288 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/a7961ca9-f70f-4cb7-97c3-440a53316f29-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest-s01-single-thread-testing\" (UID: \"a7961ca9-f70f-4cb7-97c3-440a53316f29\") " pod="openstack/tempest-tests-tempest-s01-single-thread-testing" Nov 26 08:02:27 crc kubenswrapper[4492]: I1126 08:02:27.746327 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/a7961ca9-f70f-4cb7-97c3-440a53316f29-ca-certs\") pod \"tempest-tests-tempest-s01-single-thread-testing\" (UID: \"a7961ca9-f70f-4cb7-97c3-440a53316f29\") " 
pod="openstack/tempest-tests-tempest-s01-single-thread-testing" Nov 26 08:02:27 crc kubenswrapper[4492]: I1126 08:02:27.746372 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/a7961ca9-f70f-4cb7-97c3-440a53316f29-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest-s01-single-thread-testing\" (UID: \"a7961ca9-f70f-4cb7-97c3-440a53316f29\") " pod="openstack/tempest-tests-tempest-s01-single-thread-testing" Nov 26 08:02:27 crc kubenswrapper[4492]: I1126 08:02:27.746529 4492 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/63d7860b-0c48-4075-8658-58e2567d8abf-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 26 08:02:27 crc kubenswrapper[4492]: I1126 08:02:27.746580 4492 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/63d7860b-0c48-4075-8658-58e2567d8abf-openstack-config\") on node \"crc\" DevicePath \"\"" Nov 26 08:02:27 crc kubenswrapper[4492]: I1126 08:02:27.746617 4492 reconciler_common.go:293] "Volume detached for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/63d7860b-0c48-4075-8658-58e2567d8abf-test-operator-ephemeral-temporary\") on node \"crc\" DevicePath \"\"" Nov 26 08:02:27 crc kubenswrapper[4492]: I1126 08:02:27.746640 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-scc7s\" (UniqueName: \"kubernetes.io/projected/63d7860b-0c48-4075-8658-58e2567d8abf-kube-api-access-scc7s\") on node \"crc\" DevicePath \"\"" Nov 26 08:02:27 crc kubenswrapper[4492]: I1126 08:02:27.746654 4492 reconciler_common.go:293] "Volume detached for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/63d7860b-0c48-4075-8658-58e2567d8abf-test-operator-ephemeral-workdir\") on node \"crc\" DevicePath \"\"" Nov 26 08:02:27 crc kubenswrapper[4492]: I1126 08:02:27.746665 4492 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/63d7860b-0c48-4075-8658-58e2567d8abf-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 08:02:27 crc kubenswrapper[4492]: I1126 08:02:27.746675 4492 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/63d7860b-0c48-4075-8658-58e2567d8abf-openstack-config-secret\") on node \"crc\" DevicePath \"\"" Nov 26 08:02:27 crc kubenswrapper[4492]: I1126 08:02:27.746685 4492 reconciler_common.go:293] "Volume detached for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/63d7860b-0c48-4075-8658-58e2567d8abf-ca-certs\") on node \"crc\" DevicePath \"\"" Nov 26 08:02:27 crc kubenswrapper[4492]: I1126 08:02:27.767198 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"tempest-tests-tempest-s01-single-thread-testing\" (UID: \"a7961ca9-f70f-4cb7-97c3-440a53316f29\") " pod="openstack/tempest-tests-tempest-s01-single-thread-testing" Nov 26 08:02:27 crc kubenswrapper[4492]: I1126 08:02:27.848337 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k94mg\" (UniqueName: \"kubernetes.io/projected/a7961ca9-f70f-4cb7-97c3-440a53316f29-kube-api-access-k94mg\") pod \"tempest-tests-tempest-s01-single-thread-testing\" (UID: \"a7961ca9-f70f-4cb7-97c3-440a53316f29\") " 
pod="openstack/tempest-tests-tempest-s01-single-thread-testing" Nov 26 08:02:27 crc kubenswrapper[4492]: I1126 08:02:27.848425 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/a7961ca9-f70f-4cb7-97c3-440a53316f29-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest-s01-single-thread-testing\" (UID: \"a7961ca9-f70f-4cb7-97c3-440a53316f29\") " pod="openstack/tempest-tests-tempest-s01-single-thread-testing" Nov 26 08:02:27 crc kubenswrapper[4492]: I1126 08:02:27.848454 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/a7961ca9-f70f-4cb7-97c3-440a53316f29-ca-certs\") pod \"tempest-tests-tempest-s01-single-thread-testing\" (UID: \"a7961ca9-f70f-4cb7-97c3-440a53316f29\") " pod="openstack/tempest-tests-tempest-s01-single-thread-testing" Nov 26 08:02:27 crc kubenswrapper[4492]: I1126 08:02:27.848479 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/a7961ca9-f70f-4cb7-97c3-440a53316f29-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest-s01-single-thread-testing\" (UID: \"a7961ca9-f70f-4cb7-97c3-440a53316f29\") " pod="openstack/tempest-tests-tempest-s01-single-thread-testing" Nov 26 08:02:27 crc kubenswrapper[4492]: I1126 08:02:27.848935 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/a7961ca9-f70f-4cb7-97c3-440a53316f29-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest-s01-single-thread-testing\" (UID: \"a7961ca9-f70f-4cb7-97c3-440a53316f29\") " pod="openstack/tempest-tests-tempest-s01-single-thread-testing" Nov 26 08:02:27 crc kubenswrapper[4492]: I1126 08:02:27.849048 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/a7961ca9-f70f-4cb7-97c3-440a53316f29-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest-s01-single-thread-testing\" (UID: \"a7961ca9-f70f-4cb7-97c3-440a53316f29\") " pod="openstack/tempest-tests-tempest-s01-single-thread-testing" Nov 26 08:02:27 crc kubenswrapper[4492]: I1126 08:02:27.849852 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/a7961ca9-f70f-4cb7-97c3-440a53316f29-openstack-config-secret\") pod \"tempest-tests-tempest-s01-single-thread-testing\" (UID: \"a7961ca9-f70f-4cb7-97c3-440a53316f29\") " pod="openstack/tempest-tests-tempest-s01-single-thread-testing" Nov 26 08:02:27 crc kubenswrapper[4492]: I1126 08:02:27.850256 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a7961ca9-f70f-4cb7-97c3-440a53316f29-ssh-key\") pod \"tempest-tests-tempest-s01-single-thread-testing\" (UID: \"a7961ca9-f70f-4cb7-97c3-440a53316f29\") " pod="openstack/tempest-tests-tempest-s01-single-thread-testing" Nov 26 08:02:27 crc kubenswrapper[4492]: I1126 08:02:27.850295 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/a7961ca9-f70f-4cb7-97c3-440a53316f29-openstack-config\") pod \"tempest-tests-tempest-s01-single-thread-testing\" (UID: \"a7961ca9-f70f-4cb7-97c3-440a53316f29\") " 
pod="openstack/tempest-tests-tempest-s01-single-thread-testing" Nov 26 08:02:27 crc kubenswrapper[4492]: I1126 08:02:27.850327 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/a7961ca9-f70f-4cb7-97c3-440a53316f29-config-data\") pod \"tempest-tests-tempest-s01-single-thread-testing\" (UID: \"a7961ca9-f70f-4cb7-97c3-440a53316f29\") " pod="openstack/tempest-tests-tempest-s01-single-thread-testing" Nov 26 08:02:27 crc kubenswrapper[4492]: I1126 08:02:27.851142 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/a7961ca9-f70f-4cb7-97c3-440a53316f29-openstack-config\") pod \"tempest-tests-tempest-s01-single-thread-testing\" (UID: \"a7961ca9-f70f-4cb7-97c3-440a53316f29\") " pod="openstack/tempest-tests-tempest-s01-single-thread-testing" Nov 26 08:02:27 crc kubenswrapper[4492]: I1126 08:02:27.851387 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/a7961ca9-f70f-4cb7-97c3-440a53316f29-config-data\") pod \"tempest-tests-tempest-s01-single-thread-testing\" (UID: \"a7961ca9-f70f-4cb7-97c3-440a53316f29\") " pod="openstack/tempest-tests-tempest-s01-single-thread-testing" Nov 26 08:02:27 crc kubenswrapper[4492]: I1126 08:02:27.852336 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/a7961ca9-f70f-4cb7-97c3-440a53316f29-ca-certs\") pod \"tempest-tests-tempest-s01-single-thread-testing\" (UID: \"a7961ca9-f70f-4cb7-97c3-440a53316f29\") " pod="openstack/tempest-tests-tempest-s01-single-thread-testing" Nov 26 08:02:27 crc kubenswrapper[4492]: I1126 08:02:27.852907 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/a7961ca9-f70f-4cb7-97c3-440a53316f29-openstack-config-secret\") pod \"tempest-tests-tempest-s01-single-thread-testing\" (UID: \"a7961ca9-f70f-4cb7-97c3-440a53316f29\") " pod="openstack/tempest-tests-tempest-s01-single-thread-testing" Nov 26 08:02:27 crc kubenswrapper[4492]: I1126 08:02:27.853774 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a7961ca9-f70f-4cb7-97c3-440a53316f29-ssh-key\") pod \"tempest-tests-tempest-s01-single-thread-testing\" (UID: \"a7961ca9-f70f-4cb7-97c3-440a53316f29\") " pod="openstack/tempest-tests-tempest-s01-single-thread-testing" Nov 26 08:02:27 crc kubenswrapper[4492]: I1126 08:02:27.863462 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k94mg\" (UniqueName: \"kubernetes.io/projected/a7961ca9-f70f-4cb7-97c3-440a53316f29-kube-api-access-k94mg\") pod \"tempest-tests-tempest-s01-single-thread-testing\" (UID: \"a7961ca9-f70f-4cb7-97c3-440a53316f29\") " pod="openstack/tempest-tests-tempest-s01-single-thread-testing" Nov 26 08:02:27 crc kubenswrapper[4492]: I1126 08:02:27.929821 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/tempest-tests-tempest-s01-single-thread-testing" Nov 26 08:02:28 crc kubenswrapper[4492]: I1126 08:02:28.062146 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest-s00-multi-thread-testing" event={"ID":"63d7860b-0c48-4075-8658-58e2567d8abf","Type":"ContainerDied","Data":"5546b5d42a9cf2e18a3c7760344aa0baddd56a74468acb3f7496682f153e9c46"} Nov 26 08:02:28 crc kubenswrapper[4492]: I1126 08:02:28.062400 4492 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5546b5d42a9cf2e18a3c7760344aa0baddd56a74468acb3f7496682f153e9c46" Nov 26 08:02:28 crc kubenswrapper[4492]: I1126 08:02:28.062208 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest-s00-multi-thread-testing" Nov 26 08:02:28 crc kubenswrapper[4492]: I1126 08:02:28.638250 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tempest-tests-tempest-s01-single-thread-testing"] Nov 26 08:02:28 crc kubenswrapper[4492]: W1126 08:02:28.647562 4492 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda7961ca9_f70f_4cb7_97c3_440a53316f29.slice/crio-4834cbf127ee0534b76a2a93406830d34e0bf77096d47b3c2bf46627ef2267d0 WatchSource:0}: Error finding container 4834cbf127ee0534b76a2a93406830d34e0bf77096d47b3c2bf46627ef2267d0: Status 404 returned error can't find the container with id 4834cbf127ee0534b76a2a93406830d34e0bf77096d47b3c2bf46627ef2267d0 Nov 26 08:02:29 crc kubenswrapper[4492]: I1126 08:02:29.074442 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest-s01-single-thread-testing" event={"ID":"a7961ca9-f70f-4cb7-97c3-440a53316f29","Type":"ContainerStarted","Data":"4834cbf127ee0534b76a2a93406830d34e0bf77096d47b3c2bf46627ef2267d0"} Nov 26 08:02:31 crc kubenswrapper[4492]: I1126 08:02:31.089584 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest-s01-single-thread-testing" event={"ID":"a7961ca9-f70f-4cb7-97c3-440a53316f29","Type":"ContainerStarted","Data":"37fbe7fded7ea0aca54f988801f6cfb0979e06b162f2afd909aacb855185a038"} Nov 26 08:02:31 crc kubenswrapper[4492]: I1126 08:02:31.108080 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/tempest-tests-tempest-s01-single-thread-testing" podStartSLOduration=4.108065591 podStartE2EDuration="4.108065591s" podCreationTimestamp="2025-11-26 08:02:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 08:02:31.102800752 +0000 UTC m=+4446.986689050" watchObservedRunningTime="2025-11-26 08:02:31.108065591 +0000 UTC m=+4446.991953889" Nov 26 08:02:31 crc kubenswrapper[4492]: I1126 08:02:31.438356 4492 scope.go:117] "RemoveContainer" containerID="662914ce15995ceb9487d63c3784ecf253e21638f72690c752d5cfd0b226bddb" Nov 26 08:02:31 crc kubenswrapper[4492]: E1126 08:02:31.438599 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:02:42 crc kubenswrapper[4492]: I1126 
08:02:42.439459 4492 scope.go:117] "RemoveContainer" containerID="662914ce15995ceb9487d63c3784ecf253e21638f72690c752d5cfd0b226bddb" Nov 26 08:02:42 crc kubenswrapper[4492]: E1126 08:02:42.440202 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:02:54 crc kubenswrapper[4492]: I1126 08:02:54.444095 4492 scope.go:117] "RemoveContainer" containerID="662914ce15995ceb9487d63c3784ecf253e21638f72690c752d5cfd0b226bddb" Nov 26 08:02:54 crc kubenswrapper[4492]: E1126 08:02:54.444676 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:03:06 crc kubenswrapper[4492]: I1126 08:03:06.439381 4492 scope.go:117] "RemoveContainer" containerID="662914ce15995ceb9487d63c3784ecf253e21638f72690c752d5cfd0b226bddb" Nov 26 08:03:06 crc kubenswrapper[4492]: E1126 08:03:06.439905 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:03:15 crc kubenswrapper[4492]: I1126 08:03:15.143649 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-5fbdc58545-c2wg9"] Nov 26 08:03:15 crc kubenswrapper[4492]: I1126 08:03:15.146167 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-5fbdc58545-c2wg9" Nov 26 08:03:15 crc kubenswrapper[4492]: I1126 08:03:15.161470 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-5fbdc58545-c2wg9"] Nov 26 08:03:15 crc kubenswrapper[4492]: I1126 08:03:15.281538 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/212dbc36-1b4c-4add-953b-70392589a53e-public-tls-certs\") pod \"neutron-5fbdc58545-c2wg9\" (UID: \"212dbc36-1b4c-4add-953b-70392589a53e\") " pod="openstack/neutron-5fbdc58545-c2wg9" Nov 26 08:03:15 crc kubenswrapper[4492]: I1126 08:03:15.281646 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g2nv2\" (UniqueName: \"kubernetes.io/projected/212dbc36-1b4c-4add-953b-70392589a53e-kube-api-access-g2nv2\") pod \"neutron-5fbdc58545-c2wg9\" (UID: \"212dbc36-1b4c-4add-953b-70392589a53e\") " pod="openstack/neutron-5fbdc58545-c2wg9" Nov 26 08:03:15 crc kubenswrapper[4492]: I1126 08:03:15.281701 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/212dbc36-1b4c-4add-953b-70392589a53e-httpd-config\") pod \"neutron-5fbdc58545-c2wg9\" (UID: \"212dbc36-1b4c-4add-953b-70392589a53e\") " pod="openstack/neutron-5fbdc58545-c2wg9" Nov 26 08:03:15 crc kubenswrapper[4492]: I1126 08:03:15.281738 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/212dbc36-1b4c-4add-953b-70392589a53e-ovndb-tls-certs\") pod \"neutron-5fbdc58545-c2wg9\" (UID: \"212dbc36-1b4c-4add-953b-70392589a53e\") " pod="openstack/neutron-5fbdc58545-c2wg9" Nov 26 08:03:15 crc kubenswrapper[4492]: I1126 08:03:15.281759 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/212dbc36-1b4c-4add-953b-70392589a53e-config\") pod \"neutron-5fbdc58545-c2wg9\" (UID: \"212dbc36-1b4c-4add-953b-70392589a53e\") " pod="openstack/neutron-5fbdc58545-c2wg9" Nov 26 08:03:15 crc kubenswrapper[4492]: I1126 08:03:15.281804 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/212dbc36-1b4c-4add-953b-70392589a53e-internal-tls-certs\") pod \"neutron-5fbdc58545-c2wg9\" (UID: \"212dbc36-1b4c-4add-953b-70392589a53e\") " pod="openstack/neutron-5fbdc58545-c2wg9" Nov 26 08:03:15 crc kubenswrapper[4492]: I1126 08:03:15.281818 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/212dbc36-1b4c-4add-953b-70392589a53e-combined-ca-bundle\") pod \"neutron-5fbdc58545-c2wg9\" (UID: \"212dbc36-1b4c-4add-953b-70392589a53e\") " pod="openstack/neutron-5fbdc58545-c2wg9" Nov 26 08:03:15 crc kubenswrapper[4492]: I1126 08:03:15.383115 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/212dbc36-1b4c-4add-953b-70392589a53e-public-tls-certs\") pod \"neutron-5fbdc58545-c2wg9\" (UID: \"212dbc36-1b4c-4add-953b-70392589a53e\") " pod="openstack/neutron-5fbdc58545-c2wg9" Nov 26 08:03:15 crc kubenswrapper[4492]: I1126 08:03:15.383401 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-g2nv2\" (UniqueName: \"kubernetes.io/projected/212dbc36-1b4c-4add-953b-70392589a53e-kube-api-access-g2nv2\") pod \"neutron-5fbdc58545-c2wg9\" (UID: \"212dbc36-1b4c-4add-953b-70392589a53e\") " pod="openstack/neutron-5fbdc58545-c2wg9" Nov 26 08:03:15 crc kubenswrapper[4492]: I1126 08:03:15.383512 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/212dbc36-1b4c-4add-953b-70392589a53e-httpd-config\") pod \"neutron-5fbdc58545-c2wg9\" (UID: \"212dbc36-1b4c-4add-953b-70392589a53e\") " pod="openstack/neutron-5fbdc58545-c2wg9" Nov 26 08:03:15 crc kubenswrapper[4492]: I1126 08:03:15.384004 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/212dbc36-1b4c-4add-953b-70392589a53e-ovndb-tls-certs\") pod \"neutron-5fbdc58545-c2wg9\" (UID: \"212dbc36-1b4c-4add-953b-70392589a53e\") " pod="openstack/neutron-5fbdc58545-c2wg9" Nov 26 08:03:15 crc kubenswrapper[4492]: I1126 08:03:15.384105 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/212dbc36-1b4c-4add-953b-70392589a53e-config\") pod \"neutron-5fbdc58545-c2wg9\" (UID: \"212dbc36-1b4c-4add-953b-70392589a53e\") " pod="openstack/neutron-5fbdc58545-c2wg9" Nov 26 08:03:15 crc kubenswrapper[4492]: I1126 08:03:15.384214 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/212dbc36-1b4c-4add-953b-70392589a53e-internal-tls-certs\") pod \"neutron-5fbdc58545-c2wg9\" (UID: \"212dbc36-1b4c-4add-953b-70392589a53e\") " pod="openstack/neutron-5fbdc58545-c2wg9" Nov 26 08:03:15 crc kubenswrapper[4492]: I1126 08:03:15.384700 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/212dbc36-1b4c-4add-953b-70392589a53e-combined-ca-bundle\") pod \"neutron-5fbdc58545-c2wg9\" (UID: \"212dbc36-1b4c-4add-953b-70392589a53e\") " pod="openstack/neutron-5fbdc58545-c2wg9" Nov 26 08:03:15 crc kubenswrapper[4492]: I1126 08:03:15.388830 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/212dbc36-1b4c-4add-953b-70392589a53e-public-tls-certs\") pod \"neutron-5fbdc58545-c2wg9\" (UID: \"212dbc36-1b4c-4add-953b-70392589a53e\") " pod="openstack/neutron-5fbdc58545-c2wg9" Nov 26 08:03:15 crc kubenswrapper[4492]: I1126 08:03:15.389790 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/212dbc36-1b4c-4add-953b-70392589a53e-httpd-config\") pod \"neutron-5fbdc58545-c2wg9\" (UID: \"212dbc36-1b4c-4add-953b-70392589a53e\") " pod="openstack/neutron-5fbdc58545-c2wg9" Nov 26 08:03:15 crc kubenswrapper[4492]: I1126 08:03:15.390034 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/212dbc36-1b4c-4add-953b-70392589a53e-combined-ca-bundle\") pod \"neutron-5fbdc58545-c2wg9\" (UID: \"212dbc36-1b4c-4add-953b-70392589a53e\") " pod="openstack/neutron-5fbdc58545-c2wg9" Nov 26 08:03:15 crc kubenswrapper[4492]: I1126 08:03:15.390678 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/212dbc36-1b4c-4add-953b-70392589a53e-internal-tls-certs\") pod \"neutron-5fbdc58545-c2wg9\" (UID: 
\"212dbc36-1b4c-4add-953b-70392589a53e\") " pod="openstack/neutron-5fbdc58545-c2wg9" Nov 26 08:03:15 crc kubenswrapper[4492]: I1126 08:03:15.397818 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g2nv2\" (UniqueName: \"kubernetes.io/projected/212dbc36-1b4c-4add-953b-70392589a53e-kube-api-access-g2nv2\") pod \"neutron-5fbdc58545-c2wg9\" (UID: \"212dbc36-1b4c-4add-953b-70392589a53e\") " pod="openstack/neutron-5fbdc58545-c2wg9" Nov 26 08:03:15 crc kubenswrapper[4492]: I1126 08:03:15.398512 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/212dbc36-1b4c-4add-953b-70392589a53e-ovndb-tls-certs\") pod \"neutron-5fbdc58545-c2wg9\" (UID: \"212dbc36-1b4c-4add-953b-70392589a53e\") " pod="openstack/neutron-5fbdc58545-c2wg9" Nov 26 08:03:15 crc kubenswrapper[4492]: I1126 08:03:15.406396 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/212dbc36-1b4c-4add-953b-70392589a53e-config\") pod \"neutron-5fbdc58545-c2wg9\" (UID: \"212dbc36-1b4c-4add-953b-70392589a53e\") " pod="openstack/neutron-5fbdc58545-c2wg9" Nov 26 08:03:15 crc kubenswrapper[4492]: I1126 08:03:15.460928 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-5fbdc58545-c2wg9" Nov 26 08:03:15 crc kubenswrapper[4492]: I1126 08:03:15.965475 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-5fbdc58545-c2wg9"] Nov 26 08:03:16 crc kubenswrapper[4492]: I1126 08:03:16.432625 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5fbdc58545-c2wg9" event={"ID":"212dbc36-1b4c-4add-953b-70392589a53e","Type":"ContainerStarted","Data":"619eb3041b12c862d0b441935fc9fc1a1c86130707ac4f001f31f338711f9b6b"} Nov 26 08:03:16 crc kubenswrapper[4492]: I1126 08:03:16.432857 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5fbdc58545-c2wg9" event={"ID":"212dbc36-1b4c-4add-953b-70392589a53e","Type":"ContainerStarted","Data":"d7744fa3c710939ec5016891f5f34c605135bc6a67488c47ad9a618a29aa496e"} Nov 26 08:03:16 crc kubenswrapper[4492]: I1126 08:03:16.433794 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-5fbdc58545-c2wg9" Nov 26 08:03:16 crc kubenswrapper[4492]: I1126 08:03:16.433841 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5fbdc58545-c2wg9" event={"ID":"212dbc36-1b4c-4add-953b-70392589a53e","Type":"ContainerStarted","Data":"ff55702af4ca74dda865b393806d543b28810a2d8fb2b0f2c4e6c9bbbe56a572"} Nov 26 08:03:16 crc kubenswrapper[4492]: I1126 08:03:16.451057 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-5fbdc58545-c2wg9" podStartSLOduration=1.4510428690000001 podStartE2EDuration="1.451042869s" podCreationTimestamp="2025-11-26 08:03:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 08:03:16.446005369 +0000 UTC m=+4492.329893666" watchObservedRunningTime="2025-11-26 08:03:16.451042869 +0000 UTC m=+4492.334931167" Nov 26 08:03:21 crc kubenswrapper[4492]: I1126 08:03:21.438063 4492 scope.go:117] "RemoveContainer" containerID="662914ce15995ceb9487d63c3784ecf253e21638f72690c752d5cfd0b226bddb" Nov 26 08:03:21 crc kubenswrapper[4492]: E1126 08:03:21.438731 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:03:35 crc kubenswrapper[4492]: I1126 08:03:35.438378 4492 scope.go:117] "RemoveContainer" containerID="662914ce15995ceb9487d63c3784ecf253e21638f72690c752d5cfd0b226bddb" Nov 26 08:03:35 crc kubenswrapper[4492]: E1126 08:03:35.439238 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:03:45 crc kubenswrapper[4492]: I1126 08:03:45.476811 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-5fbdc58545-c2wg9" Nov 26 08:03:45 crc kubenswrapper[4492]: I1126 08:03:45.573237 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-5f9588bddf-km75q"] Nov 26 08:03:45 crc kubenswrapper[4492]: I1126 08:03:45.573586 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-5f9588bddf-km75q" podUID="7c875225-0af5-49c4-b273-85ed6c498f18" containerName="neutron-api" containerID="cri-o://c7546407b5fa0442bb401376869bf4094a121be1f6dc9fc2fb124bf943eb4775" gracePeriod=30 Nov 26 08:03:45 crc kubenswrapper[4492]: I1126 08:03:45.574208 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-5f9588bddf-km75q" podUID="7c875225-0af5-49c4-b273-85ed6c498f18" containerName="neutron-httpd" containerID="cri-o://be48dc4add9237f59dba9cdc87b34e1c170d387394fd55601a0311acbc5423b1" gracePeriod=30 Nov 26 08:03:46 crc kubenswrapper[4492]: I1126 08:03:46.685402 4492 generic.go:334] "Generic (PLEG): container finished" podID="7c875225-0af5-49c4-b273-85ed6c498f18" containerID="be48dc4add9237f59dba9cdc87b34e1c170d387394fd55601a0311acbc5423b1" exitCode=0 Nov 26 08:03:46 crc kubenswrapper[4492]: I1126 08:03:46.685912 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5f9588bddf-km75q" event={"ID":"7c875225-0af5-49c4-b273-85ed6c498f18","Type":"ContainerDied","Data":"be48dc4add9237f59dba9cdc87b34e1c170d387394fd55601a0311acbc5423b1"} Nov 26 08:03:49 crc kubenswrapper[4492]: I1126 08:03:49.187339 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-7tfb9"] Nov 26 08:03:49 crc kubenswrapper[4492]: I1126 08:03:49.190817 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-7tfb9" Nov 26 08:03:49 crc kubenswrapper[4492]: I1126 08:03:49.203194 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-7tfb9"] Nov 26 08:03:49 crc kubenswrapper[4492]: I1126 08:03:49.243766 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/69059ca2-6549-443a-ab72-ce22b65017b8-catalog-content\") pod \"community-operators-7tfb9\" (UID: \"69059ca2-6549-443a-ab72-ce22b65017b8\") " pod="openshift-marketplace/community-operators-7tfb9" Nov 26 08:03:49 crc kubenswrapper[4492]: I1126 08:03:49.243944 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gxqbt\" (UniqueName: \"kubernetes.io/projected/69059ca2-6549-443a-ab72-ce22b65017b8-kube-api-access-gxqbt\") pod \"community-operators-7tfb9\" (UID: \"69059ca2-6549-443a-ab72-ce22b65017b8\") " pod="openshift-marketplace/community-operators-7tfb9" Nov 26 08:03:49 crc kubenswrapper[4492]: I1126 08:03:49.244151 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/69059ca2-6549-443a-ab72-ce22b65017b8-utilities\") pod \"community-operators-7tfb9\" (UID: \"69059ca2-6549-443a-ab72-ce22b65017b8\") " pod="openshift-marketplace/community-operators-7tfb9" Nov 26 08:03:49 crc kubenswrapper[4492]: I1126 08:03:49.346412 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gxqbt\" (UniqueName: \"kubernetes.io/projected/69059ca2-6549-443a-ab72-ce22b65017b8-kube-api-access-gxqbt\") pod \"community-operators-7tfb9\" (UID: \"69059ca2-6549-443a-ab72-ce22b65017b8\") " pod="openshift-marketplace/community-operators-7tfb9" Nov 26 08:03:49 crc kubenswrapper[4492]: I1126 08:03:49.346499 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/69059ca2-6549-443a-ab72-ce22b65017b8-utilities\") pod \"community-operators-7tfb9\" (UID: \"69059ca2-6549-443a-ab72-ce22b65017b8\") " pod="openshift-marketplace/community-operators-7tfb9" Nov 26 08:03:49 crc kubenswrapper[4492]: I1126 08:03:49.346619 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/69059ca2-6549-443a-ab72-ce22b65017b8-catalog-content\") pod \"community-operators-7tfb9\" (UID: \"69059ca2-6549-443a-ab72-ce22b65017b8\") " pod="openshift-marketplace/community-operators-7tfb9" Nov 26 08:03:49 crc kubenswrapper[4492]: I1126 08:03:49.347018 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/69059ca2-6549-443a-ab72-ce22b65017b8-catalog-content\") pod \"community-operators-7tfb9\" (UID: \"69059ca2-6549-443a-ab72-ce22b65017b8\") " pod="openshift-marketplace/community-operators-7tfb9" Nov 26 08:03:49 crc kubenswrapper[4492]: I1126 08:03:49.347275 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/69059ca2-6549-443a-ab72-ce22b65017b8-utilities\") pod \"community-operators-7tfb9\" (UID: \"69059ca2-6549-443a-ab72-ce22b65017b8\") " pod="openshift-marketplace/community-operators-7tfb9" Nov 26 08:03:49 crc kubenswrapper[4492]: I1126 08:03:49.374056 4492 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-gxqbt\" (UniqueName: \"kubernetes.io/projected/69059ca2-6549-443a-ab72-ce22b65017b8-kube-api-access-gxqbt\") pod \"community-operators-7tfb9\" (UID: \"69059ca2-6549-443a-ab72-ce22b65017b8\") " pod="openshift-marketplace/community-operators-7tfb9" Nov 26 08:03:49 crc kubenswrapper[4492]: I1126 08:03:49.510091 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-7tfb9" Nov 26 08:03:50 crc kubenswrapper[4492]: I1126 08:03:50.018979 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-7tfb9"] Nov 26 08:03:50 crc kubenswrapper[4492]: W1126 08:03:50.026265 4492 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod69059ca2_6549_443a_ab72_ce22b65017b8.slice/crio-95c8fb5a8ed32b1903bf89f7d0a2390a2a12bb4e6f1f38b93181f877bd03b283 WatchSource:0}: Error finding container 95c8fb5a8ed32b1903bf89f7d0a2390a2a12bb4e6f1f38b93181f877bd03b283: Status 404 returned error can't find the container with id 95c8fb5a8ed32b1903bf89f7d0a2390a2a12bb4e6f1f38b93181f877bd03b283 Nov 26 08:03:50 crc kubenswrapper[4492]: I1126 08:03:50.438773 4492 scope.go:117] "RemoveContainer" containerID="662914ce15995ceb9487d63c3784ecf253e21638f72690c752d5cfd0b226bddb" Nov 26 08:03:50 crc kubenswrapper[4492]: E1126 08:03:50.439076 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:03:50 crc kubenswrapper[4492]: I1126 08:03:50.743934 4492 generic.go:334] "Generic (PLEG): container finished" podID="69059ca2-6549-443a-ab72-ce22b65017b8" containerID="0a68b6dd544868c7f483e38f9737594e4184d8de619f20d940a4ef6bb6f4f9e3" exitCode=0 Nov 26 08:03:50 crc kubenswrapper[4492]: I1126 08:03:50.744014 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7tfb9" event={"ID":"69059ca2-6549-443a-ab72-ce22b65017b8","Type":"ContainerDied","Data":"0a68b6dd544868c7f483e38f9737594e4184d8de619f20d940a4ef6bb6f4f9e3"} Nov 26 08:03:50 crc kubenswrapper[4492]: I1126 08:03:50.744804 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7tfb9" event={"ID":"69059ca2-6549-443a-ab72-ce22b65017b8","Type":"ContainerStarted","Data":"95c8fb5a8ed32b1903bf89f7d0a2390a2a12bb4e6f1f38b93181f877bd03b283"} Nov 26 08:03:50 crc kubenswrapper[4492]: I1126 08:03:50.761112 4492 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 26 08:03:52 crc kubenswrapper[4492]: I1126 08:03:52.762730 4492 generic.go:334] "Generic (PLEG): container finished" podID="69059ca2-6549-443a-ab72-ce22b65017b8" containerID="4b6617317c772f4523227c6cd4fc6f3d855204301dc854fdf0971607eb458594" exitCode=0 Nov 26 08:03:52 crc kubenswrapper[4492]: I1126 08:03:52.763052 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7tfb9" event={"ID":"69059ca2-6549-443a-ab72-ce22b65017b8","Type":"ContainerDied","Data":"4b6617317c772f4523227c6cd4fc6f3d855204301dc854fdf0971607eb458594"} Nov 26 08:03:54 crc 
kubenswrapper[4492]: I1126 08:03:54.784614 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7tfb9" event={"ID":"69059ca2-6549-443a-ab72-ce22b65017b8","Type":"ContainerStarted","Data":"69701e191ff966a3a98b99427eb74a664b6a84ffcfc7ca3a02dae5e98eedbc0e"} Nov 26 08:03:54 crc kubenswrapper[4492]: I1126 08:03:54.819889 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-7tfb9" podStartSLOduration=3.274125662 podStartE2EDuration="5.819846882s" podCreationTimestamp="2025-11-26 08:03:49 +0000 UTC" firstStartedPulling="2025-11-26 08:03:50.759362603 +0000 UTC m=+4526.643250902" lastFinishedPulling="2025-11-26 08:03:53.305083823 +0000 UTC m=+4529.188972122" observedRunningTime="2025-11-26 08:03:54.801899497 +0000 UTC m=+4530.685787796" watchObservedRunningTime="2025-11-26 08:03:54.819846882 +0000 UTC m=+4530.703735181" Nov 26 08:03:59 crc kubenswrapper[4492]: I1126 08:03:59.512226 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-7tfb9" Nov 26 08:03:59 crc kubenswrapper[4492]: I1126 08:03:59.512948 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-7tfb9" Nov 26 08:03:59 crc kubenswrapper[4492]: I1126 08:03:59.561540 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-7tfb9" Nov 26 08:03:59 crc kubenswrapper[4492]: I1126 08:03:59.719157 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-5f9588bddf-km75q" Nov 26 08:03:59 crc kubenswrapper[4492]: I1126 08:03:59.812870 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7c875225-0af5-49c4-b273-85ed6c498f18-combined-ca-bundle\") pod \"7c875225-0af5-49c4-b273-85ed6c498f18\" (UID: \"7c875225-0af5-49c4-b273-85ed6c498f18\") " Nov 26 08:03:59 crc kubenswrapper[4492]: I1126 08:03:59.813140 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/7c875225-0af5-49c4-b273-85ed6c498f18-ovndb-tls-certs\") pod \"7c875225-0af5-49c4-b273-85ed6c498f18\" (UID: \"7c875225-0af5-49c4-b273-85ed6c498f18\") " Nov 26 08:03:59 crc kubenswrapper[4492]: I1126 08:03:59.813310 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rxnrg\" (UniqueName: \"kubernetes.io/projected/7c875225-0af5-49c4-b273-85ed6c498f18-kube-api-access-rxnrg\") pod \"7c875225-0af5-49c4-b273-85ed6c498f18\" (UID: \"7c875225-0af5-49c4-b273-85ed6c498f18\") " Nov 26 08:03:59 crc kubenswrapper[4492]: I1126 08:03:59.813387 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/7c875225-0af5-49c4-b273-85ed6c498f18-public-tls-certs\") pod \"7c875225-0af5-49c4-b273-85ed6c498f18\" (UID: \"7c875225-0af5-49c4-b273-85ed6c498f18\") " Nov 26 08:03:59 crc kubenswrapper[4492]: I1126 08:03:59.813596 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/7c875225-0af5-49c4-b273-85ed6c498f18-config\") pod \"7c875225-0af5-49c4-b273-85ed6c498f18\" (UID: \"7c875225-0af5-49c4-b273-85ed6c498f18\") " Nov 26 08:03:59 crc kubenswrapper[4492]: I1126 08:03:59.813711 4492 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7c875225-0af5-49c4-b273-85ed6c498f18-internal-tls-certs\") pod \"7c875225-0af5-49c4-b273-85ed6c498f18\" (UID: \"7c875225-0af5-49c4-b273-85ed6c498f18\") " Nov 26 08:03:59 crc kubenswrapper[4492]: I1126 08:03:59.813821 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/7c875225-0af5-49c4-b273-85ed6c498f18-httpd-config\") pod \"7c875225-0af5-49c4-b273-85ed6c498f18\" (UID: \"7c875225-0af5-49c4-b273-85ed6c498f18\") " Nov 26 08:03:59 crc kubenswrapper[4492]: I1126 08:03:59.831242 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7c875225-0af5-49c4-b273-85ed6c498f18-kube-api-access-rxnrg" (OuterVolumeSpecName: "kube-api-access-rxnrg") pod "7c875225-0af5-49c4-b273-85ed6c498f18" (UID: "7c875225-0af5-49c4-b273-85ed6c498f18"). InnerVolumeSpecName "kube-api-access-rxnrg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:03:59 crc kubenswrapper[4492]: I1126 08:03:59.834941 4492 generic.go:334] "Generic (PLEG): container finished" podID="7c875225-0af5-49c4-b273-85ed6c498f18" containerID="c7546407b5fa0442bb401376869bf4094a121be1f6dc9fc2fb124bf943eb4775" exitCode=0 Nov 26 08:03:59 crc kubenswrapper[4492]: I1126 08:03:59.835019 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5f9588bddf-km75q" event={"ID":"7c875225-0af5-49c4-b273-85ed6c498f18","Type":"ContainerDied","Data":"c7546407b5fa0442bb401376869bf4094a121be1f6dc9fc2fb124bf943eb4775"} Nov 26 08:03:59 crc kubenswrapper[4492]: I1126 08:03:59.835075 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5f9588bddf-km75q" event={"ID":"7c875225-0af5-49c4-b273-85ed6c498f18","Type":"ContainerDied","Data":"b8bdf71a4288fd2da3f7ff415135962a77b29f00aa12ca62a7da9609a83c8b7b"} Nov 26 08:03:59 crc kubenswrapper[4492]: I1126 08:03:59.835094 4492 scope.go:117] "RemoveContainer" containerID="be48dc4add9237f59dba9cdc87b34e1c170d387394fd55601a0311acbc5423b1" Nov 26 08:03:59 crc kubenswrapper[4492]: I1126 08:03:59.835221 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-5f9588bddf-km75q" Nov 26 08:03:59 crc kubenswrapper[4492]: I1126 08:03:59.839626 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7c875225-0af5-49c4-b273-85ed6c498f18-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "7c875225-0af5-49c4-b273-85ed6c498f18" (UID: "7c875225-0af5-49c4-b273-85ed6c498f18"). InnerVolumeSpecName "httpd-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:03:59 crc kubenswrapper[4492]: I1126 08:03:59.858043 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7c875225-0af5-49c4-b273-85ed6c498f18-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7c875225-0af5-49c4-b273-85ed6c498f18" (UID: "7c875225-0af5-49c4-b273-85ed6c498f18"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:03:59 crc kubenswrapper[4492]: I1126 08:03:59.866680 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7c875225-0af5-49c4-b273-85ed6c498f18-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "7c875225-0af5-49c4-b273-85ed6c498f18" (UID: "7c875225-0af5-49c4-b273-85ed6c498f18"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:03:59 crc kubenswrapper[4492]: I1126 08:03:59.866936 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7c875225-0af5-49c4-b273-85ed6c498f18-config" (OuterVolumeSpecName: "config") pod "7c875225-0af5-49c4-b273-85ed6c498f18" (UID: "7c875225-0af5-49c4-b273-85ed6c498f18"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:03:59 crc kubenswrapper[4492]: I1126 08:03:59.871379 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7c875225-0af5-49c4-b273-85ed6c498f18-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "7c875225-0af5-49c4-b273-85ed6c498f18" (UID: "7c875225-0af5-49c4-b273-85ed6c498f18"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:03:59 crc kubenswrapper[4492]: I1126 08:03:59.879008 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-7tfb9" Nov 26 08:03:59 crc kubenswrapper[4492]: I1126 08:03:59.894066 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7c875225-0af5-49c4-b273-85ed6c498f18-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "7c875225-0af5-49c4-b273-85ed6c498f18" (UID: "7c875225-0af5-49c4-b273-85ed6c498f18"). InnerVolumeSpecName "ovndb-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:03:59 crc kubenswrapper[4492]: I1126 08:03:59.916520 4492 scope.go:117] "RemoveContainer" containerID="c7546407b5fa0442bb401376869bf4094a121be1f6dc9fc2fb124bf943eb4775" Nov 26 08:03:59 crc kubenswrapper[4492]: I1126 08:03:59.921381 4492 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7c875225-0af5-49c4-b273-85ed6c498f18-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 08:03:59 crc kubenswrapper[4492]: I1126 08:03:59.921460 4492 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/7c875225-0af5-49c4-b273-85ed6c498f18-httpd-config\") on node \"crc\" DevicePath \"\"" Nov 26 08:03:59 crc kubenswrapper[4492]: I1126 08:03:59.921476 4492 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7c875225-0af5-49c4-b273-85ed6c498f18-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 08:03:59 crc kubenswrapper[4492]: I1126 08:03:59.921489 4492 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/7c875225-0af5-49c4-b273-85ed6c498f18-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 08:03:59 crc kubenswrapper[4492]: I1126 08:03:59.921506 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rxnrg\" (UniqueName: \"kubernetes.io/projected/7c875225-0af5-49c4-b273-85ed6c498f18-kube-api-access-rxnrg\") on node \"crc\" DevicePath \"\"" Nov 26 08:03:59 crc kubenswrapper[4492]: I1126 08:03:59.921522 4492 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/7c875225-0af5-49c4-b273-85ed6c498f18-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 08:03:59 crc kubenswrapper[4492]: I1126 08:03:59.921533 4492 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/7c875225-0af5-49c4-b273-85ed6c498f18-config\") on node \"crc\" DevicePath \"\"" Nov 26 08:03:59 crc kubenswrapper[4492]: I1126 08:03:59.935985 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-7tfb9"] Nov 26 08:03:59 crc kubenswrapper[4492]: I1126 08:03:59.946461 4492 scope.go:117] "RemoveContainer" containerID="be48dc4add9237f59dba9cdc87b34e1c170d387394fd55601a0311acbc5423b1" Nov 26 08:03:59 crc kubenswrapper[4492]: E1126 08:03:59.947302 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"be48dc4add9237f59dba9cdc87b34e1c170d387394fd55601a0311acbc5423b1\": container with ID starting with be48dc4add9237f59dba9cdc87b34e1c170d387394fd55601a0311acbc5423b1 not found: ID does not exist" containerID="be48dc4add9237f59dba9cdc87b34e1c170d387394fd55601a0311acbc5423b1" Nov 26 08:03:59 crc kubenswrapper[4492]: I1126 08:03:59.947414 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"be48dc4add9237f59dba9cdc87b34e1c170d387394fd55601a0311acbc5423b1"} err="failed to get container status \"be48dc4add9237f59dba9cdc87b34e1c170d387394fd55601a0311acbc5423b1\": rpc error: code = NotFound desc = could not find container \"be48dc4add9237f59dba9cdc87b34e1c170d387394fd55601a0311acbc5423b1\": container with ID starting with be48dc4add9237f59dba9cdc87b34e1c170d387394fd55601a0311acbc5423b1 not found: ID does not exist" Nov 26 08:03:59 crc 
kubenswrapper[4492]: I1126 08:03:59.947499 4492 scope.go:117] "RemoveContainer" containerID="c7546407b5fa0442bb401376869bf4094a121be1f6dc9fc2fb124bf943eb4775" Nov 26 08:03:59 crc kubenswrapper[4492]: E1126 08:03:59.947970 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c7546407b5fa0442bb401376869bf4094a121be1f6dc9fc2fb124bf943eb4775\": container with ID starting with c7546407b5fa0442bb401376869bf4094a121be1f6dc9fc2fb124bf943eb4775 not found: ID does not exist" containerID="c7546407b5fa0442bb401376869bf4094a121be1f6dc9fc2fb124bf943eb4775" Nov 26 08:03:59 crc kubenswrapper[4492]: I1126 08:03:59.948001 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c7546407b5fa0442bb401376869bf4094a121be1f6dc9fc2fb124bf943eb4775"} err="failed to get container status \"c7546407b5fa0442bb401376869bf4094a121be1f6dc9fc2fb124bf943eb4775\": rpc error: code = NotFound desc = could not find container \"c7546407b5fa0442bb401376869bf4094a121be1f6dc9fc2fb124bf943eb4775\": container with ID starting with c7546407b5fa0442bb401376869bf4094a121be1f6dc9fc2fb124bf943eb4775 not found: ID does not exist" Nov 26 08:04:00 crc kubenswrapper[4492]: I1126 08:04:00.168474 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-5f9588bddf-km75q"] Nov 26 08:04:00 crc kubenswrapper[4492]: I1126 08:04:00.174982 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-5f9588bddf-km75q"] Nov 26 08:04:00 crc kubenswrapper[4492]: I1126 08:04:00.453883 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7c875225-0af5-49c4-b273-85ed6c498f18" path="/var/lib/kubelet/pods/7c875225-0af5-49c4-b273-85ed6c498f18/volumes" Nov 26 08:04:01 crc kubenswrapper[4492]: I1126 08:04:01.851731 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-7tfb9" podUID="69059ca2-6549-443a-ab72-ce22b65017b8" containerName="registry-server" containerID="cri-o://69701e191ff966a3a98b99427eb74a664b6a84ffcfc7ca3a02dae5e98eedbc0e" gracePeriod=2 Nov 26 08:04:02 crc kubenswrapper[4492]: I1126 08:04:02.314267 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-7tfb9" Nov 26 08:04:02 crc kubenswrapper[4492]: I1126 08:04:02.492824 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gxqbt\" (UniqueName: \"kubernetes.io/projected/69059ca2-6549-443a-ab72-ce22b65017b8-kube-api-access-gxqbt\") pod \"69059ca2-6549-443a-ab72-ce22b65017b8\" (UID: \"69059ca2-6549-443a-ab72-ce22b65017b8\") " Nov 26 08:04:02 crc kubenswrapper[4492]: I1126 08:04:02.492950 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/69059ca2-6549-443a-ab72-ce22b65017b8-utilities\") pod \"69059ca2-6549-443a-ab72-ce22b65017b8\" (UID: \"69059ca2-6549-443a-ab72-ce22b65017b8\") " Nov 26 08:04:02 crc kubenswrapper[4492]: I1126 08:04:02.493102 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/69059ca2-6549-443a-ab72-ce22b65017b8-catalog-content\") pod \"69059ca2-6549-443a-ab72-ce22b65017b8\" (UID: \"69059ca2-6549-443a-ab72-ce22b65017b8\") " Nov 26 08:04:02 crc kubenswrapper[4492]: I1126 08:04:02.493625 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/69059ca2-6549-443a-ab72-ce22b65017b8-utilities" (OuterVolumeSpecName: "utilities") pod "69059ca2-6549-443a-ab72-ce22b65017b8" (UID: "69059ca2-6549-443a-ab72-ce22b65017b8"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 08:04:02 crc kubenswrapper[4492]: I1126 08:04:02.494231 4492 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/69059ca2-6549-443a-ab72-ce22b65017b8-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 08:04:02 crc kubenswrapper[4492]: I1126 08:04:02.500227 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/69059ca2-6549-443a-ab72-ce22b65017b8-kube-api-access-gxqbt" (OuterVolumeSpecName: "kube-api-access-gxqbt") pod "69059ca2-6549-443a-ab72-ce22b65017b8" (UID: "69059ca2-6549-443a-ab72-ce22b65017b8"). InnerVolumeSpecName "kube-api-access-gxqbt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:04:02 crc kubenswrapper[4492]: I1126 08:04:02.534912 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/69059ca2-6549-443a-ab72-ce22b65017b8-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "69059ca2-6549-443a-ab72-ce22b65017b8" (UID: "69059ca2-6549-443a-ab72-ce22b65017b8"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 08:04:02 crc kubenswrapper[4492]: I1126 08:04:02.596282 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gxqbt\" (UniqueName: \"kubernetes.io/projected/69059ca2-6549-443a-ab72-ce22b65017b8-kube-api-access-gxqbt\") on node \"crc\" DevicePath \"\"" Nov 26 08:04:02 crc kubenswrapper[4492]: I1126 08:04:02.596313 4492 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/69059ca2-6549-443a-ab72-ce22b65017b8-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 08:04:02 crc kubenswrapper[4492]: I1126 08:04:02.867292 4492 generic.go:334] "Generic (PLEG): container finished" podID="69059ca2-6549-443a-ab72-ce22b65017b8" containerID="69701e191ff966a3a98b99427eb74a664b6a84ffcfc7ca3a02dae5e98eedbc0e" exitCode=0 Nov 26 08:04:02 crc kubenswrapper[4492]: I1126 08:04:02.867331 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-7tfb9" Nov 26 08:04:02 crc kubenswrapper[4492]: I1126 08:04:02.867355 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7tfb9" event={"ID":"69059ca2-6549-443a-ab72-ce22b65017b8","Type":"ContainerDied","Data":"69701e191ff966a3a98b99427eb74a664b6a84ffcfc7ca3a02dae5e98eedbc0e"} Nov 26 08:04:02 crc kubenswrapper[4492]: I1126 08:04:02.867409 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7tfb9" event={"ID":"69059ca2-6549-443a-ab72-ce22b65017b8","Type":"ContainerDied","Data":"95c8fb5a8ed32b1903bf89f7d0a2390a2a12bb4e6f1f38b93181f877bd03b283"} Nov 26 08:04:02 crc kubenswrapper[4492]: I1126 08:04:02.867431 4492 scope.go:117] "RemoveContainer" containerID="69701e191ff966a3a98b99427eb74a664b6a84ffcfc7ca3a02dae5e98eedbc0e" Nov 26 08:04:02 crc kubenswrapper[4492]: I1126 08:04:02.930326 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-7tfb9"] Nov 26 08:04:02 crc kubenswrapper[4492]: I1126 08:04:02.931424 4492 scope.go:117] "RemoveContainer" containerID="4b6617317c772f4523227c6cd4fc6f3d855204301dc854fdf0971607eb458594" Nov 26 08:04:02 crc kubenswrapper[4492]: I1126 08:04:02.938435 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-7tfb9"] Nov 26 08:04:02 crc kubenswrapper[4492]: I1126 08:04:02.960221 4492 scope.go:117] "RemoveContainer" containerID="0a68b6dd544868c7f483e38f9737594e4184d8de619f20d940a4ef6bb6f4f9e3" Nov 26 08:04:02 crc kubenswrapper[4492]: I1126 08:04:02.999676 4492 scope.go:117] "RemoveContainer" containerID="69701e191ff966a3a98b99427eb74a664b6a84ffcfc7ca3a02dae5e98eedbc0e" Nov 26 08:04:03 crc kubenswrapper[4492]: E1126 08:04:03.001000 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"69701e191ff966a3a98b99427eb74a664b6a84ffcfc7ca3a02dae5e98eedbc0e\": container with ID starting with 69701e191ff966a3a98b99427eb74a664b6a84ffcfc7ca3a02dae5e98eedbc0e not found: ID does not exist" containerID="69701e191ff966a3a98b99427eb74a664b6a84ffcfc7ca3a02dae5e98eedbc0e" Nov 26 08:04:03 crc kubenswrapper[4492]: I1126 08:04:03.001039 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"69701e191ff966a3a98b99427eb74a664b6a84ffcfc7ca3a02dae5e98eedbc0e"} err="failed to get container status 
\"69701e191ff966a3a98b99427eb74a664b6a84ffcfc7ca3a02dae5e98eedbc0e\": rpc error: code = NotFound desc = could not find container \"69701e191ff966a3a98b99427eb74a664b6a84ffcfc7ca3a02dae5e98eedbc0e\": container with ID starting with 69701e191ff966a3a98b99427eb74a664b6a84ffcfc7ca3a02dae5e98eedbc0e not found: ID does not exist" Nov 26 08:04:03 crc kubenswrapper[4492]: I1126 08:04:03.001081 4492 scope.go:117] "RemoveContainer" containerID="4b6617317c772f4523227c6cd4fc6f3d855204301dc854fdf0971607eb458594" Nov 26 08:04:03 crc kubenswrapper[4492]: E1126 08:04:03.002075 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4b6617317c772f4523227c6cd4fc6f3d855204301dc854fdf0971607eb458594\": container with ID starting with 4b6617317c772f4523227c6cd4fc6f3d855204301dc854fdf0971607eb458594 not found: ID does not exist" containerID="4b6617317c772f4523227c6cd4fc6f3d855204301dc854fdf0971607eb458594" Nov 26 08:04:03 crc kubenswrapper[4492]: I1126 08:04:03.002106 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4b6617317c772f4523227c6cd4fc6f3d855204301dc854fdf0971607eb458594"} err="failed to get container status \"4b6617317c772f4523227c6cd4fc6f3d855204301dc854fdf0971607eb458594\": rpc error: code = NotFound desc = could not find container \"4b6617317c772f4523227c6cd4fc6f3d855204301dc854fdf0971607eb458594\": container with ID starting with 4b6617317c772f4523227c6cd4fc6f3d855204301dc854fdf0971607eb458594 not found: ID does not exist" Nov 26 08:04:03 crc kubenswrapper[4492]: I1126 08:04:03.002130 4492 scope.go:117] "RemoveContainer" containerID="0a68b6dd544868c7f483e38f9737594e4184d8de619f20d940a4ef6bb6f4f9e3" Nov 26 08:04:03 crc kubenswrapper[4492]: E1126 08:04:03.002527 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0a68b6dd544868c7f483e38f9737594e4184d8de619f20d940a4ef6bb6f4f9e3\": container with ID starting with 0a68b6dd544868c7f483e38f9737594e4184d8de619f20d940a4ef6bb6f4f9e3 not found: ID does not exist" containerID="0a68b6dd544868c7f483e38f9737594e4184d8de619f20d940a4ef6bb6f4f9e3" Nov 26 08:04:03 crc kubenswrapper[4492]: I1126 08:04:03.002552 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0a68b6dd544868c7f483e38f9737594e4184d8de619f20d940a4ef6bb6f4f9e3"} err="failed to get container status \"0a68b6dd544868c7f483e38f9737594e4184d8de619f20d940a4ef6bb6f4f9e3\": rpc error: code = NotFound desc = could not find container \"0a68b6dd544868c7f483e38f9737594e4184d8de619f20d940a4ef6bb6f4f9e3\": container with ID starting with 0a68b6dd544868c7f483e38f9737594e4184d8de619f20d940a4ef6bb6f4f9e3 not found: ID does not exist" Nov 26 08:04:04 crc kubenswrapper[4492]: I1126 08:04:04.451482 4492 scope.go:117] "RemoveContainer" containerID="662914ce15995ceb9487d63c3784ecf253e21638f72690c752d5cfd0b226bddb" Nov 26 08:04:04 crc kubenswrapper[4492]: E1126 08:04:04.452154 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:04:04 crc kubenswrapper[4492]: I1126 08:04:04.453899 4492 
kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="69059ca2-6549-443a-ab72-ce22b65017b8" path="/var/lib/kubelet/pods/69059ca2-6549-443a-ab72-ce22b65017b8/volumes" Nov 26 08:04:16 crc kubenswrapper[4492]: I1126 08:04:16.439414 4492 scope.go:117] "RemoveContainer" containerID="662914ce15995ceb9487d63c3784ecf253e21638f72690c752d5cfd0b226bddb" Nov 26 08:04:16 crc kubenswrapper[4492]: E1126 08:04:16.441292 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:04:29 crc kubenswrapper[4492]: I1126 08:04:29.438816 4492 scope.go:117] "RemoveContainer" containerID="662914ce15995ceb9487d63c3784ecf253e21638f72690c752d5cfd0b226bddb" Nov 26 08:04:29 crc kubenswrapper[4492]: E1126 08:04:29.439655 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:04:40 crc kubenswrapper[4492]: I1126 08:04:40.439335 4492 scope.go:117] "RemoveContainer" containerID="662914ce15995ceb9487d63c3784ecf253e21638f72690c752d5cfd0b226bddb" Nov 26 08:04:40 crc kubenswrapper[4492]: E1126 08:04:40.439891 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:04:52 crc kubenswrapper[4492]: I1126 08:04:52.439564 4492 scope.go:117] "RemoveContainer" containerID="662914ce15995ceb9487d63c3784ecf253e21638f72690c752d5cfd0b226bddb" Nov 26 08:04:53 crc kubenswrapper[4492]: I1126 08:04:53.272805 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" event={"ID":"04bf18ad-d2a1-4b30-a3fa-2b6247363c82","Type":"ContainerStarted","Data":"4294d04f48a00d21db3973cf76008e401cc1a49db778781a5749be9989d44808"} Nov 26 08:06:54 crc kubenswrapper[4492]: I1126 08:06:54.895511 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-6f6nw"] Nov 26 08:06:54 crc kubenswrapper[4492]: E1126 08:06:54.899715 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="69059ca2-6549-443a-ab72-ce22b65017b8" containerName="extract-content" Nov 26 08:06:54 crc kubenswrapper[4492]: I1126 08:06:54.899753 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="69059ca2-6549-443a-ab72-ce22b65017b8" containerName="extract-content" Nov 26 08:06:54 crc kubenswrapper[4492]: E1126 08:06:54.899793 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="69059ca2-6549-443a-ab72-ce22b65017b8" containerName="registry-server" Nov 26 08:06:54 crc kubenswrapper[4492]: I1126 08:06:54.899800 4492 
state_mem.go:107] "Deleted CPUSet assignment" podUID="69059ca2-6549-443a-ab72-ce22b65017b8" containerName="registry-server" Nov 26 08:06:54 crc kubenswrapper[4492]: E1126 08:06:54.899817 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7c875225-0af5-49c4-b273-85ed6c498f18" containerName="neutron-api" Nov 26 08:06:54 crc kubenswrapper[4492]: I1126 08:06:54.899822 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="7c875225-0af5-49c4-b273-85ed6c498f18" containerName="neutron-api" Nov 26 08:06:54 crc kubenswrapper[4492]: E1126 08:06:54.899863 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="69059ca2-6549-443a-ab72-ce22b65017b8" containerName="extract-utilities" Nov 26 08:06:54 crc kubenswrapper[4492]: I1126 08:06:54.899869 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="69059ca2-6549-443a-ab72-ce22b65017b8" containerName="extract-utilities" Nov 26 08:06:54 crc kubenswrapper[4492]: E1126 08:06:54.899879 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7c875225-0af5-49c4-b273-85ed6c498f18" containerName="neutron-httpd" Nov 26 08:06:54 crc kubenswrapper[4492]: I1126 08:06:54.899886 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="7c875225-0af5-49c4-b273-85ed6c498f18" containerName="neutron-httpd" Nov 26 08:06:54 crc kubenswrapper[4492]: I1126 08:06:54.900553 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="69059ca2-6549-443a-ab72-ce22b65017b8" containerName="registry-server" Nov 26 08:06:54 crc kubenswrapper[4492]: I1126 08:06:54.900591 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="7c875225-0af5-49c4-b273-85ed6c498f18" containerName="neutron-httpd" Nov 26 08:06:54 crc kubenswrapper[4492]: I1126 08:06:54.900618 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="7c875225-0af5-49c4-b273-85ed6c498f18" containerName="neutron-api" Nov 26 08:06:54 crc kubenswrapper[4492]: I1126 08:06:54.902029 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6f6nw" Nov 26 08:06:54 crc kubenswrapper[4492]: I1126 08:06:54.922779 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-6f6nw"] Nov 26 08:06:54 crc kubenswrapper[4492]: I1126 08:06:54.982638 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/26225f43-35e1-471c-9061-702aedd9bd7f-catalog-content\") pod \"redhat-marketplace-6f6nw\" (UID: \"26225f43-35e1-471c-9061-702aedd9bd7f\") " pod="openshift-marketplace/redhat-marketplace-6f6nw" Nov 26 08:06:54 crc kubenswrapper[4492]: I1126 08:06:54.982833 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hz2k2\" (UniqueName: \"kubernetes.io/projected/26225f43-35e1-471c-9061-702aedd9bd7f-kube-api-access-hz2k2\") pod \"redhat-marketplace-6f6nw\" (UID: \"26225f43-35e1-471c-9061-702aedd9bd7f\") " pod="openshift-marketplace/redhat-marketplace-6f6nw" Nov 26 08:06:54 crc kubenswrapper[4492]: I1126 08:06:54.982926 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/26225f43-35e1-471c-9061-702aedd9bd7f-utilities\") pod \"redhat-marketplace-6f6nw\" (UID: \"26225f43-35e1-471c-9061-702aedd9bd7f\") " pod="openshift-marketplace/redhat-marketplace-6f6nw" Nov 26 08:06:55 crc kubenswrapper[4492]: I1126 08:06:55.085458 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hz2k2\" (UniqueName: \"kubernetes.io/projected/26225f43-35e1-471c-9061-702aedd9bd7f-kube-api-access-hz2k2\") pod \"redhat-marketplace-6f6nw\" (UID: \"26225f43-35e1-471c-9061-702aedd9bd7f\") " pod="openshift-marketplace/redhat-marketplace-6f6nw" Nov 26 08:06:55 crc kubenswrapper[4492]: I1126 08:06:55.085624 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/26225f43-35e1-471c-9061-702aedd9bd7f-utilities\") pod \"redhat-marketplace-6f6nw\" (UID: \"26225f43-35e1-471c-9061-702aedd9bd7f\") " pod="openshift-marketplace/redhat-marketplace-6f6nw" Nov 26 08:06:55 crc kubenswrapper[4492]: I1126 08:06:55.085792 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/26225f43-35e1-471c-9061-702aedd9bd7f-catalog-content\") pod \"redhat-marketplace-6f6nw\" (UID: \"26225f43-35e1-471c-9061-702aedd9bd7f\") " pod="openshift-marketplace/redhat-marketplace-6f6nw" Nov 26 08:06:55 crc kubenswrapper[4492]: I1126 08:06:55.087373 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/26225f43-35e1-471c-9061-702aedd9bd7f-utilities\") pod \"redhat-marketplace-6f6nw\" (UID: \"26225f43-35e1-471c-9061-702aedd9bd7f\") " pod="openshift-marketplace/redhat-marketplace-6f6nw" Nov 26 08:06:55 crc kubenswrapper[4492]: I1126 08:06:55.087555 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/26225f43-35e1-471c-9061-702aedd9bd7f-catalog-content\") pod \"redhat-marketplace-6f6nw\" (UID: \"26225f43-35e1-471c-9061-702aedd9bd7f\") " pod="openshift-marketplace/redhat-marketplace-6f6nw" Nov 26 08:06:55 crc kubenswrapper[4492]: I1126 08:06:55.109501 4492 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-hz2k2\" (UniqueName: \"kubernetes.io/projected/26225f43-35e1-471c-9061-702aedd9bd7f-kube-api-access-hz2k2\") pod \"redhat-marketplace-6f6nw\" (UID: \"26225f43-35e1-471c-9061-702aedd9bd7f\") " pod="openshift-marketplace/redhat-marketplace-6f6nw" Nov 26 08:06:55 crc kubenswrapper[4492]: I1126 08:06:55.219138 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6f6nw" Nov 26 08:06:55 crc kubenswrapper[4492]: I1126 08:06:55.874727 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-6f6nw"] Nov 26 08:06:56 crc kubenswrapper[4492]: I1126 08:06:56.149856 4492 generic.go:334] "Generic (PLEG): container finished" podID="26225f43-35e1-471c-9061-702aedd9bd7f" containerID="c6989b81f051e48c780bddf5e074797e0e431dc6ca9c19cab0ba2d6af06ebc8a" exitCode=0 Nov 26 08:06:56 crc kubenswrapper[4492]: I1126 08:06:56.149902 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6f6nw" event={"ID":"26225f43-35e1-471c-9061-702aedd9bd7f","Type":"ContainerDied","Data":"c6989b81f051e48c780bddf5e074797e0e431dc6ca9c19cab0ba2d6af06ebc8a"} Nov 26 08:06:56 crc kubenswrapper[4492]: I1126 08:06:56.150244 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6f6nw" event={"ID":"26225f43-35e1-471c-9061-702aedd9bd7f","Type":"ContainerStarted","Data":"9fa2fd80d2f03399d7a3f33c217f1a428f55b20c942165d9d48220e28de13e88"} Nov 26 08:06:57 crc kubenswrapper[4492]: I1126 08:06:57.165394 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6f6nw" event={"ID":"26225f43-35e1-471c-9061-702aedd9bd7f","Type":"ContainerStarted","Data":"b1115bc37a92c65a07baacfb99ff9c1c3db6b9fa1ba79fa17a01461fe74957fb"} Nov 26 08:06:58 crc kubenswrapper[4492]: I1126 08:06:58.175665 4492 generic.go:334] "Generic (PLEG): container finished" podID="26225f43-35e1-471c-9061-702aedd9bd7f" containerID="b1115bc37a92c65a07baacfb99ff9c1c3db6b9fa1ba79fa17a01461fe74957fb" exitCode=0 Nov 26 08:06:58 crc kubenswrapper[4492]: I1126 08:06:58.175757 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6f6nw" event={"ID":"26225f43-35e1-471c-9061-702aedd9bd7f","Type":"ContainerDied","Data":"b1115bc37a92c65a07baacfb99ff9c1c3db6b9fa1ba79fa17a01461fe74957fb"} Nov 26 08:06:59 crc kubenswrapper[4492]: I1126 08:06:59.188597 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6f6nw" event={"ID":"26225f43-35e1-471c-9061-702aedd9bd7f","Type":"ContainerStarted","Data":"85dfa2eef28b414356813e674084e8246414ce44d8bdafe0207057614683b771"} Nov 26 08:06:59 crc kubenswrapper[4492]: I1126 08:06:59.205677 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-6f6nw" podStartSLOduration=2.710198614 podStartE2EDuration="5.205665195s" podCreationTimestamp="2025-11-26 08:06:54 +0000 UTC" firstStartedPulling="2025-11-26 08:06:56.151682595 +0000 UTC m=+4712.035570883" lastFinishedPulling="2025-11-26 08:06:58.647149166 +0000 UTC m=+4714.531037464" observedRunningTime="2025-11-26 08:06:59.203226871 +0000 UTC m=+4715.087115169" watchObservedRunningTime="2025-11-26 08:06:59.205665195 +0000 UTC m=+4715.089553493" Nov 26 08:07:05 crc kubenswrapper[4492]: I1126 08:07:05.220317 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" 
pod="openshift-marketplace/redhat-marketplace-6f6nw" Nov 26 08:07:05 crc kubenswrapper[4492]: I1126 08:07:05.220704 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-6f6nw" Nov 26 08:07:05 crc kubenswrapper[4492]: I1126 08:07:05.266778 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-6f6nw" Nov 26 08:07:05 crc kubenswrapper[4492]: I1126 08:07:05.317344 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-6f6nw" Nov 26 08:07:07 crc kubenswrapper[4492]: I1126 08:07:07.675296 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-6f6nw"] Nov 26 08:07:07 crc kubenswrapper[4492]: I1126 08:07:07.675896 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-6f6nw" podUID="26225f43-35e1-471c-9061-702aedd9bd7f" containerName="registry-server" containerID="cri-o://85dfa2eef28b414356813e674084e8246414ce44d8bdafe0207057614683b771" gracePeriod=2 Nov 26 08:07:08 crc kubenswrapper[4492]: I1126 08:07:08.057442 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6f6nw" Nov 26 08:07:08 crc kubenswrapper[4492]: I1126 08:07:08.191713 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/26225f43-35e1-471c-9061-702aedd9bd7f-catalog-content\") pod \"26225f43-35e1-471c-9061-702aedd9bd7f\" (UID: \"26225f43-35e1-471c-9061-702aedd9bd7f\") " Nov 26 08:07:08 crc kubenswrapper[4492]: I1126 08:07:08.191941 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/26225f43-35e1-471c-9061-702aedd9bd7f-utilities\") pod \"26225f43-35e1-471c-9061-702aedd9bd7f\" (UID: \"26225f43-35e1-471c-9061-702aedd9bd7f\") " Nov 26 08:07:08 crc kubenswrapper[4492]: I1126 08:07:08.192064 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hz2k2\" (UniqueName: \"kubernetes.io/projected/26225f43-35e1-471c-9061-702aedd9bd7f-kube-api-access-hz2k2\") pod \"26225f43-35e1-471c-9061-702aedd9bd7f\" (UID: \"26225f43-35e1-471c-9061-702aedd9bd7f\") " Nov 26 08:07:08 crc kubenswrapper[4492]: I1126 08:07:08.194084 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/26225f43-35e1-471c-9061-702aedd9bd7f-utilities" (OuterVolumeSpecName: "utilities") pod "26225f43-35e1-471c-9061-702aedd9bd7f" (UID: "26225f43-35e1-471c-9061-702aedd9bd7f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 08:07:08 crc kubenswrapper[4492]: I1126 08:07:08.206633 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/26225f43-35e1-471c-9061-702aedd9bd7f-kube-api-access-hz2k2" (OuterVolumeSpecName: "kube-api-access-hz2k2") pod "26225f43-35e1-471c-9061-702aedd9bd7f" (UID: "26225f43-35e1-471c-9061-702aedd9bd7f"). InnerVolumeSpecName "kube-api-access-hz2k2". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:07:08 crc kubenswrapper[4492]: I1126 08:07:08.223651 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/26225f43-35e1-471c-9061-702aedd9bd7f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "26225f43-35e1-471c-9061-702aedd9bd7f" (UID: "26225f43-35e1-471c-9061-702aedd9bd7f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 08:07:08 crc kubenswrapper[4492]: I1126 08:07:08.262050 4492 generic.go:334] "Generic (PLEG): container finished" podID="26225f43-35e1-471c-9061-702aedd9bd7f" containerID="85dfa2eef28b414356813e674084e8246414ce44d8bdafe0207057614683b771" exitCode=0 Nov 26 08:07:08 crc kubenswrapper[4492]: I1126 08:07:08.262110 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6f6nw" Nov 26 08:07:08 crc kubenswrapper[4492]: I1126 08:07:08.262126 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6f6nw" event={"ID":"26225f43-35e1-471c-9061-702aedd9bd7f","Type":"ContainerDied","Data":"85dfa2eef28b414356813e674084e8246414ce44d8bdafe0207057614683b771"} Nov 26 08:07:08 crc kubenswrapper[4492]: I1126 08:07:08.262361 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6f6nw" event={"ID":"26225f43-35e1-471c-9061-702aedd9bd7f","Type":"ContainerDied","Data":"9fa2fd80d2f03399d7a3f33c217f1a428f55b20c942165d9d48220e28de13e88"} Nov 26 08:07:08 crc kubenswrapper[4492]: I1126 08:07:08.262381 4492 scope.go:117] "RemoveContainer" containerID="85dfa2eef28b414356813e674084e8246414ce44d8bdafe0207057614683b771" Nov 26 08:07:08 crc kubenswrapper[4492]: I1126 08:07:08.281213 4492 scope.go:117] "RemoveContainer" containerID="b1115bc37a92c65a07baacfb99ff9c1c3db6b9fa1ba79fa17a01461fe74957fb" Nov 26 08:07:08 crc kubenswrapper[4492]: I1126 08:07:08.290140 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-6f6nw"] Nov 26 08:07:08 crc kubenswrapper[4492]: I1126 08:07:08.295682 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hz2k2\" (UniqueName: \"kubernetes.io/projected/26225f43-35e1-471c-9061-702aedd9bd7f-kube-api-access-hz2k2\") on node \"crc\" DevicePath \"\"" Nov 26 08:07:08 crc kubenswrapper[4492]: I1126 08:07:08.295716 4492 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/26225f43-35e1-471c-9061-702aedd9bd7f-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 08:07:08 crc kubenswrapper[4492]: I1126 08:07:08.295725 4492 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/26225f43-35e1-471c-9061-702aedd9bd7f-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 08:07:08 crc kubenswrapper[4492]: I1126 08:07:08.300057 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-6f6nw"] Nov 26 08:07:08 crc kubenswrapper[4492]: I1126 08:07:08.302466 4492 scope.go:117] "RemoveContainer" containerID="c6989b81f051e48c780bddf5e074797e0e431dc6ca9c19cab0ba2d6af06ebc8a" Nov 26 08:07:08 crc kubenswrapper[4492]: I1126 08:07:08.333103 4492 scope.go:117] "RemoveContainer" containerID="85dfa2eef28b414356813e674084e8246414ce44d8bdafe0207057614683b771" Nov 26 08:07:08 crc kubenswrapper[4492]: E1126 08:07:08.333691 4492 log.go:32] 
"ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"85dfa2eef28b414356813e674084e8246414ce44d8bdafe0207057614683b771\": container with ID starting with 85dfa2eef28b414356813e674084e8246414ce44d8bdafe0207057614683b771 not found: ID does not exist" containerID="85dfa2eef28b414356813e674084e8246414ce44d8bdafe0207057614683b771" Nov 26 08:07:08 crc kubenswrapper[4492]: I1126 08:07:08.333723 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"85dfa2eef28b414356813e674084e8246414ce44d8bdafe0207057614683b771"} err="failed to get container status \"85dfa2eef28b414356813e674084e8246414ce44d8bdafe0207057614683b771\": rpc error: code = NotFound desc = could not find container \"85dfa2eef28b414356813e674084e8246414ce44d8bdafe0207057614683b771\": container with ID starting with 85dfa2eef28b414356813e674084e8246414ce44d8bdafe0207057614683b771 not found: ID does not exist" Nov 26 08:07:08 crc kubenswrapper[4492]: I1126 08:07:08.333746 4492 scope.go:117] "RemoveContainer" containerID="b1115bc37a92c65a07baacfb99ff9c1c3db6b9fa1ba79fa17a01461fe74957fb" Nov 26 08:07:08 crc kubenswrapper[4492]: E1126 08:07:08.334047 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b1115bc37a92c65a07baacfb99ff9c1c3db6b9fa1ba79fa17a01461fe74957fb\": container with ID starting with b1115bc37a92c65a07baacfb99ff9c1c3db6b9fa1ba79fa17a01461fe74957fb not found: ID does not exist" containerID="b1115bc37a92c65a07baacfb99ff9c1c3db6b9fa1ba79fa17a01461fe74957fb" Nov 26 08:07:08 crc kubenswrapper[4492]: I1126 08:07:08.334066 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b1115bc37a92c65a07baacfb99ff9c1c3db6b9fa1ba79fa17a01461fe74957fb"} err="failed to get container status \"b1115bc37a92c65a07baacfb99ff9c1c3db6b9fa1ba79fa17a01461fe74957fb\": rpc error: code = NotFound desc = could not find container \"b1115bc37a92c65a07baacfb99ff9c1c3db6b9fa1ba79fa17a01461fe74957fb\": container with ID starting with b1115bc37a92c65a07baacfb99ff9c1c3db6b9fa1ba79fa17a01461fe74957fb not found: ID does not exist" Nov 26 08:07:08 crc kubenswrapper[4492]: I1126 08:07:08.334078 4492 scope.go:117] "RemoveContainer" containerID="c6989b81f051e48c780bddf5e074797e0e431dc6ca9c19cab0ba2d6af06ebc8a" Nov 26 08:07:08 crc kubenswrapper[4492]: E1126 08:07:08.334338 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c6989b81f051e48c780bddf5e074797e0e431dc6ca9c19cab0ba2d6af06ebc8a\": container with ID starting with c6989b81f051e48c780bddf5e074797e0e431dc6ca9c19cab0ba2d6af06ebc8a not found: ID does not exist" containerID="c6989b81f051e48c780bddf5e074797e0e431dc6ca9c19cab0ba2d6af06ebc8a" Nov 26 08:07:08 crc kubenswrapper[4492]: I1126 08:07:08.334356 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c6989b81f051e48c780bddf5e074797e0e431dc6ca9c19cab0ba2d6af06ebc8a"} err="failed to get container status \"c6989b81f051e48c780bddf5e074797e0e431dc6ca9c19cab0ba2d6af06ebc8a\": rpc error: code = NotFound desc = could not find container \"c6989b81f051e48c780bddf5e074797e0e431dc6ca9c19cab0ba2d6af06ebc8a\": container with ID starting with c6989b81f051e48c780bddf5e074797e0e431dc6ca9c19cab0ba2d6af06ebc8a not found: ID does not exist" Nov 26 08:07:08 crc kubenswrapper[4492]: I1126 08:07:08.447311 4492 kubelet_volumes.go:163] "Cleaned 
up orphaned pod volumes dir" podUID="26225f43-35e1-471c-9061-702aedd9bd7f" path="/var/lib/kubelet/pods/26225f43-35e1-471c-9061-702aedd9bd7f/volumes" Nov 26 08:07:19 crc kubenswrapper[4492]: I1126 08:07:19.444885 4492 patch_prober.go:28] interesting pod/machine-config-daemon-6blv7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 08:07:19 crc kubenswrapper[4492]: I1126 08:07:19.445825 4492 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 08:07:49 crc kubenswrapper[4492]: I1126 08:07:49.441983 4492 patch_prober.go:28] interesting pod/machine-config-daemon-6blv7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 08:07:49 crc kubenswrapper[4492]: I1126 08:07:49.442456 4492 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 08:08:19 crc kubenswrapper[4492]: I1126 08:08:19.441886 4492 patch_prober.go:28] interesting pod/machine-config-daemon-6blv7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 08:08:19 crc kubenswrapper[4492]: I1126 08:08:19.442747 4492 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 08:08:19 crc kubenswrapper[4492]: I1126 08:08:19.442850 4492 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" Nov 26 08:08:19 crc kubenswrapper[4492]: I1126 08:08:19.444122 4492 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"4294d04f48a00d21db3973cf76008e401cc1a49db778781a5749be9989d44808"} pod="openshift-machine-config-operator/machine-config-daemon-6blv7" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 08:08:19 crc kubenswrapper[4492]: I1126 08:08:19.444249 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" containerID="cri-o://4294d04f48a00d21db3973cf76008e401cc1a49db778781a5749be9989d44808" gracePeriod=600 Nov 26 08:08:19 crc kubenswrapper[4492]: I1126 08:08:19.768734 4492 generic.go:334] "Generic (PLEG): container finished" 
podID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerID="4294d04f48a00d21db3973cf76008e401cc1a49db778781a5749be9989d44808" exitCode=0 Nov 26 08:08:19 crc kubenswrapper[4492]: I1126 08:08:19.768802 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" event={"ID":"04bf18ad-d2a1-4b30-a3fa-2b6247363c82","Type":"ContainerDied","Data":"4294d04f48a00d21db3973cf76008e401cc1a49db778781a5749be9989d44808"} Nov 26 08:08:19 crc kubenswrapper[4492]: I1126 08:08:19.769148 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" event={"ID":"04bf18ad-d2a1-4b30-a3fa-2b6247363c82","Type":"ContainerStarted","Data":"2c1c7097f131db25021a698c76cc36529d7461302764c06cea956039d61a5397"} Nov 26 08:08:19 crc kubenswrapper[4492]: I1126 08:08:19.769188 4492 scope.go:117] "RemoveContainer" containerID="662914ce15995ceb9487d63c3784ecf253e21638f72690c752d5cfd0b226bddb" Nov 26 08:09:02 crc kubenswrapper[4492]: I1126 08:09:02.100973 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-tgkhm"] Nov 26 08:09:02 crc kubenswrapper[4492]: E1126 08:09:02.101754 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="26225f43-35e1-471c-9061-702aedd9bd7f" containerName="registry-server" Nov 26 08:09:02 crc kubenswrapper[4492]: I1126 08:09:02.101768 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="26225f43-35e1-471c-9061-702aedd9bd7f" containerName="registry-server" Nov 26 08:09:02 crc kubenswrapper[4492]: E1126 08:09:02.101782 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="26225f43-35e1-471c-9061-702aedd9bd7f" containerName="extract-content" Nov 26 08:09:02 crc kubenswrapper[4492]: I1126 08:09:02.101787 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="26225f43-35e1-471c-9061-702aedd9bd7f" containerName="extract-content" Nov 26 08:09:02 crc kubenswrapper[4492]: E1126 08:09:02.101810 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="26225f43-35e1-471c-9061-702aedd9bd7f" containerName="extract-utilities" Nov 26 08:09:02 crc kubenswrapper[4492]: I1126 08:09:02.101815 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="26225f43-35e1-471c-9061-702aedd9bd7f" containerName="extract-utilities" Nov 26 08:09:02 crc kubenswrapper[4492]: I1126 08:09:02.102037 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="26225f43-35e1-471c-9061-702aedd9bd7f" containerName="registry-server" Nov 26 08:09:02 crc kubenswrapper[4492]: I1126 08:09:02.103524 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-tgkhm" Nov 26 08:09:02 crc kubenswrapper[4492]: I1126 08:09:02.111514 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-tgkhm"] Nov 26 08:09:02 crc kubenswrapper[4492]: I1126 08:09:02.142570 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c15d7baa-ccb1-4279-8b3c-11322522d6dc-catalog-content\") pod \"certified-operators-tgkhm\" (UID: \"c15d7baa-ccb1-4279-8b3c-11322522d6dc\") " pod="openshift-marketplace/certified-operators-tgkhm" Nov 26 08:09:02 crc kubenswrapper[4492]: I1126 08:09:02.142699 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rhvzv\" (UniqueName: \"kubernetes.io/projected/c15d7baa-ccb1-4279-8b3c-11322522d6dc-kube-api-access-rhvzv\") pod \"certified-operators-tgkhm\" (UID: \"c15d7baa-ccb1-4279-8b3c-11322522d6dc\") " pod="openshift-marketplace/certified-operators-tgkhm" Nov 26 08:09:02 crc kubenswrapper[4492]: I1126 08:09:02.142735 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c15d7baa-ccb1-4279-8b3c-11322522d6dc-utilities\") pod \"certified-operators-tgkhm\" (UID: \"c15d7baa-ccb1-4279-8b3c-11322522d6dc\") " pod="openshift-marketplace/certified-operators-tgkhm" Nov 26 08:09:02 crc kubenswrapper[4492]: I1126 08:09:02.244233 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c15d7baa-ccb1-4279-8b3c-11322522d6dc-utilities\") pod \"certified-operators-tgkhm\" (UID: \"c15d7baa-ccb1-4279-8b3c-11322522d6dc\") " pod="openshift-marketplace/certified-operators-tgkhm" Nov 26 08:09:02 crc kubenswrapper[4492]: I1126 08:09:02.244372 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c15d7baa-ccb1-4279-8b3c-11322522d6dc-catalog-content\") pod \"certified-operators-tgkhm\" (UID: \"c15d7baa-ccb1-4279-8b3c-11322522d6dc\") " pod="openshift-marketplace/certified-operators-tgkhm" Nov 26 08:09:02 crc kubenswrapper[4492]: I1126 08:09:02.244401 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rhvzv\" (UniqueName: \"kubernetes.io/projected/c15d7baa-ccb1-4279-8b3c-11322522d6dc-kube-api-access-rhvzv\") pod \"certified-operators-tgkhm\" (UID: \"c15d7baa-ccb1-4279-8b3c-11322522d6dc\") " pod="openshift-marketplace/certified-operators-tgkhm" Nov 26 08:09:02 crc kubenswrapper[4492]: I1126 08:09:02.244810 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c15d7baa-ccb1-4279-8b3c-11322522d6dc-catalog-content\") pod \"certified-operators-tgkhm\" (UID: \"c15d7baa-ccb1-4279-8b3c-11322522d6dc\") " pod="openshift-marketplace/certified-operators-tgkhm" Nov 26 08:09:02 crc kubenswrapper[4492]: I1126 08:09:02.245007 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c15d7baa-ccb1-4279-8b3c-11322522d6dc-utilities\") pod \"certified-operators-tgkhm\" (UID: \"c15d7baa-ccb1-4279-8b3c-11322522d6dc\") " pod="openshift-marketplace/certified-operators-tgkhm" Nov 26 08:09:02 crc kubenswrapper[4492]: I1126 08:09:02.261984 4492 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-rhvzv\" (UniqueName: \"kubernetes.io/projected/c15d7baa-ccb1-4279-8b3c-11322522d6dc-kube-api-access-rhvzv\") pod \"certified-operators-tgkhm\" (UID: \"c15d7baa-ccb1-4279-8b3c-11322522d6dc\") " pod="openshift-marketplace/certified-operators-tgkhm" Nov 26 08:09:02 crc kubenswrapper[4492]: I1126 08:09:02.421429 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-tgkhm" Nov 26 08:09:02 crc kubenswrapper[4492]: I1126 08:09:02.909151 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-tgkhm"] Nov 26 08:09:03 crc kubenswrapper[4492]: I1126 08:09:03.069249 4492 generic.go:334] "Generic (PLEG): container finished" podID="c15d7baa-ccb1-4279-8b3c-11322522d6dc" containerID="d6b2f36ecaf7becc91e68053a321e98116cca59c2079a2b4359ff605174d0bd8" exitCode=0 Nov 26 08:09:03 crc kubenswrapper[4492]: I1126 08:09:03.069390 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tgkhm" event={"ID":"c15d7baa-ccb1-4279-8b3c-11322522d6dc","Type":"ContainerDied","Data":"d6b2f36ecaf7becc91e68053a321e98116cca59c2079a2b4359ff605174d0bd8"} Nov 26 08:09:03 crc kubenswrapper[4492]: I1126 08:09:03.069475 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tgkhm" event={"ID":"c15d7baa-ccb1-4279-8b3c-11322522d6dc","Type":"ContainerStarted","Data":"7b74681b085d791c44be71a23968138b118d21cbc105afcc7c4d01e5c367c3f8"} Nov 26 08:09:03 crc kubenswrapper[4492]: I1126 08:09:03.071794 4492 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 26 08:09:04 crc kubenswrapper[4492]: I1126 08:09:04.076844 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tgkhm" event={"ID":"c15d7baa-ccb1-4279-8b3c-11322522d6dc","Type":"ContainerStarted","Data":"3ce43cb7a1dabd1e07b2bee6848fd3fea2921221056d3a848971ebf3ec645a46"} Nov 26 08:09:05 crc kubenswrapper[4492]: I1126 08:09:05.086432 4492 generic.go:334] "Generic (PLEG): container finished" podID="c15d7baa-ccb1-4279-8b3c-11322522d6dc" containerID="3ce43cb7a1dabd1e07b2bee6848fd3fea2921221056d3a848971ebf3ec645a46" exitCode=0 Nov 26 08:09:05 crc kubenswrapper[4492]: I1126 08:09:05.086522 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tgkhm" event={"ID":"c15d7baa-ccb1-4279-8b3c-11322522d6dc","Type":"ContainerDied","Data":"3ce43cb7a1dabd1e07b2bee6848fd3fea2921221056d3a848971ebf3ec645a46"} Nov 26 08:09:06 crc kubenswrapper[4492]: I1126 08:09:06.094736 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tgkhm" event={"ID":"c15d7baa-ccb1-4279-8b3c-11322522d6dc","Type":"ContainerStarted","Data":"c36265083df13c0ac7b3688ea65552782d563826292af0bedc83debd95fb5c3a"} Nov 26 08:09:06 crc kubenswrapper[4492]: I1126 08:09:06.111818 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-tgkhm" podStartSLOduration=1.575132596 podStartE2EDuration="4.111800452s" podCreationTimestamp="2025-11-26 08:09:02 +0000 UTC" firstStartedPulling="2025-11-26 08:09:03.071048629 +0000 UTC m=+4838.954936928" lastFinishedPulling="2025-11-26 08:09:05.607716487 +0000 UTC m=+4841.491604784" observedRunningTime="2025-11-26 08:09:06.108553437 +0000 UTC m=+4841.992441735" watchObservedRunningTime="2025-11-26 
08:09:06.111800452 +0000 UTC m=+4841.995688751" Nov 26 08:09:12 crc kubenswrapper[4492]: I1126 08:09:12.421698 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-tgkhm" Nov 26 08:09:12 crc kubenswrapper[4492]: I1126 08:09:12.422079 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-tgkhm" Nov 26 08:09:12 crc kubenswrapper[4492]: I1126 08:09:12.457814 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-tgkhm" Nov 26 08:09:13 crc kubenswrapper[4492]: I1126 08:09:13.189292 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-tgkhm" Nov 26 08:09:13 crc kubenswrapper[4492]: I1126 08:09:13.230762 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-tgkhm"] Nov 26 08:09:15 crc kubenswrapper[4492]: I1126 08:09:15.170939 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-tgkhm" podUID="c15d7baa-ccb1-4279-8b3c-11322522d6dc" containerName="registry-server" containerID="cri-o://c36265083df13c0ac7b3688ea65552782d563826292af0bedc83debd95fb5c3a" gracePeriod=2 Nov 26 08:09:15 crc kubenswrapper[4492]: I1126 08:09:15.563912 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-tgkhm" Nov 26 08:09:15 crc kubenswrapper[4492]: I1126 08:09:15.760896 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c15d7baa-ccb1-4279-8b3c-11322522d6dc-catalog-content\") pod \"c15d7baa-ccb1-4279-8b3c-11322522d6dc\" (UID: \"c15d7baa-ccb1-4279-8b3c-11322522d6dc\") " Nov 26 08:09:15 crc kubenswrapper[4492]: I1126 08:09:15.760964 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rhvzv\" (UniqueName: \"kubernetes.io/projected/c15d7baa-ccb1-4279-8b3c-11322522d6dc-kube-api-access-rhvzv\") pod \"c15d7baa-ccb1-4279-8b3c-11322522d6dc\" (UID: \"c15d7baa-ccb1-4279-8b3c-11322522d6dc\") " Nov 26 08:09:15 crc kubenswrapper[4492]: I1126 08:09:15.761286 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c15d7baa-ccb1-4279-8b3c-11322522d6dc-utilities\") pod \"c15d7baa-ccb1-4279-8b3c-11322522d6dc\" (UID: \"c15d7baa-ccb1-4279-8b3c-11322522d6dc\") " Nov 26 08:09:15 crc kubenswrapper[4492]: I1126 08:09:15.762400 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c15d7baa-ccb1-4279-8b3c-11322522d6dc-utilities" (OuterVolumeSpecName: "utilities") pod "c15d7baa-ccb1-4279-8b3c-11322522d6dc" (UID: "c15d7baa-ccb1-4279-8b3c-11322522d6dc"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 08:09:15 crc kubenswrapper[4492]: I1126 08:09:15.770440 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c15d7baa-ccb1-4279-8b3c-11322522d6dc-kube-api-access-rhvzv" (OuterVolumeSpecName: "kube-api-access-rhvzv") pod "c15d7baa-ccb1-4279-8b3c-11322522d6dc" (UID: "c15d7baa-ccb1-4279-8b3c-11322522d6dc"). InnerVolumeSpecName "kube-api-access-rhvzv". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:09:15 crc kubenswrapper[4492]: I1126 08:09:15.798149 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c15d7baa-ccb1-4279-8b3c-11322522d6dc-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c15d7baa-ccb1-4279-8b3c-11322522d6dc" (UID: "c15d7baa-ccb1-4279-8b3c-11322522d6dc"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 08:09:15 crc kubenswrapper[4492]: I1126 08:09:15.863870 4492 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c15d7baa-ccb1-4279-8b3c-11322522d6dc-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 08:09:15 crc kubenswrapper[4492]: I1126 08:09:15.864054 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rhvzv\" (UniqueName: \"kubernetes.io/projected/c15d7baa-ccb1-4279-8b3c-11322522d6dc-kube-api-access-rhvzv\") on node \"crc\" DevicePath \"\"" Nov 26 08:09:15 crc kubenswrapper[4492]: I1126 08:09:15.864066 4492 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c15d7baa-ccb1-4279-8b3c-11322522d6dc-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 08:09:16 crc kubenswrapper[4492]: I1126 08:09:16.179003 4492 generic.go:334] "Generic (PLEG): container finished" podID="c15d7baa-ccb1-4279-8b3c-11322522d6dc" containerID="c36265083df13c0ac7b3688ea65552782d563826292af0bedc83debd95fb5c3a" exitCode=0 Nov 26 08:09:16 crc kubenswrapper[4492]: I1126 08:09:16.179037 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tgkhm" event={"ID":"c15d7baa-ccb1-4279-8b3c-11322522d6dc","Type":"ContainerDied","Data":"c36265083df13c0ac7b3688ea65552782d563826292af0bedc83debd95fb5c3a"} Nov 26 08:09:16 crc kubenswrapper[4492]: I1126 08:09:16.179050 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-tgkhm" Nov 26 08:09:16 crc kubenswrapper[4492]: I1126 08:09:16.179062 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tgkhm" event={"ID":"c15d7baa-ccb1-4279-8b3c-11322522d6dc","Type":"ContainerDied","Data":"7b74681b085d791c44be71a23968138b118d21cbc105afcc7c4d01e5c367c3f8"} Nov 26 08:09:16 crc kubenswrapper[4492]: I1126 08:09:16.179078 4492 scope.go:117] "RemoveContainer" containerID="c36265083df13c0ac7b3688ea65552782d563826292af0bedc83debd95fb5c3a" Nov 26 08:09:16 crc kubenswrapper[4492]: I1126 08:09:16.197209 4492 scope.go:117] "RemoveContainer" containerID="3ce43cb7a1dabd1e07b2bee6848fd3fea2921221056d3a848971ebf3ec645a46" Nov 26 08:09:16 crc kubenswrapper[4492]: I1126 08:09:16.211321 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-tgkhm"] Nov 26 08:09:16 crc kubenswrapper[4492]: I1126 08:09:16.219051 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-tgkhm"] Nov 26 08:09:16 crc kubenswrapper[4492]: I1126 08:09:16.232964 4492 scope.go:117] "RemoveContainer" containerID="d6b2f36ecaf7becc91e68053a321e98116cca59c2079a2b4359ff605174d0bd8" Nov 26 08:09:16 crc kubenswrapper[4492]: I1126 08:09:16.251345 4492 scope.go:117] "RemoveContainer" containerID="c36265083df13c0ac7b3688ea65552782d563826292af0bedc83debd95fb5c3a" Nov 26 08:09:16 crc kubenswrapper[4492]: E1126 08:09:16.252084 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c36265083df13c0ac7b3688ea65552782d563826292af0bedc83debd95fb5c3a\": container with ID starting with c36265083df13c0ac7b3688ea65552782d563826292af0bedc83debd95fb5c3a not found: ID does not exist" containerID="c36265083df13c0ac7b3688ea65552782d563826292af0bedc83debd95fb5c3a" Nov 26 08:09:16 crc kubenswrapper[4492]: I1126 08:09:16.252124 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c36265083df13c0ac7b3688ea65552782d563826292af0bedc83debd95fb5c3a"} err="failed to get container status \"c36265083df13c0ac7b3688ea65552782d563826292af0bedc83debd95fb5c3a\": rpc error: code = NotFound desc = could not find container \"c36265083df13c0ac7b3688ea65552782d563826292af0bedc83debd95fb5c3a\": container with ID starting with c36265083df13c0ac7b3688ea65552782d563826292af0bedc83debd95fb5c3a not found: ID does not exist" Nov 26 08:09:16 crc kubenswrapper[4492]: I1126 08:09:16.252147 4492 scope.go:117] "RemoveContainer" containerID="3ce43cb7a1dabd1e07b2bee6848fd3fea2921221056d3a848971ebf3ec645a46" Nov 26 08:09:16 crc kubenswrapper[4492]: E1126 08:09:16.252525 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3ce43cb7a1dabd1e07b2bee6848fd3fea2921221056d3a848971ebf3ec645a46\": container with ID starting with 3ce43cb7a1dabd1e07b2bee6848fd3fea2921221056d3a848971ebf3ec645a46 not found: ID does not exist" containerID="3ce43cb7a1dabd1e07b2bee6848fd3fea2921221056d3a848971ebf3ec645a46" Nov 26 08:09:16 crc kubenswrapper[4492]: I1126 08:09:16.252546 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3ce43cb7a1dabd1e07b2bee6848fd3fea2921221056d3a848971ebf3ec645a46"} err="failed to get container status \"3ce43cb7a1dabd1e07b2bee6848fd3fea2921221056d3a848971ebf3ec645a46\": rpc error: code = NotFound desc = could not find 
container \"3ce43cb7a1dabd1e07b2bee6848fd3fea2921221056d3a848971ebf3ec645a46\": container with ID starting with 3ce43cb7a1dabd1e07b2bee6848fd3fea2921221056d3a848971ebf3ec645a46 not found: ID does not exist" Nov 26 08:09:16 crc kubenswrapper[4492]: I1126 08:09:16.252561 4492 scope.go:117] "RemoveContainer" containerID="d6b2f36ecaf7becc91e68053a321e98116cca59c2079a2b4359ff605174d0bd8" Nov 26 08:09:16 crc kubenswrapper[4492]: E1126 08:09:16.252807 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d6b2f36ecaf7becc91e68053a321e98116cca59c2079a2b4359ff605174d0bd8\": container with ID starting with d6b2f36ecaf7becc91e68053a321e98116cca59c2079a2b4359ff605174d0bd8 not found: ID does not exist" containerID="d6b2f36ecaf7becc91e68053a321e98116cca59c2079a2b4359ff605174d0bd8" Nov 26 08:09:16 crc kubenswrapper[4492]: I1126 08:09:16.252833 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d6b2f36ecaf7becc91e68053a321e98116cca59c2079a2b4359ff605174d0bd8"} err="failed to get container status \"d6b2f36ecaf7becc91e68053a321e98116cca59c2079a2b4359ff605174d0bd8\": rpc error: code = NotFound desc = could not find container \"d6b2f36ecaf7becc91e68053a321e98116cca59c2079a2b4359ff605174d0bd8\": container with ID starting with d6b2f36ecaf7becc91e68053a321e98116cca59c2079a2b4359ff605174d0bd8 not found: ID does not exist" Nov 26 08:09:16 crc kubenswrapper[4492]: I1126 08:09:16.446075 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c15d7baa-ccb1-4279-8b3c-11322522d6dc" path="/var/lib/kubelet/pods/c15d7baa-ccb1-4279-8b3c-11322522d6dc/volumes" Nov 26 08:10:11 crc kubenswrapper[4492]: E1126 08:10:11.462916 4492 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 192.168.25.180:50308->192.168.25.180:45641: write tcp 192.168.25.180:50308->192.168.25.180:45641: write: broken pipe Nov 26 08:10:12 crc kubenswrapper[4492]: E1126 08:10:12.497898 4492 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 192.168.25.180:50344->192.168.25.180:45641: write tcp 192.168.25.180:50344->192.168.25.180:45641: write: broken pipe Nov 26 08:10:19 crc kubenswrapper[4492]: I1126 08:10:19.441730 4492 patch_prober.go:28] interesting pod/machine-config-daemon-6blv7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 08:10:19 crc kubenswrapper[4492]: I1126 08:10:19.442305 4492 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 08:10:43 crc kubenswrapper[4492]: I1126 08:10:43.981531 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-6fdjv"] Nov 26 08:10:43 crc kubenswrapper[4492]: E1126 08:10:43.982326 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c15d7baa-ccb1-4279-8b3c-11322522d6dc" containerName="extract-content" Nov 26 08:10:43 crc kubenswrapper[4492]: I1126 08:10:43.982339 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="c15d7baa-ccb1-4279-8b3c-11322522d6dc" containerName="extract-content" Nov 26 08:10:43 crc 
kubenswrapper[4492]: E1126 08:10:43.982360 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c15d7baa-ccb1-4279-8b3c-11322522d6dc" containerName="registry-server" Nov 26 08:10:43 crc kubenswrapper[4492]: I1126 08:10:43.982367 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="c15d7baa-ccb1-4279-8b3c-11322522d6dc" containerName="registry-server" Nov 26 08:10:43 crc kubenswrapper[4492]: E1126 08:10:43.982385 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c15d7baa-ccb1-4279-8b3c-11322522d6dc" containerName="extract-utilities" Nov 26 08:10:43 crc kubenswrapper[4492]: I1126 08:10:43.982392 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="c15d7baa-ccb1-4279-8b3c-11322522d6dc" containerName="extract-utilities" Nov 26 08:10:43 crc kubenswrapper[4492]: I1126 08:10:43.982573 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="c15d7baa-ccb1-4279-8b3c-11322522d6dc" containerName="registry-server" Nov 26 08:10:43 crc kubenswrapper[4492]: I1126 08:10:43.984108 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-6fdjv" Nov 26 08:10:43 crc kubenswrapper[4492]: I1126 08:10:43.989754 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-6fdjv"] Nov 26 08:10:43 crc kubenswrapper[4492]: I1126 08:10:43.996908 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4a5d68ef-dd2a-475b-b9a3-2339b378337d-utilities\") pod \"redhat-operators-6fdjv\" (UID: \"4a5d68ef-dd2a-475b-b9a3-2339b378337d\") " pod="openshift-marketplace/redhat-operators-6fdjv" Nov 26 08:10:43 crc kubenswrapper[4492]: I1126 08:10:43.997012 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4a5d68ef-dd2a-475b-b9a3-2339b378337d-catalog-content\") pod \"redhat-operators-6fdjv\" (UID: \"4a5d68ef-dd2a-475b-b9a3-2339b378337d\") " pod="openshift-marketplace/redhat-operators-6fdjv" Nov 26 08:10:43 crc kubenswrapper[4492]: I1126 08:10:43.997084 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jxj7n\" (UniqueName: \"kubernetes.io/projected/4a5d68ef-dd2a-475b-b9a3-2339b378337d-kube-api-access-jxj7n\") pod \"redhat-operators-6fdjv\" (UID: \"4a5d68ef-dd2a-475b-b9a3-2339b378337d\") " pod="openshift-marketplace/redhat-operators-6fdjv" Nov 26 08:10:44 crc kubenswrapper[4492]: I1126 08:10:44.098849 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4a5d68ef-dd2a-475b-b9a3-2339b378337d-utilities\") pod \"redhat-operators-6fdjv\" (UID: \"4a5d68ef-dd2a-475b-b9a3-2339b378337d\") " pod="openshift-marketplace/redhat-operators-6fdjv" Nov 26 08:10:44 crc kubenswrapper[4492]: I1126 08:10:44.099141 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4a5d68ef-dd2a-475b-b9a3-2339b378337d-catalog-content\") pod \"redhat-operators-6fdjv\" (UID: \"4a5d68ef-dd2a-475b-b9a3-2339b378337d\") " pod="openshift-marketplace/redhat-operators-6fdjv" Nov 26 08:10:44 crc kubenswrapper[4492]: I1126 08:10:44.099198 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jxj7n\" (UniqueName: 
\"kubernetes.io/projected/4a5d68ef-dd2a-475b-b9a3-2339b378337d-kube-api-access-jxj7n\") pod \"redhat-operators-6fdjv\" (UID: \"4a5d68ef-dd2a-475b-b9a3-2339b378337d\") " pod="openshift-marketplace/redhat-operators-6fdjv" Nov 26 08:10:44 crc kubenswrapper[4492]: I1126 08:10:44.099463 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4a5d68ef-dd2a-475b-b9a3-2339b378337d-utilities\") pod \"redhat-operators-6fdjv\" (UID: \"4a5d68ef-dd2a-475b-b9a3-2339b378337d\") " pod="openshift-marketplace/redhat-operators-6fdjv" Nov 26 08:10:44 crc kubenswrapper[4492]: I1126 08:10:44.099520 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4a5d68ef-dd2a-475b-b9a3-2339b378337d-catalog-content\") pod \"redhat-operators-6fdjv\" (UID: \"4a5d68ef-dd2a-475b-b9a3-2339b378337d\") " pod="openshift-marketplace/redhat-operators-6fdjv" Nov 26 08:10:44 crc kubenswrapper[4492]: I1126 08:10:44.393833 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jxj7n\" (UniqueName: \"kubernetes.io/projected/4a5d68ef-dd2a-475b-b9a3-2339b378337d-kube-api-access-jxj7n\") pod \"redhat-operators-6fdjv\" (UID: \"4a5d68ef-dd2a-475b-b9a3-2339b378337d\") " pod="openshift-marketplace/redhat-operators-6fdjv" Nov 26 08:10:44 crc kubenswrapper[4492]: I1126 08:10:44.613722 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-6fdjv" Nov 26 08:10:45 crc kubenswrapper[4492]: I1126 08:10:45.020760 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-6fdjv"] Nov 26 08:10:45 crc kubenswrapper[4492]: I1126 08:10:45.836422 4492 generic.go:334] "Generic (PLEG): container finished" podID="4a5d68ef-dd2a-475b-b9a3-2339b378337d" containerID="829d9bfea23a1368979bbecf6823b06502f12e8e8d64403cf8cec92176ba2be0" exitCode=0 Nov 26 08:10:45 crc kubenswrapper[4492]: I1126 08:10:45.836621 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6fdjv" event={"ID":"4a5d68ef-dd2a-475b-b9a3-2339b378337d","Type":"ContainerDied","Data":"829d9bfea23a1368979bbecf6823b06502f12e8e8d64403cf8cec92176ba2be0"} Nov 26 08:10:45 crc kubenswrapper[4492]: I1126 08:10:45.836998 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6fdjv" event={"ID":"4a5d68ef-dd2a-475b-b9a3-2339b378337d","Type":"ContainerStarted","Data":"610950bc2d409641aacbf6b92fcab999897f75b3424994343278ac3eaf74634c"} Nov 26 08:10:46 crc kubenswrapper[4492]: I1126 08:10:46.845864 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6fdjv" event={"ID":"4a5d68ef-dd2a-475b-b9a3-2339b378337d","Type":"ContainerStarted","Data":"d9c43a064c32e99a7e07444ecc27bc53e3585b4a7ab25e908c430dce7bf09e1c"} Nov 26 08:10:48 crc kubenswrapper[4492]: I1126 08:10:48.863059 4492 generic.go:334] "Generic (PLEG): container finished" podID="4a5d68ef-dd2a-475b-b9a3-2339b378337d" containerID="d9c43a064c32e99a7e07444ecc27bc53e3585b4a7ab25e908c430dce7bf09e1c" exitCode=0 Nov 26 08:10:48 crc kubenswrapper[4492]: I1126 08:10:48.863141 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6fdjv" event={"ID":"4a5d68ef-dd2a-475b-b9a3-2339b378337d","Type":"ContainerDied","Data":"d9c43a064c32e99a7e07444ecc27bc53e3585b4a7ab25e908c430dce7bf09e1c"} Nov 26 08:10:49 crc 
kubenswrapper[4492]: I1126 08:10:49.441518 4492 patch_prober.go:28] interesting pod/machine-config-daemon-6blv7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 08:10:49 crc kubenswrapper[4492]: I1126 08:10:49.441553 4492 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 08:10:49 crc kubenswrapper[4492]: I1126 08:10:49.872673 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6fdjv" event={"ID":"4a5d68ef-dd2a-475b-b9a3-2339b378337d","Type":"ContainerStarted","Data":"5bed4b1bea8771624d21d30ba0c75c2da8b753e7fd50a477f99d7d664f3fb972"} Nov 26 08:10:49 crc kubenswrapper[4492]: I1126 08:10:49.891575 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-6fdjv" podStartSLOduration=3.4026433640000002 podStartE2EDuration="6.891561214s" podCreationTimestamp="2025-11-26 08:10:43 +0000 UTC" firstStartedPulling="2025-11-26 08:10:45.837988231 +0000 UTC m=+4941.721876529" lastFinishedPulling="2025-11-26 08:10:49.326906081 +0000 UTC m=+4945.210794379" observedRunningTime="2025-11-26 08:10:49.88678298 +0000 UTC m=+4945.770671278" watchObservedRunningTime="2025-11-26 08:10:49.891561214 +0000 UTC m=+4945.775449512" Nov 26 08:10:54 crc kubenswrapper[4492]: I1126 08:10:54.614704 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-6fdjv" Nov 26 08:10:54 crc kubenswrapper[4492]: I1126 08:10:54.615196 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-6fdjv" Nov 26 08:10:55 crc kubenswrapper[4492]: I1126 08:10:55.647763 4492 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-6fdjv" podUID="4a5d68ef-dd2a-475b-b9a3-2339b378337d" containerName="registry-server" probeResult="failure" output=< Nov 26 08:10:55 crc kubenswrapper[4492]: timeout: failed to connect service ":50051" within 1s Nov 26 08:10:55 crc kubenswrapper[4492]: > Nov 26 08:11:04 crc kubenswrapper[4492]: I1126 08:11:04.646403 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-6fdjv" Nov 26 08:11:04 crc kubenswrapper[4492]: I1126 08:11:04.684239 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-6fdjv" Nov 26 08:11:04 crc kubenswrapper[4492]: I1126 08:11:04.875954 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-6fdjv"] Nov 26 08:11:05 crc kubenswrapper[4492]: I1126 08:11:05.985525 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-6fdjv" podUID="4a5d68ef-dd2a-475b-b9a3-2339b378337d" containerName="registry-server" containerID="cri-o://5bed4b1bea8771624d21d30ba0c75c2da8b753e7fd50a477f99d7d664f3fb972" gracePeriod=2 Nov 26 08:11:06 crc kubenswrapper[4492]: I1126 08:11:06.483396 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-6fdjv" Nov 26 08:11:06 crc kubenswrapper[4492]: I1126 08:11:06.673914 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4a5d68ef-dd2a-475b-b9a3-2339b378337d-utilities\") pod \"4a5d68ef-dd2a-475b-b9a3-2339b378337d\" (UID: \"4a5d68ef-dd2a-475b-b9a3-2339b378337d\") " Nov 26 08:11:06 crc kubenswrapper[4492]: I1126 08:11:06.673955 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4a5d68ef-dd2a-475b-b9a3-2339b378337d-catalog-content\") pod \"4a5d68ef-dd2a-475b-b9a3-2339b378337d\" (UID: \"4a5d68ef-dd2a-475b-b9a3-2339b378337d\") " Nov 26 08:11:06 crc kubenswrapper[4492]: I1126 08:11:06.674027 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jxj7n\" (UniqueName: \"kubernetes.io/projected/4a5d68ef-dd2a-475b-b9a3-2339b378337d-kube-api-access-jxj7n\") pod \"4a5d68ef-dd2a-475b-b9a3-2339b378337d\" (UID: \"4a5d68ef-dd2a-475b-b9a3-2339b378337d\") " Nov 26 08:11:06 crc kubenswrapper[4492]: I1126 08:11:06.674486 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4a5d68ef-dd2a-475b-b9a3-2339b378337d-utilities" (OuterVolumeSpecName: "utilities") pod "4a5d68ef-dd2a-475b-b9a3-2339b378337d" (UID: "4a5d68ef-dd2a-475b-b9a3-2339b378337d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 08:11:06 crc kubenswrapper[4492]: I1126 08:11:06.679257 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4a5d68ef-dd2a-475b-b9a3-2339b378337d-kube-api-access-jxj7n" (OuterVolumeSpecName: "kube-api-access-jxj7n") pod "4a5d68ef-dd2a-475b-b9a3-2339b378337d" (UID: "4a5d68ef-dd2a-475b-b9a3-2339b378337d"). InnerVolumeSpecName "kube-api-access-jxj7n". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:11:06 crc kubenswrapper[4492]: I1126 08:11:06.735725 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4a5d68ef-dd2a-475b-b9a3-2339b378337d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4a5d68ef-dd2a-475b-b9a3-2339b378337d" (UID: "4a5d68ef-dd2a-475b-b9a3-2339b378337d"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 08:11:06 crc kubenswrapper[4492]: I1126 08:11:06.776305 4492 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4a5d68ef-dd2a-475b-b9a3-2339b378337d-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 08:11:06 crc kubenswrapper[4492]: I1126 08:11:06.776328 4492 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4a5d68ef-dd2a-475b-b9a3-2339b378337d-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 08:11:06 crc kubenswrapper[4492]: I1126 08:11:06.776339 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jxj7n\" (UniqueName: \"kubernetes.io/projected/4a5d68ef-dd2a-475b-b9a3-2339b378337d-kube-api-access-jxj7n\") on node \"crc\" DevicePath \"\"" Nov 26 08:11:06 crc kubenswrapper[4492]: I1126 08:11:06.993197 4492 generic.go:334] "Generic (PLEG): container finished" podID="4a5d68ef-dd2a-475b-b9a3-2339b378337d" containerID="5bed4b1bea8771624d21d30ba0c75c2da8b753e7fd50a477f99d7d664f3fb972" exitCode=0 Nov 26 08:11:06 crc kubenswrapper[4492]: I1126 08:11:06.993286 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6fdjv" event={"ID":"4a5d68ef-dd2a-475b-b9a3-2339b378337d","Type":"ContainerDied","Data":"5bed4b1bea8771624d21d30ba0c75c2da8b753e7fd50a477f99d7d664f3fb972"} Nov 26 08:11:06 crc kubenswrapper[4492]: I1126 08:11:06.993339 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-6fdjv" Nov 26 08:11:06 crc kubenswrapper[4492]: I1126 08:11:06.994706 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6fdjv" event={"ID":"4a5d68ef-dd2a-475b-b9a3-2339b378337d","Type":"ContainerDied","Data":"610950bc2d409641aacbf6b92fcab999897f75b3424994343278ac3eaf74634c"} Nov 26 08:11:06 crc kubenswrapper[4492]: I1126 08:11:06.994819 4492 scope.go:117] "RemoveContainer" containerID="5bed4b1bea8771624d21d30ba0c75c2da8b753e7fd50a477f99d7d664f3fb972" Nov 26 08:11:07 crc kubenswrapper[4492]: I1126 08:11:07.028271 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-6fdjv"] Nov 26 08:11:07 crc kubenswrapper[4492]: I1126 08:11:07.029593 4492 scope.go:117] "RemoveContainer" containerID="d9c43a064c32e99a7e07444ecc27bc53e3585b4a7ab25e908c430dce7bf09e1c" Nov 26 08:11:07 crc kubenswrapper[4492]: I1126 08:11:07.046804 4492 scope.go:117] "RemoveContainer" containerID="829d9bfea23a1368979bbecf6823b06502f12e8e8d64403cf8cec92176ba2be0" Nov 26 08:11:07 crc kubenswrapper[4492]: I1126 08:11:07.050932 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-6fdjv"] Nov 26 08:11:07 crc kubenswrapper[4492]: I1126 08:11:07.081326 4492 scope.go:117] "RemoveContainer" containerID="5bed4b1bea8771624d21d30ba0c75c2da8b753e7fd50a477f99d7d664f3fb972" Nov 26 08:11:07 crc kubenswrapper[4492]: E1126 08:11:07.081665 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5bed4b1bea8771624d21d30ba0c75c2da8b753e7fd50a477f99d7d664f3fb972\": container with ID starting with 5bed4b1bea8771624d21d30ba0c75c2da8b753e7fd50a477f99d7d664f3fb972 not found: ID does not exist" containerID="5bed4b1bea8771624d21d30ba0c75c2da8b753e7fd50a477f99d7d664f3fb972" Nov 26 08:11:07 crc kubenswrapper[4492]: I1126 08:11:07.081712 4492 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5bed4b1bea8771624d21d30ba0c75c2da8b753e7fd50a477f99d7d664f3fb972"} err="failed to get container status \"5bed4b1bea8771624d21d30ba0c75c2da8b753e7fd50a477f99d7d664f3fb972\": rpc error: code = NotFound desc = could not find container \"5bed4b1bea8771624d21d30ba0c75c2da8b753e7fd50a477f99d7d664f3fb972\": container with ID starting with 5bed4b1bea8771624d21d30ba0c75c2da8b753e7fd50a477f99d7d664f3fb972 not found: ID does not exist" Nov 26 08:11:07 crc kubenswrapper[4492]: I1126 08:11:07.081737 4492 scope.go:117] "RemoveContainer" containerID="d9c43a064c32e99a7e07444ecc27bc53e3585b4a7ab25e908c430dce7bf09e1c" Nov 26 08:11:07 crc kubenswrapper[4492]: E1126 08:11:07.082018 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d9c43a064c32e99a7e07444ecc27bc53e3585b4a7ab25e908c430dce7bf09e1c\": container with ID starting with d9c43a064c32e99a7e07444ecc27bc53e3585b4a7ab25e908c430dce7bf09e1c not found: ID does not exist" containerID="d9c43a064c32e99a7e07444ecc27bc53e3585b4a7ab25e908c430dce7bf09e1c" Nov 26 08:11:07 crc kubenswrapper[4492]: I1126 08:11:07.082053 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d9c43a064c32e99a7e07444ecc27bc53e3585b4a7ab25e908c430dce7bf09e1c"} err="failed to get container status \"d9c43a064c32e99a7e07444ecc27bc53e3585b4a7ab25e908c430dce7bf09e1c\": rpc error: code = NotFound desc = could not find container \"d9c43a064c32e99a7e07444ecc27bc53e3585b4a7ab25e908c430dce7bf09e1c\": container with ID starting with d9c43a064c32e99a7e07444ecc27bc53e3585b4a7ab25e908c430dce7bf09e1c not found: ID does not exist" Nov 26 08:11:07 crc kubenswrapper[4492]: I1126 08:11:07.082077 4492 scope.go:117] "RemoveContainer" containerID="829d9bfea23a1368979bbecf6823b06502f12e8e8d64403cf8cec92176ba2be0" Nov 26 08:11:07 crc kubenswrapper[4492]: E1126 08:11:07.082380 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"829d9bfea23a1368979bbecf6823b06502f12e8e8d64403cf8cec92176ba2be0\": container with ID starting with 829d9bfea23a1368979bbecf6823b06502f12e8e8d64403cf8cec92176ba2be0 not found: ID does not exist" containerID="829d9bfea23a1368979bbecf6823b06502f12e8e8d64403cf8cec92176ba2be0" Nov 26 08:11:07 crc kubenswrapper[4492]: I1126 08:11:07.082404 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"829d9bfea23a1368979bbecf6823b06502f12e8e8d64403cf8cec92176ba2be0"} err="failed to get container status \"829d9bfea23a1368979bbecf6823b06502f12e8e8d64403cf8cec92176ba2be0\": rpc error: code = NotFound desc = could not find container \"829d9bfea23a1368979bbecf6823b06502f12e8e8d64403cf8cec92176ba2be0\": container with ID starting with 829d9bfea23a1368979bbecf6823b06502f12e8e8d64403cf8cec92176ba2be0 not found: ID does not exist" Nov 26 08:11:08 crc kubenswrapper[4492]: I1126 08:11:08.446339 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4a5d68ef-dd2a-475b-b9a3-2339b378337d" path="/var/lib/kubelet/pods/4a5d68ef-dd2a-475b-b9a3-2339b378337d/volumes" Nov 26 08:11:19 crc kubenswrapper[4492]: I1126 08:11:19.441734 4492 patch_prober.go:28] interesting pod/machine-config-daemon-6blv7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 
127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 08:11:19 crc kubenswrapper[4492]: I1126 08:11:19.442157 4492 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 08:11:19 crc kubenswrapper[4492]: I1126 08:11:19.442209 4492 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" Nov 26 08:11:19 crc kubenswrapper[4492]: I1126 08:11:19.442673 4492 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"2c1c7097f131db25021a698c76cc36529d7461302764c06cea956039d61a5397"} pod="openshift-machine-config-operator/machine-config-daemon-6blv7" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 08:11:19 crc kubenswrapper[4492]: I1126 08:11:19.442735 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" containerID="cri-o://2c1c7097f131db25021a698c76cc36529d7461302764c06cea956039d61a5397" gracePeriod=600 Nov 26 08:11:19 crc kubenswrapper[4492]: E1126 08:11:19.558733 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:11:20 crc kubenswrapper[4492]: I1126 08:11:20.080198 4492 generic.go:334] "Generic (PLEG): container finished" podID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerID="2c1c7097f131db25021a698c76cc36529d7461302764c06cea956039d61a5397" exitCode=0 Nov 26 08:11:20 crc kubenswrapper[4492]: I1126 08:11:20.080260 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" event={"ID":"04bf18ad-d2a1-4b30-a3fa-2b6247363c82","Type":"ContainerDied","Data":"2c1c7097f131db25021a698c76cc36529d7461302764c06cea956039d61a5397"} Nov 26 08:11:20 crc kubenswrapper[4492]: I1126 08:11:20.080461 4492 scope.go:117] "RemoveContainer" containerID="4294d04f48a00d21db3973cf76008e401cc1a49db778781a5749be9989d44808" Nov 26 08:11:20 crc kubenswrapper[4492]: I1126 08:11:20.081034 4492 scope.go:117] "RemoveContainer" containerID="2c1c7097f131db25021a698c76cc36529d7461302764c06cea956039d61a5397" Nov 26 08:11:20 crc kubenswrapper[4492]: E1126 08:11:20.081419 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:11:31 crc kubenswrapper[4492]: I1126 08:11:31.438061 4492 scope.go:117] "RemoveContainer" 
containerID="2c1c7097f131db25021a698c76cc36529d7461302764c06cea956039d61a5397" Nov 26 08:11:31 crc kubenswrapper[4492]: E1126 08:11:31.438649 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:11:45 crc kubenswrapper[4492]: I1126 08:11:45.437952 4492 scope.go:117] "RemoveContainer" containerID="2c1c7097f131db25021a698c76cc36529d7461302764c06cea956039d61a5397" Nov 26 08:11:45 crc kubenswrapper[4492]: E1126 08:11:45.438486 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:11:59 crc kubenswrapper[4492]: I1126 08:11:59.438942 4492 scope.go:117] "RemoveContainer" containerID="2c1c7097f131db25021a698c76cc36529d7461302764c06cea956039d61a5397" Nov 26 08:11:59 crc kubenswrapper[4492]: E1126 08:11:59.439730 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:12:12 crc kubenswrapper[4492]: I1126 08:12:12.438900 4492 scope.go:117] "RemoveContainer" containerID="2c1c7097f131db25021a698c76cc36529d7461302764c06cea956039d61a5397" Nov 26 08:12:12 crc kubenswrapper[4492]: E1126 08:12:12.439518 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:12:27 crc kubenswrapper[4492]: I1126 08:12:27.438704 4492 scope.go:117] "RemoveContainer" containerID="2c1c7097f131db25021a698c76cc36529d7461302764c06cea956039d61a5397" Nov 26 08:12:27 crc kubenswrapper[4492]: E1126 08:12:27.439590 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:12:40 crc kubenswrapper[4492]: I1126 08:12:40.438321 4492 scope.go:117] "RemoveContainer" containerID="2c1c7097f131db25021a698c76cc36529d7461302764c06cea956039d61a5397" Nov 26 08:12:40 crc kubenswrapper[4492]: E1126 08:12:40.439715 4492 pod_workers.go:1301] "Error syncing pod, 
skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:12:51 crc kubenswrapper[4492]: I1126 08:12:51.438691 4492 scope.go:117] "RemoveContainer" containerID="2c1c7097f131db25021a698c76cc36529d7461302764c06cea956039d61a5397" Nov 26 08:12:51 crc kubenswrapper[4492]: E1126 08:12:51.439976 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:13:06 crc kubenswrapper[4492]: I1126 08:13:06.439325 4492 scope.go:117] "RemoveContainer" containerID="2c1c7097f131db25021a698c76cc36529d7461302764c06cea956039d61a5397" Nov 26 08:13:06 crc kubenswrapper[4492]: E1126 08:13:06.440396 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:13:19 crc kubenswrapper[4492]: I1126 08:13:19.439576 4492 scope.go:117] "RemoveContainer" containerID="2c1c7097f131db25021a698c76cc36529d7461302764c06cea956039d61a5397" Nov 26 08:13:19 crc kubenswrapper[4492]: E1126 08:13:19.440674 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:13:32 crc kubenswrapper[4492]: I1126 08:13:32.438660 4492 scope.go:117] "RemoveContainer" containerID="2c1c7097f131db25021a698c76cc36529d7461302764c06cea956039d61a5397" Nov 26 08:13:32 crc kubenswrapper[4492]: E1126 08:13:32.439552 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:13:43 crc kubenswrapper[4492]: I1126 08:13:43.439012 4492 scope.go:117] "RemoveContainer" containerID="2c1c7097f131db25021a698c76cc36529d7461302764c06cea956039d61a5397" Nov 26 08:13:43 crc kubenswrapper[4492]: E1126 08:13:43.439969 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:13:58 crc kubenswrapper[4492]: I1126 08:13:58.439139 4492 scope.go:117] "RemoveContainer" containerID="2c1c7097f131db25021a698c76cc36529d7461302764c06cea956039d61a5397" Nov 26 08:13:58 crc kubenswrapper[4492]: E1126 08:13:58.439930 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:14:10 crc kubenswrapper[4492]: I1126 08:14:10.438519 4492 scope.go:117] "RemoveContainer" containerID="2c1c7097f131db25021a698c76cc36529d7461302764c06cea956039d61a5397" Nov 26 08:14:10 crc kubenswrapper[4492]: E1126 08:14:10.439325 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:14:25 crc kubenswrapper[4492]: I1126 08:14:25.438640 4492 scope.go:117] "RemoveContainer" containerID="2c1c7097f131db25021a698c76cc36529d7461302764c06cea956039d61a5397" Nov 26 08:14:25 crc kubenswrapper[4492]: E1126 08:14:25.439446 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:14:39 crc kubenswrapper[4492]: I1126 08:14:39.439194 4492 scope.go:117] "RemoveContainer" containerID="2c1c7097f131db25021a698c76cc36529d7461302764c06cea956039d61a5397" Nov 26 08:14:39 crc kubenswrapper[4492]: E1126 08:14:39.440069 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:14:54 crc kubenswrapper[4492]: I1126 08:14:54.444165 4492 scope.go:117] "RemoveContainer" containerID="2c1c7097f131db25021a698c76cc36529d7461302764c06cea956039d61a5397" Nov 26 08:14:54 crc kubenswrapper[4492]: E1126 08:14:54.445753 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" 
podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:15:00 crc kubenswrapper[4492]: I1126 08:15:00.231188 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402415-gmtvb"] Nov 26 08:15:00 crc kubenswrapper[4492]: E1126 08:15:00.232235 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4a5d68ef-dd2a-475b-b9a3-2339b378337d" containerName="extract-utilities" Nov 26 08:15:00 crc kubenswrapper[4492]: I1126 08:15:00.232256 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="4a5d68ef-dd2a-475b-b9a3-2339b378337d" containerName="extract-utilities" Nov 26 08:15:00 crc kubenswrapper[4492]: E1126 08:15:00.232321 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4a5d68ef-dd2a-475b-b9a3-2339b378337d" containerName="extract-content" Nov 26 08:15:00 crc kubenswrapper[4492]: I1126 08:15:00.232328 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="4a5d68ef-dd2a-475b-b9a3-2339b378337d" containerName="extract-content" Nov 26 08:15:00 crc kubenswrapper[4492]: E1126 08:15:00.232341 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4a5d68ef-dd2a-475b-b9a3-2339b378337d" containerName="registry-server" Nov 26 08:15:00 crc kubenswrapper[4492]: I1126 08:15:00.232347 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="4a5d68ef-dd2a-475b-b9a3-2339b378337d" containerName="registry-server" Nov 26 08:15:00 crc kubenswrapper[4492]: I1126 08:15:00.232587 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="4a5d68ef-dd2a-475b-b9a3-2339b378337d" containerName="registry-server" Nov 26 08:15:00 crc kubenswrapper[4492]: I1126 08:15:00.233392 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402415-gmtvb" Nov 26 08:15:00 crc kubenswrapper[4492]: I1126 08:15:00.243137 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 26 08:15:00 crc kubenswrapper[4492]: I1126 08:15:00.244657 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402415-gmtvb"] Nov 26 08:15:00 crc kubenswrapper[4492]: I1126 08:15:00.247111 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 26 08:15:00 crc kubenswrapper[4492]: I1126 08:15:00.413486 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dcssb\" (UniqueName: \"kubernetes.io/projected/abacab60-1227-435b-8b35-afd719bc372e-kube-api-access-dcssb\") pod \"collect-profiles-29402415-gmtvb\" (UID: \"abacab60-1227-435b-8b35-afd719bc372e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402415-gmtvb" Nov 26 08:15:00 crc kubenswrapper[4492]: I1126 08:15:00.413648 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/abacab60-1227-435b-8b35-afd719bc372e-secret-volume\") pod \"collect-profiles-29402415-gmtvb\" (UID: \"abacab60-1227-435b-8b35-afd719bc372e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402415-gmtvb" Nov 26 08:15:00 crc kubenswrapper[4492]: I1126 08:15:00.413696 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: 
\"kubernetes.io/configmap/abacab60-1227-435b-8b35-afd719bc372e-config-volume\") pod \"collect-profiles-29402415-gmtvb\" (UID: \"abacab60-1227-435b-8b35-afd719bc372e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402415-gmtvb" Nov 26 08:15:00 crc kubenswrapper[4492]: I1126 08:15:00.515216 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dcssb\" (UniqueName: \"kubernetes.io/projected/abacab60-1227-435b-8b35-afd719bc372e-kube-api-access-dcssb\") pod \"collect-profiles-29402415-gmtvb\" (UID: \"abacab60-1227-435b-8b35-afd719bc372e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402415-gmtvb" Nov 26 08:15:00 crc kubenswrapper[4492]: I1126 08:15:00.515298 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/abacab60-1227-435b-8b35-afd719bc372e-config-volume\") pod \"collect-profiles-29402415-gmtvb\" (UID: \"abacab60-1227-435b-8b35-afd719bc372e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402415-gmtvb" Nov 26 08:15:00 crc kubenswrapper[4492]: I1126 08:15:00.515318 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/abacab60-1227-435b-8b35-afd719bc372e-secret-volume\") pod \"collect-profiles-29402415-gmtvb\" (UID: \"abacab60-1227-435b-8b35-afd719bc372e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402415-gmtvb" Nov 26 08:15:00 crc kubenswrapper[4492]: I1126 08:15:00.517193 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/abacab60-1227-435b-8b35-afd719bc372e-config-volume\") pod \"collect-profiles-29402415-gmtvb\" (UID: \"abacab60-1227-435b-8b35-afd719bc372e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402415-gmtvb" Nov 26 08:15:00 crc kubenswrapper[4492]: I1126 08:15:00.527907 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/abacab60-1227-435b-8b35-afd719bc372e-secret-volume\") pod \"collect-profiles-29402415-gmtvb\" (UID: \"abacab60-1227-435b-8b35-afd719bc372e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402415-gmtvb" Nov 26 08:15:00 crc kubenswrapper[4492]: I1126 08:15:00.531504 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dcssb\" (UniqueName: \"kubernetes.io/projected/abacab60-1227-435b-8b35-afd719bc372e-kube-api-access-dcssb\") pod \"collect-profiles-29402415-gmtvb\" (UID: \"abacab60-1227-435b-8b35-afd719bc372e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402415-gmtvb" Nov 26 08:15:00 crc kubenswrapper[4492]: I1126 08:15:00.564995 4492 util.go:30] "No sandbox for pod can be found. 
Nov 26 08:15:01 crc kubenswrapper[4492]: I1126 08:15:01.148510 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402415-gmtvb"]
Nov 26 08:15:01 crc kubenswrapper[4492]: I1126 08:15:01.750540 4492 generic.go:334] "Generic (PLEG): container finished" podID="abacab60-1227-435b-8b35-afd719bc372e" containerID="1f6588e4efc1002b62d0ac16c7095f3db0f7f23d829deab454540049f7e73290" exitCode=0
Nov 26 08:15:01 crc kubenswrapper[4492]: I1126 08:15:01.750761 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402415-gmtvb" event={"ID":"abacab60-1227-435b-8b35-afd719bc372e","Type":"ContainerDied","Data":"1f6588e4efc1002b62d0ac16c7095f3db0f7f23d829deab454540049f7e73290"}
Nov 26 08:15:01 crc kubenswrapper[4492]: I1126 08:15:01.750833 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402415-gmtvb" event={"ID":"abacab60-1227-435b-8b35-afd719bc372e","Type":"ContainerStarted","Data":"d2b0599f7b027f1bcae1f4e7d06ce980e14e2a6edf0f565eb5cb9919406d3df9"}
Nov 26 08:15:03 crc kubenswrapper[4492]: I1126 08:15:03.092740 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402415-gmtvb"
Nov 26 08:15:03 crc kubenswrapper[4492]: I1126 08:15:03.288880 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dcssb\" (UniqueName: \"kubernetes.io/projected/abacab60-1227-435b-8b35-afd719bc372e-kube-api-access-dcssb\") pod \"abacab60-1227-435b-8b35-afd719bc372e\" (UID: \"abacab60-1227-435b-8b35-afd719bc372e\") "
Nov 26 08:15:03 crc kubenswrapper[4492]: I1126 08:15:03.288942 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/abacab60-1227-435b-8b35-afd719bc372e-config-volume\") pod \"abacab60-1227-435b-8b35-afd719bc372e\" (UID: \"abacab60-1227-435b-8b35-afd719bc372e\") "
Nov 26 08:15:03 crc kubenswrapper[4492]: I1126 08:15:03.289019 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/abacab60-1227-435b-8b35-afd719bc372e-secret-volume\") pod \"abacab60-1227-435b-8b35-afd719bc372e\" (UID: \"abacab60-1227-435b-8b35-afd719bc372e\") "
Nov 26 08:15:03 crc kubenswrapper[4492]: I1126 08:15:03.290071 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/abacab60-1227-435b-8b35-afd719bc372e-config-volume" (OuterVolumeSpecName: "config-volume") pod "abacab60-1227-435b-8b35-afd719bc372e" (UID: "abacab60-1227-435b-8b35-afd719bc372e"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 08:15:03 crc kubenswrapper[4492]: I1126 08:15:03.298255 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/abacab60-1227-435b-8b35-afd719bc372e-kube-api-access-dcssb" (OuterVolumeSpecName: "kube-api-access-dcssb") pod "abacab60-1227-435b-8b35-afd719bc372e" (UID: "abacab60-1227-435b-8b35-afd719bc372e"). InnerVolumeSpecName "kube-api-access-dcssb". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 08:15:03 crc kubenswrapper[4492]: I1126 08:15:03.298616 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/abacab60-1227-435b-8b35-afd719bc372e-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "abacab60-1227-435b-8b35-afd719bc372e" (UID: "abacab60-1227-435b-8b35-afd719bc372e"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 08:15:03 crc kubenswrapper[4492]: I1126 08:15:03.393219 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dcssb\" (UniqueName: \"kubernetes.io/projected/abacab60-1227-435b-8b35-afd719bc372e-kube-api-access-dcssb\") on node \"crc\" DevicePath \"\""
Nov 26 08:15:03 crc kubenswrapper[4492]: I1126 08:15:03.393252 4492 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/abacab60-1227-435b-8b35-afd719bc372e-config-volume\") on node \"crc\" DevicePath \"\""
Nov 26 08:15:03 crc kubenswrapper[4492]: I1126 08:15:03.393263 4492 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/abacab60-1227-435b-8b35-afd719bc372e-secret-volume\") on node \"crc\" DevicePath \"\""
Nov 26 08:15:03 crc kubenswrapper[4492]: I1126 08:15:03.776262 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402415-gmtvb" event={"ID":"abacab60-1227-435b-8b35-afd719bc372e","Type":"ContainerDied","Data":"d2b0599f7b027f1bcae1f4e7d06ce980e14e2a6edf0f565eb5cb9919406d3df9"}
Nov 26 08:15:03 crc kubenswrapper[4492]: I1126 08:15:03.776351 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402415-gmtvb"
Nov 26 08:15:03 crc kubenswrapper[4492]: I1126 08:15:03.776668 4492 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d2b0599f7b027f1bcae1f4e7d06ce980e14e2a6edf0f565eb5cb9919406d3df9"
Nov 26 08:15:04 crc kubenswrapper[4492]: I1126 08:15:04.174156 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402370-zbvz4"]
Nov 26 08:15:04 crc kubenswrapper[4492]: I1126 08:15:04.179683 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402370-zbvz4"]
Nov 26 08:15:04 crc kubenswrapper[4492]: I1126 08:15:04.452457 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5489cb28-d168-47f1-9976-98f3d6993ec0" path="/var/lib/kubelet/pods/5489cb28-d168-47f1-9976-98f3d6993ec0/volumes"
containerID="2c1c7097f131db25021a698c76cc36529d7461302764c06cea956039d61a5397" Nov 26 08:15:22 crc kubenswrapper[4492]: E1126 08:15:22.440072 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:15:37 crc kubenswrapper[4492]: I1126 08:15:37.439600 4492 scope.go:117] "RemoveContainer" containerID="2c1c7097f131db25021a698c76cc36529d7461302764c06cea956039d61a5397" Nov 26 08:15:37 crc kubenswrapper[4492]: E1126 08:15:37.440688 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:15:52 crc kubenswrapper[4492]: I1126 08:15:52.438773 4492 scope.go:117] "RemoveContainer" containerID="2c1c7097f131db25021a698c76cc36529d7461302764c06cea956039d61a5397" Nov 26 08:15:52 crc kubenswrapper[4492]: E1126 08:15:52.439668 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:16:00 crc kubenswrapper[4492]: I1126 08:16:00.878747 4492 scope.go:117] "RemoveContainer" containerID="0fc22a4230b1e40f5410be8b8f8e2f4aa077d6ad1a853539bde05c7ec45f429d" Nov 26 08:16:06 crc kubenswrapper[4492]: I1126 08:16:06.439083 4492 scope.go:117] "RemoveContainer" containerID="2c1c7097f131db25021a698c76cc36529d7461302764c06cea956039d61a5397" Nov 26 08:16:06 crc kubenswrapper[4492]: E1126 08:16:06.439778 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:16:18 crc kubenswrapper[4492]: I1126 08:16:18.438237 4492 scope.go:117] "RemoveContainer" containerID="2c1c7097f131db25021a698c76cc36529d7461302764c06cea956039d61a5397" Nov 26 08:16:18 crc kubenswrapper[4492]: E1126 08:16:18.438915 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:16:25 crc kubenswrapper[4492]: I1126 08:16:25.631623 4492 prober.go:107] "Probe failed" 
probeType="Liveness" pod="openstack/swift-proxy-645d6d85d7-cvr9h" podUID="e44b94a7-c7a7-40e1-8d00-9f27e0e0639e" containerName="proxy-server" probeResult="failure" output="HTTP probe failed with statuscode: 502" Nov 26 08:16:31 crc kubenswrapper[4492]: I1126 08:16:31.439043 4492 scope.go:117] "RemoveContainer" containerID="2c1c7097f131db25021a698c76cc36529d7461302764c06cea956039d61a5397" Nov 26 08:16:32 crc kubenswrapper[4492]: I1126 08:16:32.532869 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" event={"ID":"04bf18ad-d2a1-4b30-a3fa-2b6247363c82","Type":"ContainerStarted","Data":"b02586568611ce8b448b597d243cccc021cccf20898a15d2405c71ed42813d9a"} Nov 26 08:18:49 crc kubenswrapper[4492]: I1126 08:18:49.441120 4492 patch_prober.go:28] interesting pod/machine-config-daemon-6blv7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 08:18:49 crc kubenswrapper[4492]: I1126 08:18:49.442038 4492 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 08:19:09 crc kubenswrapper[4492]: I1126 08:19:09.345197 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-kp9gk"] Nov 26 08:19:09 crc kubenswrapper[4492]: E1126 08:19:09.346601 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="abacab60-1227-435b-8b35-afd719bc372e" containerName="collect-profiles" Nov 26 08:19:09 crc kubenswrapper[4492]: I1126 08:19:09.346623 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="abacab60-1227-435b-8b35-afd719bc372e" containerName="collect-profiles" Nov 26 08:19:09 crc kubenswrapper[4492]: I1126 08:19:09.346902 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="abacab60-1227-435b-8b35-afd719bc372e" containerName="collect-profiles" Nov 26 08:19:09 crc kubenswrapper[4492]: I1126 08:19:09.350461 4492 util.go:30] "No sandbox for pod can be found. 
Nov 26 08:18:49 crc kubenswrapper[4492]: I1126 08:18:49.441120 4492 patch_prober.go:28] interesting pod/machine-config-daemon-6blv7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 26 08:18:49 crc kubenswrapper[4492]: I1126 08:18:49.442038 4492 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 26 08:19:09 crc kubenswrapper[4492]: I1126 08:19:09.345197 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-kp9gk"]
Nov 26 08:19:09 crc kubenswrapper[4492]: E1126 08:19:09.346601 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="abacab60-1227-435b-8b35-afd719bc372e" containerName="collect-profiles"
Nov 26 08:19:09 crc kubenswrapper[4492]: I1126 08:19:09.346623 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="abacab60-1227-435b-8b35-afd719bc372e" containerName="collect-profiles"
Nov 26 08:19:09 crc kubenswrapper[4492]: I1126 08:19:09.346902 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="abacab60-1227-435b-8b35-afd719bc372e" containerName="collect-profiles"
Nov 26 08:19:09 crc kubenswrapper[4492]: I1126 08:19:09.350461 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-kp9gk"
Nov 26 08:19:09 crc kubenswrapper[4492]: I1126 08:19:09.380879 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-kp9gk"]
Nov 26 08:19:09 crc kubenswrapper[4492]: I1126 08:19:09.431047 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/af58319c-2ee4-49b5-811e-ede90d199978-utilities\") pod \"certified-operators-kp9gk\" (UID: \"af58319c-2ee4-49b5-811e-ede90d199978\") " pod="openshift-marketplace/certified-operators-kp9gk"
Nov 26 08:19:09 crc kubenswrapper[4492]: I1126 08:19:09.431302 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gxtzz\" (UniqueName: \"kubernetes.io/projected/af58319c-2ee4-49b5-811e-ede90d199978-kube-api-access-gxtzz\") pod \"certified-operators-kp9gk\" (UID: \"af58319c-2ee4-49b5-811e-ede90d199978\") " pod="openshift-marketplace/certified-operators-kp9gk"
Nov 26 08:19:09 crc kubenswrapper[4492]: I1126 08:19:09.431334 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/af58319c-2ee4-49b5-811e-ede90d199978-catalog-content\") pod \"certified-operators-kp9gk\" (UID: \"af58319c-2ee4-49b5-811e-ede90d199978\") " pod="openshift-marketplace/certified-operators-kp9gk"
Nov 26 08:19:09 crc kubenswrapper[4492]: I1126 08:19:09.533597 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gxtzz\" (UniqueName: \"kubernetes.io/projected/af58319c-2ee4-49b5-811e-ede90d199978-kube-api-access-gxtzz\") pod \"certified-operators-kp9gk\" (UID: \"af58319c-2ee4-49b5-811e-ede90d199978\") " pod="openshift-marketplace/certified-operators-kp9gk"
Nov 26 08:19:09 crc kubenswrapper[4492]: I1126 08:19:09.533644 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/af58319c-2ee4-49b5-811e-ede90d199978-catalog-content\") pod \"certified-operators-kp9gk\" (UID: \"af58319c-2ee4-49b5-811e-ede90d199978\") " pod="openshift-marketplace/certified-operators-kp9gk"
Nov 26 08:19:09 crc kubenswrapper[4492]: I1126 08:19:09.533734 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/af58319c-2ee4-49b5-811e-ede90d199978-utilities\") pod \"certified-operators-kp9gk\" (UID: \"af58319c-2ee4-49b5-811e-ede90d199978\") " pod="openshift-marketplace/certified-operators-kp9gk"
Nov 26 08:19:09 crc kubenswrapper[4492]: I1126 08:19:09.535898 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/af58319c-2ee4-49b5-811e-ede90d199978-catalog-content\") pod \"certified-operators-kp9gk\" (UID: \"af58319c-2ee4-49b5-811e-ede90d199978\") " pod="openshift-marketplace/certified-operators-kp9gk"
Nov 26 08:19:09 crc kubenswrapper[4492]: I1126 08:19:09.535998 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/af58319c-2ee4-49b5-811e-ede90d199978-utilities\") pod \"certified-operators-kp9gk\" (UID: \"af58319c-2ee4-49b5-811e-ede90d199978\") " pod="openshift-marketplace/certified-operators-kp9gk"
Nov 26 08:19:09 crc kubenswrapper[4492]: I1126 08:19:09.556406 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gxtzz\" (UniqueName: \"kubernetes.io/projected/af58319c-2ee4-49b5-811e-ede90d199978-kube-api-access-gxtzz\") pod \"certified-operators-kp9gk\" (UID: \"af58319c-2ee4-49b5-811e-ede90d199978\") " pod="openshift-marketplace/certified-operators-kp9gk"
Nov 26 08:19:09 crc kubenswrapper[4492]: I1126 08:19:09.676984 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-kp9gk"
Nov 26 08:19:10 crc kubenswrapper[4492]: I1126 08:19:10.533907 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-kp9gk"]
Nov 26 08:19:10 crc kubenswrapper[4492]: I1126 08:19:10.739407 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-wpbhr"]
Nov 26 08:19:10 crc kubenswrapper[4492]: I1126 08:19:10.742759 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-wpbhr"
Nov 26 08:19:10 crc kubenswrapper[4492]: I1126 08:19:10.750590 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-wpbhr"]
Nov 26 08:19:10 crc kubenswrapper[4492]: I1126 08:19:10.767013 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7dc2a85e-bc55-4e32-9ea5-30389384c8b1-catalog-content\") pod \"community-operators-wpbhr\" (UID: \"7dc2a85e-bc55-4e32-9ea5-30389384c8b1\") " pod="openshift-marketplace/community-operators-wpbhr"
Nov 26 08:19:10 crc kubenswrapper[4492]: I1126 08:19:10.767203 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9nzh6\" (UniqueName: \"kubernetes.io/projected/7dc2a85e-bc55-4e32-9ea5-30389384c8b1-kube-api-access-9nzh6\") pod \"community-operators-wpbhr\" (UID: \"7dc2a85e-bc55-4e32-9ea5-30389384c8b1\") " pod="openshift-marketplace/community-operators-wpbhr"
Nov 26 08:19:10 crc kubenswrapper[4492]: I1126 08:19:10.767244 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7dc2a85e-bc55-4e32-9ea5-30389384c8b1-utilities\") pod \"community-operators-wpbhr\" (UID: \"7dc2a85e-bc55-4e32-9ea5-30389384c8b1\") " pod="openshift-marketplace/community-operators-wpbhr"
Nov 26 08:19:10 crc kubenswrapper[4492]: I1126 08:19:10.869916 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9nzh6\" (UniqueName: \"kubernetes.io/projected/7dc2a85e-bc55-4e32-9ea5-30389384c8b1-kube-api-access-9nzh6\") pod \"community-operators-wpbhr\" (UID: \"7dc2a85e-bc55-4e32-9ea5-30389384c8b1\") " pod="openshift-marketplace/community-operators-wpbhr"
Nov 26 08:19:10 crc kubenswrapper[4492]: I1126 08:19:10.869971 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7dc2a85e-bc55-4e32-9ea5-30389384c8b1-utilities\") pod \"community-operators-wpbhr\" (UID: \"7dc2a85e-bc55-4e32-9ea5-30389384c8b1\") " pod="openshift-marketplace/community-operators-wpbhr"
Nov 26 08:19:10 crc kubenswrapper[4492]: I1126 08:19:10.870086 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7dc2a85e-bc55-4e32-9ea5-30389384c8b1-catalog-content\") pod \"community-operators-wpbhr\" (UID: \"7dc2a85e-bc55-4e32-9ea5-30389384c8b1\") " pod="openshift-marketplace/community-operators-wpbhr"
Nov 26 08:19:10 crc kubenswrapper[4492]: I1126 08:19:10.870903 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7dc2a85e-bc55-4e32-9ea5-30389384c8b1-catalog-content\") pod \"community-operators-wpbhr\" (UID: \"7dc2a85e-bc55-4e32-9ea5-30389384c8b1\") " pod="openshift-marketplace/community-operators-wpbhr"
Nov 26 08:19:10 crc kubenswrapper[4492]: I1126 08:19:10.870934 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7dc2a85e-bc55-4e32-9ea5-30389384c8b1-utilities\") pod \"community-operators-wpbhr\" (UID: \"7dc2a85e-bc55-4e32-9ea5-30389384c8b1\") " pod="openshift-marketplace/community-operators-wpbhr"
Nov 26 08:19:10 crc kubenswrapper[4492]: I1126 08:19:10.899953 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9nzh6\" (UniqueName: \"kubernetes.io/projected/7dc2a85e-bc55-4e32-9ea5-30389384c8b1-kube-api-access-9nzh6\") pod \"community-operators-wpbhr\" (UID: \"7dc2a85e-bc55-4e32-9ea5-30389384c8b1\") " pod="openshift-marketplace/community-operators-wpbhr"
Nov 26 08:19:11 crc kubenswrapper[4492]: I1126 08:19:11.005440 4492 generic.go:334] "Generic (PLEG): container finished" podID="af58319c-2ee4-49b5-811e-ede90d199978" containerID="4a6a17bd8a897a189d98557eb886e957bb27d6926c78112872ff4f770282fdb2" exitCode=0
Nov 26 08:19:11 crc kubenswrapper[4492]: I1126 08:19:11.005500 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kp9gk" event={"ID":"af58319c-2ee4-49b5-811e-ede90d199978","Type":"ContainerDied","Data":"4a6a17bd8a897a189d98557eb886e957bb27d6926c78112872ff4f770282fdb2"}
Nov 26 08:19:11 crc kubenswrapper[4492]: I1126 08:19:11.005565 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kp9gk" event={"ID":"af58319c-2ee4-49b5-811e-ede90d199978","Type":"ContainerStarted","Data":"4f95609ebbabf9738905dc822ba83bc3ee6b0cf0d54c44e7b44ebdd6d53d674d"}
Nov 26 08:19:11 crc kubenswrapper[4492]: I1126 08:19:11.011926 4492 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 26 08:19:11 crc kubenswrapper[4492]: I1126 08:19:11.066943 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-wpbhr"
Nov 26 08:19:11 crc kubenswrapper[4492]: E1126 08:19:11.688583 4492 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 192.168.25.180:41390->192.168.25.180:45641: write tcp 192.168.25.180:41390->192.168.25.180:45641: write: connection reset by peer
Nov 26 08:19:11 crc kubenswrapper[4492]: I1126 08:19:11.810480 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-wpbhr"]
Nov 26 08:19:12 crc kubenswrapper[4492]: I1126 08:19:12.016083 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wpbhr" event={"ID":"7dc2a85e-bc55-4e32-9ea5-30389384c8b1","Type":"ContainerStarted","Data":"0eb68e9aaddfc6864a18148dfb9a5a4a73c22abbb262d85032ed2ffbe1512775"}
Nov 26 08:19:12 crc kubenswrapper[4492]: I1126 08:19:12.936909 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-plj5z"]
Nov 26 08:19:12 crc kubenswrapper[4492]: I1126 08:19:12.941590 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-plj5z"
Nov 26 08:19:12 crc kubenswrapper[4492]: I1126 08:19:12.964182 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-plj5z"]
Nov 26 08:19:13 crc kubenswrapper[4492]: I1126 08:19:13.027983 4492 generic.go:334] "Generic (PLEG): container finished" podID="af58319c-2ee4-49b5-811e-ede90d199978" containerID="092c563a8de2235b20c6d3b7aa58ceb7cd919bea0060399e3a2631acca3526e8" exitCode=0
Nov 26 08:19:13 crc kubenswrapper[4492]: I1126 08:19:13.028054 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kp9gk" event={"ID":"af58319c-2ee4-49b5-811e-ede90d199978","Type":"ContainerDied","Data":"092c563a8de2235b20c6d3b7aa58ceb7cd919bea0060399e3a2631acca3526e8"}
Nov 26 08:19:13 crc kubenswrapper[4492]: I1126 08:19:13.032474 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wpbhr" event={"ID":"7dc2a85e-bc55-4e32-9ea5-30389384c8b1","Type":"ContainerDied","Data":"7cd3e2d9ea51c3ab7cf85274b82b01c2ce4de9ad6f4c00c945d9f53ae164611e"}
Nov 26 08:19:13 crc kubenswrapper[4492]: I1126 08:19:13.032533 4492 generic.go:334] "Generic (PLEG): container finished" podID="7dc2a85e-bc55-4e32-9ea5-30389384c8b1" containerID="7cd3e2d9ea51c3ab7cf85274b82b01c2ce4de9ad6f4c00c945d9f53ae164611e" exitCode=0
Nov 26 08:19:13 crc kubenswrapper[4492]: I1126 08:19:13.126465 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/54e1aa82-cac4-43e9-8897-3b8e45e41217-utilities\") pod \"redhat-marketplace-plj5z\" (UID: \"54e1aa82-cac4-43e9-8897-3b8e45e41217\") " pod="openshift-marketplace/redhat-marketplace-plj5z"
Nov 26 08:19:13 crc kubenswrapper[4492]: I1126 08:19:13.126545 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k754p\" (UniqueName: \"kubernetes.io/projected/54e1aa82-cac4-43e9-8897-3b8e45e41217-kube-api-access-k754p\") pod \"redhat-marketplace-plj5z\" (UID: \"54e1aa82-cac4-43e9-8897-3b8e45e41217\") " pod="openshift-marketplace/redhat-marketplace-plj5z"
Nov 26 08:19:13 crc kubenswrapper[4492]: I1126 08:19:13.126579 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/54e1aa82-cac4-43e9-8897-3b8e45e41217-catalog-content\") pod \"redhat-marketplace-plj5z\" (UID: \"54e1aa82-cac4-43e9-8897-3b8e45e41217\") " pod="openshift-marketplace/redhat-marketplace-plj5z"
Nov 26 08:19:13 crc kubenswrapper[4492]: I1126 08:19:13.229484 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/54e1aa82-cac4-43e9-8897-3b8e45e41217-utilities\") pod \"redhat-marketplace-plj5z\" (UID: \"54e1aa82-cac4-43e9-8897-3b8e45e41217\") " pod="openshift-marketplace/redhat-marketplace-plj5z"
Nov 26 08:19:13 crc kubenswrapper[4492]: I1126 08:19:13.229588 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k754p\" (UniqueName: \"kubernetes.io/projected/54e1aa82-cac4-43e9-8897-3b8e45e41217-kube-api-access-k754p\") pod \"redhat-marketplace-plj5z\" (UID: \"54e1aa82-cac4-43e9-8897-3b8e45e41217\") " pod="openshift-marketplace/redhat-marketplace-plj5z"
Nov 26 08:19:13 crc kubenswrapper[4492]: I1126 08:19:13.229619 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/54e1aa82-cac4-43e9-8897-3b8e45e41217-catalog-content\") pod \"redhat-marketplace-plj5z\" (UID: \"54e1aa82-cac4-43e9-8897-3b8e45e41217\") " pod="openshift-marketplace/redhat-marketplace-plj5z"
Nov 26 08:19:13 crc kubenswrapper[4492]: I1126 08:19:13.230234 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/54e1aa82-cac4-43e9-8897-3b8e45e41217-catalog-content\") pod \"redhat-marketplace-plj5z\" (UID: \"54e1aa82-cac4-43e9-8897-3b8e45e41217\") " pod="openshift-marketplace/redhat-marketplace-plj5z"
Nov 26 08:19:13 crc kubenswrapper[4492]: I1126 08:19:13.230518 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/54e1aa82-cac4-43e9-8897-3b8e45e41217-utilities\") pod \"redhat-marketplace-plj5z\" (UID: \"54e1aa82-cac4-43e9-8897-3b8e45e41217\") " pod="openshift-marketplace/redhat-marketplace-plj5z"
Nov 26 08:19:13 crc kubenswrapper[4492]: I1126 08:19:13.261197 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k754p\" (UniqueName: \"kubernetes.io/projected/54e1aa82-cac4-43e9-8897-3b8e45e41217-kube-api-access-k754p\") pod \"redhat-marketplace-plj5z\" (UID: \"54e1aa82-cac4-43e9-8897-3b8e45e41217\") " pod="openshift-marketplace/redhat-marketplace-plj5z"
Nov 26 08:19:13 crc kubenswrapper[4492]: I1126 08:19:13.268148 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-plj5z"
Nov 26 08:19:13 crc kubenswrapper[4492]: I1126 08:19:13.897642 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-plj5z"]
Nov 26 08:19:14 crc kubenswrapper[4492]: I1126 08:19:14.055878 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-plj5z" event={"ID":"54e1aa82-cac4-43e9-8897-3b8e45e41217","Type":"ContainerStarted","Data":"6cde7bcf673da4a7d9be75dc3b1af49bf688f37bc78d49357c15e38b44d237e9"}
Nov 26 08:19:14 crc kubenswrapper[4492]: I1126 08:19:14.059903 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wpbhr" event={"ID":"7dc2a85e-bc55-4e32-9ea5-30389384c8b1","Type":"ContainerStarted","Data":"6ecfaab3ecd10f05c211298a38bd031875e6b647ed50744db01c896c9aeeb3cf"}
Nov 26 08:19:14 crc kubenswrapper[4492]: I1126 08:19:14.062997 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kp9gk" event={"ID":"af58319c-2ee4-49b5-811e-ede90d199978","Type":"ContainerStarted","Data":"c2453a8ebdc1a49d9f2c4e69bde02287bcc37053acdd615b59fe4761fd8ee53a"}
Nov 26 08:19:14 crc kubenswrapper[4492]: I1126 08:19:14.117079 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-kp9gk" podStartSLOduration=2.581114865 podStartE2EDuration="5.116757508s" podCreationTimestamp="2025-11-26 08:19:09 +0000 UTC" firstStartedPulling="2025-11-26 08:19:11.007622134 +0000 UTC m=+5446.891510432" lastFinishedPulling="2025-11-26 08:19:13.543264777 +0000 UTC m=+5449.427153075" observedRunningTime="2025-11-26 08:19:14.10552536 +0000 UTC m=+5449.989413658" watchObservedRunningTime="2025-11-26 08:19:14.116757508 +0000 UTC m=+5450.000645806"
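The startup-latency entry above decomposes as: podStartE2EDuration = watchObservedRunningTime minus podCreationTimestamp, and podStartSLOduration = E2E duration minus the image-pull window (lastFinishedPulling minus firstStartedPulling). A quick check in Go, with all timestamps copied from the certified-operators-kp9gk entry above:

// latency_check.go -- verifies the arithmetic in the pod startup latency
// entry above for certified-operators-kp9gk.
package main

import (
	"fmt"
	"time"
)

func main() {
	// Layout matching Go's time.Time.String() output used in the log.
	const layout = "2006-01-02 15:04:05.999999999 -0700 MST"
	parse := func(s string) time.Time {
		t, err := time.Parse(layout, s)
		if err != nil {
			panic(err)
		}
		return t
	}
	created := parse("2025-11-26 08:19:09 +0000 UTC")
	running := parse("2025-11-26 08:19:14.116757508 +0000 UTC")
	pullStart := parse("2025-11-26 08:19:11.007622134 +0000 UTC")
	pullEnd := parse("2025-11-26 08:19:13.543264777 +0000 UTC")

	e2e := running.Sub(created)         // 5.116757508s == podStartE2EDuration
	slo := e2e - pullEnd.Sub(pullStart) // 2.581114865s == podStartSLOduration
	fmt.Println(e2e, slo)
}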
[\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7dc2a85e_bc55_4e32_9ea5_30389384c8b1.slice/crio-conmon-6ecfaab3ecd10f05c211298a38bd031875e6b647ed50744db01c896c9aeeb3cf.scope\": RecentStats: unable to find data in memory cache]" Nov 26 08:19:16 crc kubenswrapper[4492]: I1126 08:19:16.095455 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-plj5z" event={"ID":"54e1aa82-cac4-43e9-8897-3b8e45e41217","Type":"ContainerStarted","Data":"b67373b0c5323a355c12e6865263692a8e239f20ac38c4fe3744701295c8a6e0"} Nov 26 08:19:16 crc kubenswrapper[4492]: I1126 08:19:16.098654 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wpbhr" event={"ID":"7dc2a85e-bc55-4e32-9ea5-30389384c8b1","Type":"ContainerStarted","Data":"3277d3b976bd41a96514cf901677093070ca94e05288f1853d2b7f87a25ef2e3"} Nov 26 08:19:16 crc kubenswrapper[4492]: I1126 08:19:16.165944 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-wpbhr" podStartSLOduration=3.574892965 podStartE2EDuration="6.165922277s" podCreationTimestamp="2025-11-26 08:19:10 +0000 UTC" firstStartedPulling="2025-11-26 08:19:13.034970222 +0000 UTC m=+5448.918858520" lastFinishedPulling="2025-11-26 08:19:15.625999534 +0000 UTC m=+5451.509887832" observedRunningTime="2025-11-26 08:19:16.161517865 +0000 UTC m=+5452.045406163" watchObservedRunningTime="2025-11-26 08:19:16.165922277 +0000 UTC m=+5452.049810575" Nov 26 08:19:17 crc kubenswrapper[4492]: I1126 08:19:17.111915 4492 generic.go:334] "Generic (PLEG): container finished" podID="54e1aa82-cac4-43e9-8897-3b8e45e41217" containerID="b67373b0c5323a355c12e6865263692a8e239f20ac38c4fe3744701295c8a6e0" exitCode=0 Nov 26 08:19:17 crc kubenswrapper[4492]: I1126 08:19:17.112032 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-plj5z" event={"ID":"54e1aa82-cac4-43e9-8897-3b8e45e41217","Type":"ContainerDied","Data":"b67373b0c5323a355c12e6865263692a8e239f20ac38c4fe3744701295c8a6e0"} Nov 26 08:19:18 crc kubenswrapper[4492]: I1126 08:19:18.126693 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-plj5z" event={"ID":"54e1aa82-cac4-43e9-8897-3b8e45e41217","Type":"ContainerStarted","Data":"5b3bbe3998fe4371f533b15fdb426b3279366bd153ae038fb2c52aa8c2e5fc83"} Nov 26 08:19:18 crc kubenswrapper[4492]: I1126 08:19:18.173958 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-plj5z" podStartSLOduration=3.654414479 podStartE2EDuration="6.173931073s" podCreationTimestamp="2025-11-26 08:19:12 +0000 UTC" firstStartedPulling="2025-11-26 08:19:15.090508744 +0000 UTC m=+5450.974397042" lastFinishedPulling="2025-11-26 08:19:17.610025338 +0000 UTC m=+5453.493913636" observedRunningTime="2025-11-26 08:19:18.157960746 +0000 UTC m=+5454.041849044" watchObservedRunningTime="2025-11-26 08:19:18.173931073 +0000 UTC m=+5454.057819371" Nov 26 08:19:19 crc kubenswrapper[4492]: I1126 08:19:19.441487 4492 patch_prober.go:28] interesting pod/machine-config-daemon-6blv7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 08:19:19 crc kubenswrapper[4492]: I1126 08:19:19.442125 4492 prober.go:107] "Probe failed" probeType="Liveness" 
pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 08:19:19 crc kubenswrapper[4492]: I1126 08:19:19.678312 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-kp9gk" Nov 26 08:19:19 crc kubenswrapper[4492]: I1126 08:19:19.678390 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-kp9gk" Nov 26 08:19:20 crc kubenswrapper[4492]: I1126 08:19:20.757941 4492 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-kp9gk" podUID="af58319c-2ee4-49b5-811e-ede90d199978" containerName="registry-server" probeResult="failure" output=< Nov 26 08:19:20 crc kubenswrapper[4492]: timeout: failed to connect service ":50051" within 1s Nov 26 08:19:20 crc kubenswrapper[4492]: > Nov 26 08:19:21 crc kubenswrapper[4492]: I1126 08:19:21.067467 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-wpbhr" Nov 26 08:19:21 crc kubenswrapper[4492]: I1126 08:19:21.067830 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-wpbhr" Nov 26 08:19:22 crc kubenswrapper[4492]: I1126 08:19:22.104826 4492 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-wpbhr" podUID="7dc2a85e-bc55-4e32-9ea5-30389384c8b1" containerName="registry-server" probeResult="failure" output=< Nov 26 08:19:22 crc kubenswrapper[4492]: timeout: failed to connect service ":50051" within 1s Nov 26 08:19:22 crc kubenswrapper[4492]: > Nov 26 08:19:23 crc kubenswrapper[4492]: I1126 08:19:23.268623 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-plj5z" Nov 26 08:19:23 crc kubenswrapper[4492]: I1126 08:19:23.268973 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-plj5z" Nov 26 08:19:23 crc kubenswrapper[4492]: I1126 08:19:23.327256 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-plj5z" Nov 26 08:19:24 crc kubenswrapper[4492]: I1126 08:19:24.233701 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-plj5z" Nov 26 08:19:25 crc kubenswrapper[4492]: I1126 08:19:25.530680 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-plj5z"] Nov 26 08:19:26 crc kubenswrapper[4492]: I1126 08:19:26.205819 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-plj5z" podUID="54e1aa82-cac4-43e9-8897-3b8e45e41217" containerName="registry-server" containerID="cri-o://5b3bbe3998fe4371f533b15fdb426b3279366bd153ae038fb2c52aa8c2e5fc83" gracePeriod=2 Nov 26 08:19:26 crc kubenswrapper[4492]: I1126 08:19:26.735107 4492 util.go:48] "No ready sandbox for pod can be found. 
Nov 26 08:19:26 crc kubenswrapper[4492]: I1126 08:19:26.748443 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/54e1aa82-cac4-43e9-8897-3b8e45e41217-catalog-content\") pod \"54e1aa82-cac4-43e9-8897-3b8e45e41217\" (UID: \"54e1aa82-cac4-43e9-8897-3b8e45e41217\") "
Nov 26 08:19:26 crc kubenswrapper[4492]: I1126 08:19:26.748606 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k754p\" (UniqueName: \"kubernetes.io/projected/54e1aa82-cac4-43e9-8897-3b8e45e41217-kube-api-access-k754p\") pod \"54e1aa82-cac4-43e9-8897-3b8e45e41217\" (UID: \"54e1aa82-cac4-43e9-8897-3b8e45e41217\") "
Nov 26 08:19:26 crc kubenswrapper[4492]: I1126 08:19:26.748811 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/54e1aa82-cac4-43e9-8897-3b8e45e41217-utilities\") pod \"54e1aa82-cac4-43e9-8897-3b8e45e41217\" (UID: \"54e1aa82-cac4-43e9-8897-3b8e45e41217\") "
Nov 26 08:19:26 crc kubenswrapper[4492]: I1126 08:19:26.751502 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/54e1aa82-cac4-43e9-8897-3b8e45e41217-utilities" (OuterVolumeSpecName: "utilities") pod "54e1aa82-cac4-43e9-8897-3b8e45e41217" (UID: "54e1aa82-cac4-43e9-8897-3b8e45e41217"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 08:19:26 crc kubenswrapper[4492]: I1126 08:19:26.764440 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/54e1aa82-cac4-43e9-8897-3b8e45e41217-kube-api-access-k754p" (OuterVolumeSpecName: "kube-api-access-k754p") pod "54e1aa82-cac4-43e9-8897-3b8e45e41217" (UID: "54e1aa82-cac4-43e9-8897-3b8e45e41217"). InnerVolumeSpecName "kube-api-access-k754p". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 08:19:26 crc kubenswrapper[4492]: I1126 08:19:26.769814 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/54e1aa82-cac4-43e9-8897-3b8e45e41217-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "54e1aa82-cac4-43e9-8897-3b8e45e41217" (UID: "54e1aa82-cac4-43e9-8897-3b8e45e41217"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 08:19:26 crc kubenswrapper[4492]: I1126 08:19:26.853480 4492 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/54e1aa82-cac4-43e9-8897-3b8e45e41217-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 26 08:19:26 crc kubenswrapper[4492]: I1126 08:19:26.853526 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k754p\" (UniqueName: \"kubernetes.io/projected/54e1aa82-cac4-43e9-8897-3b8e45e41217-kube-api-access-k754p\") on node \"crc\" DevicePath \"\""
Nov 26 08:19:26 crc kubenswrapper[4492]: I1126 08:19:26.853541 4492 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/54e1aa82-cac4-43e9-8897-3b8e45e41217-utilities\") on node \"crc\" DevicePath \"\""
Nov 26 08:19:27 crc kubenswrapper[4492]: I1126 08:19:27.218126 4492 generic.go:334] "Generic (PLEG): container finished" podID="54e1aa82-cac4-43e9-8897-3b8e45e41217" containerID="5b3bbe3998fe4371f533b15fdb426b3279366bd153ae038fb2c52aa8c2e5fc83" exitCode=0
Nov 26 08:19:27 crc kubenswrapper[4492]: I1126 08:19:27.218213 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-plj5z" event={"ID":"54e1aa82-cac4-43e9-8897-3b8e45e41217","Type":"ContainerDied","Data":"5b3bbe3998fe4371f533b15fdb426b3279366bd153ae038fb2c52aa8c2e5fc83"}
Nov 26 08:19:27 crc kubenswrapper[4492]: I1126 08:19:27.218240 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-plj5z"
Nov 26 08:19:27 crc kubenswrapper[4492]: I1126 08:19:27.218258 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-plj5z" event={"ID":"54e1aa82-cac4-43e9-8897-3b8e45e41217","Type":"ContainerDied","Data":"6cde7bcf673da4a7d9be75dc3b1af49bf688f37bc78d49357c15e38b44d237e9"}
Nov 26 08:19:27 crc kubenswrapper[4492]: I1126 08:19:27.218643 4492 scope.go:117] "RemoveContainer" containerID="5b3bbe3998fe4371f533b15fdb426b3279366bd153ae038fb2c52aa8c2e5fc83"
Nov 26 08:19:27 crc kubenswrapper[4492]: I1126 08:19:27.265524 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-plj5z"]
Nov 26 08:19:27 crc kubenswrapper[4492]: I1126 08:19:27.266405 4492 scope.go:117] "RemoveContainer" containerID="b67373b0c5323a355c12e6865263692a8e239f20ac38c4fe3744701295c8a6e0"
Nov 26 08:19:27 crc kubenswrapper[4492]: I1126 08:19:27.332292 4492 scope.go:117] "RemoveContainer" containerID="a3b56f6600d4b394d83e0f7ff2f295609051c316947389b442a2323f18fef583"
Nov 26 08:19:27 crc kubenswrapper[4492]: I1126 08:19:27.343996 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-plj5z"]
Nov 26 08:19:27 crc kubenswrapper[4492]: I1126 08:19:27.363090 4492 scope.go:117] "RemoveContainer" containerID="5b3bbe3998fe4371f533b15fdb426b3279366bd153ae038fb2c52aa8c2e5fc83"
Nov 26 08:19:27 crc kubenswrapper[4492]: E1126 08:19:27.365714 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5b3bbe3998fe4371f533b15fdb426b3279366bd153ae038fb2c52aa8c2e5fc83\": container with ID starting with 5b3bbe3998fe4371f533b15fdb426b3279366bd153ae038fb2c52aa8c2e5fc83 not found: ID does not exist" containerID="5b3bbe3998fe4371f533b15fdb426b3279366bd153ae038fb2c52aa8c2e5fc83"
Nov 26 08:19:27 crc kubenswrapper[4492]: I1126 08:19:27.366289 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5b3bbe3998fe4371f533b15fdb426b3279366bd153ae038fb2c52aa8c2e5fc83"} err="failed to get container status \"5b3bbe3998fe4371f533b15fdb426b3279366bd153ae038fb2c52aa8c2e5fc83\": rpc error: code = NotFound desc = could not find container \"5b3bbe3998fe4371f533b15fdb426b3279366bd153ae038fb2c52aa8c2e5fc83\": container with ID starting with 5b3bbe3998fe4371f533b15fdb426b3279366bd153ae038fb2c52aa8c2e5fc83 not found: ID does not exist"
Nov 26 08:19:27 crc kubenswrapper[4492]: I1126 08:19:27.366334 4492 scope.go:117] "RemoveContainer" containerID="b67373b0c5323a355c12e6865263692a8e239f20ac38c4fe3744701295c8a6e0"
Nov 26 08:19:27 crc kubenswrapper[4492]: E1126 08:19:27.366666 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b67373b0c5323a355c12e6865263692a8e239f20ac38c4fe3744701295c8a6e0\": container with ID starting with b67373b0c5323a355c12e6865263692a8e239f20ac38c4fe3744701295c8a6e0 not found: ID does not exist" containerID="b67373b0c5323a355c12e6865263692a8e239f20ac38c4fe3744701295c8a6e0"
Nov 26 08:19:27 crc kubenswrapper[4492]: I1126 08:19:27.366696 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b67373b0c5323a355c12e6865263692a8e239f20ac38c4fe3744701295c8a6e0"} err="failed to get container status \"b67373b0c5323a355c12e6865263692a8e239f20ac38c4fe3744701295c8a6e0\": rpc error: code = NotFound desc = could not find container \"b67373b0c5323a355c12e6865263692a8e239f20ac38c4fe3744701295c8a6e0\": container with ID starting with b67373b0c5323a355c12e6865263692a8e239f20ac38c4fe3744701295c8a6e0 not found: ID does not exist"
Nov 26 08:19:27 crc kubenswrapper[4492]: I1126 08:19:27.366713 4492 scope.go:117] "RemoveContainer" containerID="a3b56f6600d4b394d83e0f7ff2f295609051c316947389b442a2323f18fef583"
Nov 26 08:19:27 crc kubenswrapper[4492]: E1126 08:19:27.366991 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a3b56f6600d4b394d83e0f7ff2f295609051c316947389b442a2323f18fef583\": container with ID starting with a3b56f6600d4b394d83e0f7ff2f295609051c316947389b442a2323f18fef583 not found: ID does not exist" containerID="a3b56f6600d4b394d83e0f7ff2f295609051c316947389b442a2323f18fef583"
Nov 26 08:19:27 crc kubenswrapper[4492]: I1126 08:19:27.367020 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a3b56f6600d4b394d83e0f7ff2f295609051c316947389b442a2323f18fef583"} err="failed to get container status \"a3b56f6600d4b394d83e0f7ff2f295609051c316947389b442a2323f18fef583\": rpc error: code = NotFound desc = could not find container \"a3b56f6600d4b394d83e0f7ff2f295609051c316947389b442a2323f18fef583\": container with ID starting with a3b56f6600d4b394d83e0f7ff2f295609051c316947389b442a2323f18fef583 not found: ID does not exist"
Nov 26 08:19:28 crc kubenswrapper[4492]: I1126 08:19:28.452464 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="54e1aa82-cac4-43e9-8897-3b8e45e41217" path="/var/lib/kubelet/pods/54e1aa82-cac4-43e9-8897-3b8e45e41217/volumes"
Nov 26 08:19:29 crc kubenswrapper[4492]: I1126 08:19:29.728483 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-kp9gk"
Nov 26 08:19:29 crc kubenswrapper[4492]: I1126 08:19:29.793457 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-kp9gk"
probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-kp9gk" Nov 26 08:19:31 crc kubenswrapper[4492]: I1126 08:19:31.108127 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-wpbhr" Nov 26 08:19:31 crc kubenswrapper[4492]: I1126 08:19:31.151617 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-wpbhr" Nov 26 08:19:32 crc kubenswrapper[4492]: I1126 08:19:32.926046 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-kp9gk"] Nov 26 08:19:32 crc kubenswrapper[4492]: I1126 08:19:32.926693 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-kp9gk" podUID="af58319c-2ee4-49b5-811e-ede90d199978" containerName="registry-server" containerID="cri-o://c2453a8ebdc1a49d9f2c4e69bde02287bcc37053acdd615b59fe4761fd8ee53a" gracePeriod=2 Nov 26 08:19:33 crc kubenswrapper[4492]: I1126 08:19:33.303872 4492 generic.go:334] "Generic (PLEG): container finished" podID="af58319c-2ee4-49b5-811e-ede90d199978" containerID="c2453a8ebdc1a49d9f2c4e69bde02287bcc37053acdd615b59fe4761fd8ee53a" exitCode=0 Nov 26 08:19:33 crc kubenswrapper[4492]: I1126 08:19:33.304004 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kp9gk" event={"ID":"af58319c-2ee4-49b5-811e-ede90d199978","Type":"ContainerDied","Data":"c2453a8ebdc1a49d9f2c4e69bde02287bcc37053acdd615b59fe4761fd8ee53a"} Nov 26 08:19:33 crc kubenswrapper[4492]: I1126 08:19:33.424400 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-kp9gk" Nov 26 08:19:33 crc kubenswrapper[4492]: I1126 08:19:33.436466 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/af58319c-2ee4-49b5-811e-ede90d199978-catalog-content\") pod \"af58319c-2ee4-49b5-811e-ede90d199978\" (UID: \"af58319c-2ee4-49b5-811e-ede90d199978\") " Nov 26 08:19:33 crc kubenswrapper[4492]: I1126 08:19:33.436541 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/af58319c-2ee4-49b5-811e-ede90d199978-utilities\") pod \"af58319c-2ee4-49b5-811e-ede90d199978\" (UID: \"af58319c-2ee4-49b5-811e-ede90d199978\") " Nov 26 08:19:33 crc kubenswrapper[4492]: I1126 08:19:33.436563 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gxtzz\" (UniqueName: \"kubernetes.io/projected/af58319c-2ee4-49b5-811e-ede90d199978-kube-api-access-gxtzz\") pod \"af58319c-2ee4-49b5-811e-ede90d199978\" (UID: \"af58319c-2ee4-49b5-811e-ede90d199978\") " Nov 26 08:19:33 crc kubenswrapper[4492]: I1126 08:19:33.437641 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/af58319c-2ee4-49b5-811e-ede90d199978-utilities" (OuterVolumeSpecName: "utilities") pod "af58319c-2ee4-49b5-811e-ede90d199978" (UID: "af58319c-2ee4-49b5-811e-ede90d199978"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 08:19:33 crc kubenswrapper[4492]: I1126 08:19:33.452223 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/af58319c-2ee4-49b5-811e-ede90d199978-kube-api-access-gxtzz" (OuterVolumeSpecName: "kube-api-access-gxtzz") pod "af58319c-2ee4-49b5-811e-ede90d199978" (UID: "af58319c-2ee4-49b5-811e-ede90d199978"). InnerVolumeSpecName "kube-api-access-gxtzz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:19:33 crc kubenswrapper[4492]: I1126 08:19:33.506481 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/af58319c-2ee4-49b5-811e-ede90d199978-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "af58319c-2ee4-49b5-811e-ede90d199978" (UID: "af58319c-2ee4-49b5-811e-ede90d199978"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 08:19:33 crc kubenswrapper[4492]: I1126 08:19:33.541259 4492 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/af58319c-2ee4-49b5-811e-ede90d199978-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 08:19:33 crc kubenswrapper[4492]: I1126 08:19:33.541294 4492 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/af58319c-2ee4-49b5-811e-ede90d199978-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 08:19:33 crc kubenswrapper[4492]: I1126 08:19:33.541309 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gxtzz\" (UniqueName: \"kubernetes.io/projected/af58319c-2ee4-49b5-811e-ede90d199978-kube-api-access-gxtzz\") on node \"crc\" DevicePath \"\"" Nov 26 08:19:33 crc kubenswrapper[4492]: I1126 08:19:33.936247 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-wpbhr"] Nov 26 08:19:33 crc kubenswrapper[4492]: I1126 08:19:33.936532 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-wpbhr" podUID="7dc2a85e-bc55-4e32-9ea5-30389384c8b1" containerName="registry-server" containerID="cri-o://3277d3b976bd41a96514cf901677093070ca94e05288f1853d2b7f87a25ef2e3" gracePeriod=2 Nov 26 08:19:34 crc kubenswrapper[4492]: I1126 08:19:34.318628 4492 generic.go:334] "Generic (PLEG): container finished" podID="7dc2a85e-bc55-4e32-9ea5-30389384c8b1" containerID="3277d3b976bd41a96514cf901677093070ca94e05288f1853d2b7f87a25ef2e3" exitCode=0 Nov 26 08:19:34 crc kubenswrapper[4492]: I1126 08:19:34.319068 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wpbhr" event={"ID":"7dc2a85e-bc55-4e32-9ea5-30389384c8b1","Type":"ContainerDied","Data":"3277d3b976bd41a96514cf901677093070ca94e05288f1853d2b7f87a25ef2e3"} Nov 26 08:19:34 crc kubenswrapper[4492]: I1126 08:19:34.319107 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wpbhr" event={"ID":"7dc2a85e-bc55-4e32-9ea5-30389384c8b1","Type":"ContainerDied","Data":"0eb68e9aaddfc6864a18148dfb9a5a4a73c22abbb262d85032ed2ffbe1512775"} Nov 26 08:19:34 crc kubenswrapper[4492]: I1126 08:19:34.319122 4492 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0eb68e9aaddfc6864a18148dfb9a5a4a73c22abbb262d85032ed2ffbe1512775" Nov 26 08:19:34 crc kubenswrapper[4492]: I1126 08:19:34.323023 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/certified-operators-kp9gk" event={"ID":"af58319c-2ee4-49b5-811e-ede90d199978","Type":"ContainerDied","Data":"4f95609ebbabf9738905dc822ba83bc3ee6b0cf0d54c44e7b44ebdd6d53d674d"} Nov 26 08:19:34 crc kubenswrapper[4492]: I1126 08:19:34.323110 4492 scope.go:117] "RemoveContainer" containerID="c2453a8ebdc1a49d9f2c4e69bde02287bcc37053acdd615b59fe4761fd8ee53a" Nov 26 08:19:34 crc kubenswrapper[4492]: I1126 08:19:34.323372 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-kp9gk" Nov 26 08:19:34 crc kubenswrapper[4492]: I1126 08:19:34.380556 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-wpbhr" Nov 26 08:19:34 crc kubenswrapper[4492]: I1126 08:19:34.382737 4492 scope.go:117] "RemoveContainer" containerID="092c563a8de2235b20c6d3b7aa58ceb7cd919bea0060399e3a2631acca3526e8" Nov 26 08:19:34 crc kubenswrapper[4492]: I1126 08:19:34.398505 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-kp9gk"] Nov 26 08:19:34 crc kubenswrapper[4492]: I1126 08:19:34.431591 4492 scope.go:117] "RemoveContainer" containerID="4a6a17bd8a897a189d98557eb886e957bb27d6926c78112872ff4f770282fdb2" Nov 26 08:19:34 crc kubenswrapper[4492]: I1126 08:19:34.431755 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-kp9gk"] Nov 26 08:19:34 crc kubenswrapper[4492]: I1126 08:19:34.469648 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7dc2a85e-bc55-4e32-9ea5-30389384c8b1-utilities\") pod \"7dc2a85e-bc55-4e32-9ea5-30389384c8b1\" (UID: \"7dc2a85e-bc55-4e32-9ea5-30389384c8b1\") " Nov 26 08:19:34 crc kubenswrapper[4492]: I1126 08:19:34.470145 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9nzh6\" (UniqueName: \"kubernetes.io/projected/7dc2a85e-bc55-4e32-9ea5-30389384c8b1-kube-api-access-9nzh6\") pod \"7dc2a85e-bc55-4e32-9ea5-30389384c8b1\" (UID: \"7dc2a85e-bc55-4e32-9ea5-30389384c8b1\") " Nov 26 08:19:34 crc kubenswrapper[4492]: I1126 08:19:34.470491 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7dc2a85e-bc55-4e32-9ea5-30389384c8b1-catalog-content\") pod \"7dc2a85e-bc55-4e32-9ea5-30389384c8b1\" (UID: \"7dc2a85e-bc55-4e32-9ea5-30389384c8b1\") " Nov 26 08:19:34 crc kubenswrapper[4492]: I1126 08:19:34.470795 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7dc2a85e-bc55-4e32-9ea5-30389384c8b1-utilities" (OuterVolumeSpecName: "utilities") pod "7dc2a85e-bc55-4e32-9ea5-30389384c8b1" (UID: "7dc2a85e-bc55-4e32-9ea5-30389384c8b1"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 08:19:34 crc kubenswrapper[4492]: I1126 08:19:34.472309 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="af58319c-2ee4-49b5-811e-ede90d199978" path="/var/lib/kubelet/pods/af58319c-2ee4-49b5-811e-ede90d199978/volumes" Nov 26 08:19:34 crc kubenswrapper[4492]: I1126 08:19:34.476089 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7dc2a85e-bc55-4e32-9ea5-30389384c8b1-kube-api-access-9nzh6" (OuterVolumeSpecName: "kube-api-access-9nzh6") pod "7dc2a85e-bc55-4e32-9ea5-30389384c8b1" (UID: "7dc2a85e-bc55-4e32-9ea5-30389384c8b1"). InnerVolumeSpecName "kube-api-access-9nzh6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:19:34 crc kubenswrapper[4492]: I1126 08:19:34.477688 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9nzh6\" (UniqueName: \"kubernetes.io/projected/7dc2a85e-bc55-4e32-9ea5-30389384c8b1-kube-api-access-9nzh6\") on node \"crc\" DevicePath \"\"" Nov 26 08:19:34 crc kubenswrapper[4492]: I1126 08:19:34.477718 4492 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7dc2a85e-bc55-4e32-9ea5-30389384c8b1-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 08:19:34 crc kubenswrapper[4492]: I1126 08:19:34.540850 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7dc2a85e-bc55-4e32-9ea5-30389384c8b1-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "7dc2a85e-bc55-4e32-9ea5-30389384c8b1" (UID: "7dc2a85e-bc55-4e32-9ea5-30389384c8b1"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 08:19:34 crc kubenswrapper[4492]: I1126 08:19:34.580420 4492 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7dc2a85e-bc55-4e32-9ea5-30389384c8b1-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 08:19:35 crc kubenswrapper[4492]: I1126 08:19:35.334905 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-wpbhr" Nov 26 08:19:35 crc kubenswrapper[4492]: I1126 08:19:35.382440 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-wpbhr"] Nov 26 08:19:35 crc kubenswrapper[4492]: I1126 08:19:35.392808 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-wpbhr"] Nov 26 08:19:36 crc kubenswrapper[4492]: I1126 08:19:36.450476 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7dc2a85e-bc55-4e32-9ea5-30389384c8b1" path="/var/lib/kubelet/pods/7dc2a85e-bc55-4e32-9ea5-30389384c8b1/volumes" Nov 26 08:19:49 crc kubenswrapper[4492]: I1126 08:19:49.441222 4492 patch_prober.go:28] interesting pod/machine-config-daemon-6blv7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 08:19:49 crc kubenswrapper[4492]: I1126 08:19:49.442037 4492 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 08:19:49 crc kubenswrapper[4492]: I1126 08:19:49.442100 4492 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" Nov 26 08:19:49 crc kubenswrapper[4492]: I1126 08:19:49.442881 4492 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"b02586568611ce8b448b597d243cccc021cccf20898a15d2405c71ed42813d9a"} pod="openshift-machine-config-operator/machine-config-daemon-6blv7" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 08:19:49 crc kubenswrapper[4492]: I1126 08:19:49.442963 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" containerID="cri-o://b02586568611ce8b448b597d243cccc021cccf20898a15d2405c71ed42813d9a" gracePeriod=600 Nov 26 08:19:50 crc kubenswrapper[4492]: I1126 08:19:50.500601 4492 generic.go:334] "Generic (PLEG): container finished" podID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerID="b02586568611ce8b448b597d243cccc021cccf20898a15d2405c71ed42813d9a" exitCode=0 Nov 26 08:19:50 crc kubenswrapper[4492]: I1126 08:19:50.500734 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" event={"ID":"04bf18ad-d2a1-4b30-a3fa-2b6247363c82","Type":"ContainerDied","Data":"b02586568611ce8b448b597d243cccc021cccf20898a15d2405c71ed42813d9a"} Nov 26 08:19:50 crc kubenswrapper[4492]: I1126 08:19:50.500896 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" event={"ID":"04bf18ad-d2a1-4b30-a3fa-2b6247363c82","Type":"ContainerStarted","Data":"0bef70ebc6b8e4ed62ac66ad1ca221d9d32cc9c38ff5d2136343f1b32d9200c3"} Nov 26 08:19:50 crc kubenswrapper[4492]: I1126 08:19:50.500944 4492 scope.go:117] "RemoveContainer" 
containerID="2c1c7097f131db25021a698c76cc36529d7461302764c06cea956039d61a5397" Nov 26 08:20:51 crc kubenswrapper[4492]: I1126 08:20:51.222262 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-zz6bc"] Nov 26 08:20:51 crc kubenswrapper[4492]: E1126 08:20:51.223851 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="af58319c-2ee4-49b5-811e-ede90d199978" containerName="extract-utilities" Nov 26 08:20:51 crc kubenswrapper[4492]: I1126 08:20:51.223880 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="af58319c-2ee4-49b5-811e-ede90d199978" containerName="extract-utilities" Nov 26 08:20:51 crc kubenswrapper[4492]: E1126 08:20:51.223920 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="54e1aa82-cac4-43e9-8897-3b8e45e41217" containerName="registry-server" Nov 26 08:20:51 crc kubenswrapper[4492]: I1126 08:20:51.223928 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="54e1aa82-cac4-43e9-8897-3b8e45e41217" containerName="registry-server" Nov 26 08:20:51 crc kubenswrapper[4492]: E1126 08:20:51.223943 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7dc2a85e-bc55-4e32-9ea5-30389384c8b1" containerName="extract-content" Nov 26 08:20:51 crc kubenswrapper[4492]: I1126 08:20:51.223951 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="7dc2a85e-bc55-4e32-9ea5-30389384c8b1" containerName="extract-content" Nov 26 08:20:51 crc kubenswrapper[4492]: E1126 08:20:51.223967 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="af58319c-2ee4-49b5-811e-ede90d199978" containerName="extract-content" Nov 26 08:20:51 crc kubenswrapper[4492]: I1126 08:20:51.223973 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="af58319c-2ee4-49b5-811e-ede90d199978" containerName="extract-content" Nov 26 08:20:51 crc kubenswrapper[4492]: E1126 08:20:51.223983 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="af58319c-2ee4-49b5-811e-ede90d199978" containerName="registry-server" Nov 26 08:20:51 crc kubenswrapper[4492]: I1126 08:20:51.223990 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="af58319c-2ee4-49b5-811e-ede90d199978" containerName="registry-server" Nov 26 08:20:51 crc kubenswrapper[4492]: E1126 08:20:51.224012 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="54e1aa82-cac4-43e9-8897-3b8e45e41217" containerName="extract-content" Nov 26 08:20:51 crc kubenswrapper[4492]: I1126 08:20:51.227214 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="54e1aa82-cac4-43e9-8897-3b8e45e41217" containerName="extract-content" Nov 26 08:20:51 crc kubenswrapper[4492]: E1126 08:20:51.227261 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7dc2a85e-bc55-4e32-9ea5-30389384c8b1" containerName="extract-utilities" Nov 26 08:20:51 crc kubenswrapper[4492]: I1126 08:20:51.227273 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="7dc2a85e-bc55-4e32-9ea5-30389384c8b1" containerName="extract-utilities" Nov 26 08:20:51 crc kubenswrapper[4492]: E1126 08:20:51.227290 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="54e1aa82-cac4-43e9-8897-3b8e45e41217" containerName="extract-utilities" Nov 26 08:20:51 crc kubenswrapper[4492]: I1126 08:20:51.227296 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="54e1aa82-cac4-43e9-8897-3b8e45e41217" containerName="extract-utilities" Nov 26 08:20:51 crc kubenswrapper[4492]: E1126 08:20:51.227306 4492 cpu_manager.go:410] "RemoveStaleState: removing 
container" podUID="7dc2a85e-bc55-4e32-9ea5-30389384c8b1" containerName="registry-server" Nov 26 08:20:51 crc kubenswrapper[4492]: I1126 08:20:51.227312 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="7dc2a85e-bc55-4e32-9ea5-30389384c8b1" containerName="registry-server" Nov 26 08:20:51 crc kubenswrapper[4492]: I1126 08:20:51.227603 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="54e1aa82-cac4-43e9-8897-3b8e45e41217" containerName="registry-server" Nov 26 08:20:51 crc kubenswrapper[4492]: I1126 08:20:51.227628 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="7dc2a85e-bc55-4e32-9ea5-30389384c8b1" containerName="registry-server" Nov 26 08:20:51 crc kubenswrapper[4492]: I1126 08:20:51.227642 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="af58319c-2ee4-49b5-811e-ede90d199978" containerName="registry-server" Nov 26 08:20:51 crc kubenswrapper[4492]: I1126 08:20:51.229136 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-zz6bc" Nov 26 08:20:51 crc kubenswrapper[4492]: I1126 08:20:51.244936 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-zz6bc"] Nov 26 08:20:51 crc kubenswrapper[4492]: I1126 08:20:51.320072 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fc6ade89-1311-472d-8ba8-535d1ee9b766-utilities\") pod \"redhat-operators-zz6bc\" (UID: \"fc6ade89-1311-472d-8ba8-535d1ee9b766\") " pod="openshift-marketplace/redhat-operators-zz6bc" Nov 26 08:20:51 crc kubenswrapper[4492]: I1126 08:20:51.320188 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mj7px\" (UniqueName: \"kubernetes.io/projected/fc6ade89-1311-472d-8ba8-535d1ee9b766-kube-api-access-mj7px\") pod \"redhat-operators-zz6bc\" (UID: \"fc6ade89-1311-472d-8ba8-535d1ee9b766\") " pod="openshift-marketplace/redhat-operators-zz6bc" Nov 26 08:20:51 crc kubenswrapper[4492]: I1126 08:20:51.320365 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fc6ade89-1311-472d-8ba8-535d1ee9b766-catalog-content\") pod \"redhat-operators-zz6bc\" (UID: \"fc6ade89-1311-472d-8ba8-535d1ee9b766\") " pod="openshift-marketplace/redhat-operators-zz6bc" Nov 26 08:20:51 crc kubenswrapper[4492]: I1126 08:20:51.422131 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mj7px\" (UniqueName: \"kubernetes.io/projected/fc6ade89-1311-472d-8ba8-535d1ee9b766-kube-api-access-mj7px\") pod \"redhat-operators-zz6bc\" (UID: \"fc6ade89-1311-472d-8ba8-535d1ee9b766\") " pod="openshift-marketplace/redhat-operators-zz6bc" Nov 26 08:20:51 crc kubenswrapper[4492]: I1126 08:20:51.422675 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fc6ade89-1311-472d-8ba8-535d1ee9b766-catalog-content\") pod \"redhat-operators-zz6bc\" (UID: \"fc6ade89-1311-472d-8ba8-535d1ee9b766\") " pod="openshift-marketplace/redhat-operators-zz6bc" Nov 26 08:20:51 crc kubenswrapper[4492]: I1126 08:20:51.422974 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fc6ade89-1311-472d-8ba8-535d1ee9b766-utilities\") pod \"redhat-operators-zz6bc\" 
(UID: \"fc6ade89-1311-472d-8ba8-535d1ee9b766\") " pod="openshift-marketplace/redhat-operators-zz6bc" Nov 26 08:20:51 crc kubenswrapper[4492]: I1126 08:20:51.423106 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fc6ade89-1311-472d-8ba8-535d1ee9b766-catalog-content\") pod \"redhat-operators-zz6bc\" (UID: \"fc6ade89-1311-472d-8ba8-535d1ee9b766\") " pod="openshift-marketplace/redhat-operators-zz6bc" Nov 26 08:20:51 crc kubenswrapper[4492]: I1126 08:20:51.423368 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fc6ade89-1311-472d-8ba8-535d1ee9b766-utilities\") pod \"redhat-operators-zz6bc\" (UID: \"fc6ade89-1311-472d-8ba8-535d1ee9b766\") " pod="openshift-marketplace/redhat-operators-zz6bc" Nov 26 08:20:51 crc kubenswrapper[4492]: I1126 08:20:51.455005 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mj7px\" (UniqueName: \"kubernetes.io/projected/fc6ade89-1311-472d-8ba8-535d1ee9b766-kube-api-access-mj7px\") pod \"redhat-operators-zz6bc\" (UID: \"fc6ade89-1311-472d-8ba8-535d1ee9b766\") " pod="openshift-marketplace/redhat-operators-zz6bc" Nov 26 08:20:51 crc kubenswrapper[4492]: I1126 08:20:51.547701 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-zz6bc" Nov 26 08:20:52 crc kubenswrapper[4492]: I1126 08:20:52.018502 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-zz6bc"] Nov 26 08:20:52 crc kubenswrapper[4492]: I1126 08:20:52.175921 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zz6bc" event={"ID":"fc6ade89-1311-472d-8ba8-535d1ee9b766","Type":"ContainerStarted","Data":"39a2add8701d5b2e9693c9b3098962da44dbaa47cad1b78d67c66e4ed0a8c121"} Nov 26 08:20:53 crc kubenswrapper[4492]: I1126 08:20:53.189939 4492 generic.go:334] "Generic (PLEG): container finished" podID="fc6ade89-1311-472d-8ba8-535d1ee9b766" containerID="06fc50c937f0af4fd85c8a7f675380c09b3a764dc001e461195def01e50451f5" exitCode=0 Nov 26 08:20:53 crc kubenswrapper[4492]: I1126 08:20:53.190313 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zz6bc" event={"ID":"fc6ade89-1311-472d-8ba8-535d1ee9b766","Type":"ContainerDied","Data":"06fc50c937f0af4fd85c8a7f675380c09b3a764dc001e461195def01e50451f5"} Nov 26 08:20:55 crc kubenswrapper[4492]: I1126 08:20:55.214935 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zz6bc" event={"ID":"fc6ade89-1311-472d-8ba8-535d1ee9b766","Type":"ContainerStarted","Data":"d2f50b0deac7aae56b41fcfa77df008bc72ed6e8a84222abd909f8ff5a00186f"} Nov 26 08:20:57 crc kubenswrapper[4492]: I1126 08:20:57.238147 4492 generic.go:334] "Generic (PLEG): container finished" podID="fc6ade89-1311-472d-8ba8-535d1ee9b766" containerID="d2f50b0deac7aae56b41fcfa77df008bc72ed6e8a84222abd909f8ff5a00186f" exitCode=0 Nov 26 08:20:57 crc kubenswrapper[4492]: I1126 08:20:57.238233 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zz6bc" event={"ID":"fc6ade89-1311-472d-8ba8-535d1ee9b766","Type":"ContainerDied","Data":"d2f50b0deac7aae56b41fcfa77df008bc72ed6e8a84222abd909f8ff5a00186f"} Nov 26 08:20:58 crc kubenswrapper[4492]: I1126 08:20:58.256284 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/redhat-operators-zz6bc" event={"ID":"fc6ade89-1311-472d-8ba8-535d1ee9b766","Type":"ContainerStarted","Data":"9b59ea4d5d407a7b6ff8374cb4c2aabe87dff08bb50a5627c6aede0b431e9118"} Nov 26 08:20:58 crc kubenswrapper[4492]: I1126 08:20:58.276243 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-zz6bc" podStartSLOduration=2.730050183 podStartE2EDuration="7.276086508s" podCreationTimestamp="2025-11-26 08:20:51 +0000 UTC" firstStartedPulling="2025-11-26 08:20:53.192454136 +0000 UTC m=+5549.076342433" lastFinishedPulling="2025-11-26 08:20:57.73849046 +0000 UTC m=+5553.622378758" observedRunningTime="2025-11-26 08:20:58.274014231 +0000 UTC m=+5554.157902530" watchObservedRunningTime="2025-11-26 08:20:58.276086508 +0000 UTC m=+5554.159974807" Nov 26 08:21:01 crc kubenswrapper[4492]: I1126 08:21:01.548658 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-zz6bc" Nov 26 08:21:01 crc kubenswrapper[4492]: I1126 08:21:01.549103 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-zz6bc" Nov 26 08:21:02 crc kubenswrapper[4492]: I1126 08:21:02.592627 4492 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-zz6bc" podUID="fc6ade89-1311-472d-8ba8-535d1ee9b766" containerName="registry-server" probeResult="failure" output=< Nov 26 08:21:02 crc kubenswrapper[4492]: timeout: failed to connect service ":50051" within 1s Nov 26 08:21:02 crc kubenswrapper[4492]: > Nov 26 08:21:11 crc kubenswrapper[4492]: I1126 08:21:11.591006 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-zz6bc" Nov 26 08:21:11 crc kubenswrapper[4492]: I1126 08:21:11.633966 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-zz6bc" Nov 26 08:21:11 crc kubenswrapper[4492]: I1126 08:21:11.830562 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-zz6bc"] Nov 26 08:21:13 crc kubenswrapper[4492]: I1126 08:21:13.411907 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-zz6bc" podUID="fc6ade89-1311-472d-8ba8-535d1ee9b766" containerName="registry-server" containerID="cri-o://9b59ea4d5d407a7b6ff8374cb4c2aabe87dff08bb50a5627c6aede0b431e9118" gracePeriod=2 Nov 26 08:21:13 crc kubenswrapper[4492]: E1126 08:21:13.936740 4492 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 192.168.25.180:54062->192.168.25.180:45641: write tcp 192.168.25.180:54062->192.168.25.180:45641: write: broken pipe Nov 26 08:21:13 crc kubenswrapper[4492]: I1126 08:21:13.973764 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-zz6bc" Nov 26 08:21:13 crc kubenswrapper[4492]: I1126 08:21:13.983797 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mj7px\" (UniqueName: \"kubernetes.io/projected/fc6ade89-1311-472d-8ba8-535d1ee9b766-kube-api-access-mj7px\") pod \"fc6ade89-1311-472d-8ba8-535d1ee9b766\" (UID: \"fc6ade89-1311-472d-8ba8-535d1ee9b766\") " Nov 26 08:21:13 crc kubenswrapper[4492]: I1126 08:21:13.983943 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fc6ade89-1311-472d-8ba8-535d1ee9b766-catalog-content\") pod \"fc6ade89-1311-472d-8ba8-535d1ee9b766\" (UID: \"fc6ade89-1311-472d-8ba8-535d1ee9b766\") " Nov 26 08:21:13 crc kubenswrapper[4492]: I1126 08:21:13.984064 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fc6ade89-1311-472d-8ba8-535d1ee9b766-utilities\") pod \"fc6ade89-1311-472d-8ba8-535d1ee9b766\" (UID: \"fc6ade89-1311-472d-8ba8-535d1ee9b766\") " Nov 26 08:21:13 crc kubenswrapper[4492]: I1126 08:21:13.984886 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fc6ade89-1311-472d-8ba8-535d1ee9b766-utilities" (OuterVolumeSpecName: "utilities") pod "fc6ade89-1311-472d-8ba8-535d1ee9b766" (UID: "fc6ade89-1311-472d-8ba8-535d1ee9b766"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 08:21:13 crc kubenswrapper[4492]: I1126 08:21:13.990479 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fc6ade89-1311-472d-8ba8-535d1ee9b766-kube-api-access-mj7px" (OuterVolumeSpecName: "kube-api-access-mj7px") pod "fc6ade89-1311-472d-8ba8-535d1ee9b766" (UID: "fc6ade89-1311-472d-8ba8-535d1ee9b766"). InnerVolumeSpecName "kube-api-access-mj7px". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:21:14 crc kubenswrapper[4492]: I1126 08:21:14.059017 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fc6ade89-1311-472d-8ba8-535d1ee9b766-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "fc6ade89-1311-472d-8ba8-535d1ee9b766" (UID: "fc6ade89-1311-472d-8ba8-535d1ee9b766"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 08:21:14 crc kubenswrapper[4492]: I1126 08:21:14.087308 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mj7px\" (UniqueName: \"kubernetes.io/projected/fc6ade89-1311-472d-8ba8-535d1ee9b766-kube-api-access-mj7px\") on node \"crc\" DevicePath \"\"" Nov 26 08:21:14 crc kubenswrapper[4492]: I1126 08:21:14.087342 4492 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fc6ade89-1311-472d-8ba8-535d1ee9b766-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 08:21:14 crc kubenswrapper[4492]: I1126 08:21:14.087357 4492 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fc6ade89-1311-472d-8ba8-535d1ee9b766-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 08:21:14 crc kubenswrapper[4492]: I1126 08:21:14.426277 4492 generic.go:334] "Generic (PLEG): container finished" podID="fc6ade89-1311-472d-8ba8-535d1ee9b766" containerID="9b59ea4d5d407a7b6ff8374cb4c2aabe87dff08bb50a5627c6aede0b431e9118" exitCode=0 Nov 26 08:21:14 crc kubenswrapper[4492]: I1126 08:21:14.426293 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-zz6bc" Nov 26 08:21:14 crc kubenswrapper[4492]: I1126 08:21:14.426346 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zz6bc" event={"ID":"fc6ade89-1311-472d-8ba8-535d1ee9b766","Type":"ContainerDied","Data":"9b59ea4d5d407a7b6ff8374cb4c2aabe87dff08bb50a5627c6aede0b431e9118"} Nov 26 08:21:14 crc kubenswrapper[4492]: I1126 08:21:14.426455 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zz6bc" event={"ID":"fc6ade89-1311-472d-8ba8-535d1ee9b766","Type":"ContainerDied","Data":"39a2add8701d5b2e9693c9b3098962da44dbaa47cad1b78d67c66e4ed0a8c121"} Nov 26 08:21:14 crc kubenswrapper[4492]: I1126 08:21:14.426486 4492 scope.go:117] "RemoveContainer" containerID="9b59ea4d5d407a7b6ff8374cb4c2aabe87dff08bb50a5627c6aede0b431e9118" Nov 26 08:21:14 crc kubenswrapper[4492]: I1126 08:21:14.454869 4492 scope.go:117] "RemoveContainer" containerID="d2f50b0deac7aae56b41fcfa77df008bc72ed6e8a84222abd909f8ff5a00186f" Nov 26 08:21:14 crc kubenswrapper[4492]: I1126 08:21:14.478286 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-zz6bc"] Nov 26 08:21:14 crc kubenswrapper[4492]: I1126 08:21:14.486562 4492 scope.go:117] "RemoveContainer" containerID="06fc50c937f0af4fd85c8a7f675380c09b3a764dc001e461195def01e50451f5" Nov 26 08:21:14 crc kubenswrapper[4492]: I1126 08:21:14.488670 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-zz6bc"] Nov 26 08:21:14 crc kubenswrapper[4492]: I1126 08:21:14.522422 4492 scope.go:117] "RemoveContainer" containerID="9b59ea4d5d407a7b6ff8374cb4c2aabe87dff08bb50a5627c6aede0b431e9118" Nov 26 08:21:14 crc kubenswrapper[4492]: E1126 08:21:14.522774 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9b59ea4d5d407a7b6ff8374cb4c2aabe87dff08bb50a5627c6aede0b431e9118\": container with ID starting with 9b59ea4d5d407a7b6ff8374cb4c2aabe87dff08bb50a5627c6aede0b431e9118 not found: ID does not exist" containerID="9b59ea4d5d407a7b6ff8374cb4c2aabe87dff08bb50a5627c6aede0b431e9118" Nov 26 08:21:14 crc kubenswrapper[4492]: I1126 08:21:14.522806 4492 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9b59ea4d5d407a7b6ff8374cb4c2aabe87dff08bb50a5627c6aede0b431e9118"} err="failed to get container status \"9b59ea4d5d407a7b6ff8374cb4c2aabe87dff08bb50a5627c6aede0b431e9118\": rpc error: code = NotFound desc = could not find container \"9b59ea4d5d407a7b6ff8374cb4c2aabe87dff08bb50a5627c6aede0b431e9118\": container with ID starting with 9b59ea4d5d407a7b6ff8374cb4c2aabe87dff08bb50a5627c6aede0b431e9118 not found: ID does not exist" Nov 26 08:21:14 crc kubenswrapper[4492]: I1126 08:21:14.522826 4492 scope.go:117] "RemoveContainer" containerID="d2f50b0deac7aae56b41fcfa77df008bc72ed6e8a84222abd909f8ff5a00186f" Nov 26 08:21:14 crc kubenswrapper[4492]: E1126 08:21:14.523096 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d2f50b0deac7aae56b41fcfa77df008bc72ed6e8a84222abd909f8ff5a00186f\": container with ID starting with d2f50b0deac7aae56b41fcfa77df008bc72ed6e8a84222abd909f8ff5a00186f not found: ID does not exist" containerID="d2f50b0deac7aae56b41fcfa77df008bc72ed6e8a84222abd909f8ff5a00186f" Nov 26 08:21:14 crc kubenswrapper[4492]: I1126 08:21:14.523137 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d2f50b0deac7aae56b41fcfa77df008bc72ed6e8a84222abd909f8ff5a00186f"} err="failed to get container status \"d2f50b0deac7aae56b41fcfa77df008bc72ed6e8a84222abd909f8ff5a00186f\": rpc error: code = NotFound desc = could not find container \"d2f50b0deac7aae56b41fcfa77df008bc72ed6e8a84222abd909f8ff5a00186f\": container with ID starting with d2f50b0deac7aae56b41fcfa77df008bc72ed6e8a84222abd909f8ff5a00186f not found: ID does not exist" Nov 26 08:21:14 crc kubenswrapper[4492]: I1126 08:21:14.523183 4492 scope.go:117] "RemoveContainer" containerID="06fc50c937f0af4fd85c8a7f675380c09b3a764dc001e461195def01e50451f5" Nov 26 08:21:14 crc kubenswrapper[4492]: E1126 08:21:14.523436 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"06fc50c937f0af4fd85c8a7f675380c09b3a764dc001e461195def01e50451f5\": container with ID starting with 06fc50c937f0af4fd85c8a7f675380c09b3a764dc001e461195def01e50451f5 not found: ID does not exist" containerID="06fc50c937f0af4fd85c8a7f675380c09b3a764dc001e461195def01e50451f5" Nov 26 08:21:14 crc kubenswrapper[4492]: I1126 08:21:14.523457 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"06fc50c937f0af4fd85c8a7f675380c09b3a764dc001e461195def01e50451f5"} err="failed to get container status \"06fc50c937f0af4fd85c8a7f675380c09b3a764dc001e461195def01e50451f5\": rpc error: code = NotFound desc = could not find container \"06fc50c937f0af4fd85c8a7f675380c09b3a764dc001e461195def01e50451f5\": container with ID starting with 06fc50c937f0af4fd85c8a7f675380c09b3a764dc001e461195def01e50451f5 not found: ID does not exist" Nov 26 08:21:16 crc kubenswrapper[4492]: I1126 08:21:16.449060 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fc6ade89-1311-472d-8ba8-535d1ee9b766" path="/var/lib/kubelet/pods/fc6ade89-1311-472d-8ba8-535d1ee9b766/volumes" Nov 26 08:21:49 crc kubenswrapper[4492]: I1126 08:21:49.441712 4492 patch_prober.go:28] interesting pod/machine-config-daemon-6blv7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 
127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 08:21:49 crc kubenswrapper[4492]: I1126 08:21:49.442410 4492 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 08:22:19 crc kubenswrapper[4492]: I1126 08:22:19.441869 4492 patch_prober.go:28] interesting pod/machine-config-daemon-6blv7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 08:22:19 crc kubenswrapper[4492]: I1126 08:22:19.442623 4492 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 08:22:49 crc kubenswrapper[4492]: I1126 08:22:49.441714 4492 patch_prober.go:28] interesting pod/machine-config-daemon-6blv7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 08:22:49 crc kubenswrapper[4492]: I1126 08:22:49.442468 4492 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 08:22:49 crc kubenswrapper[4492]: I1126 08:22:49.442532 4492 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" Nov 26 08:22:49 crc kubenswrapper[4492]: I1126 08:22:49.443065 4492 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"0bef70ebc6b8e4ed62ac66ad1ca221d9d32cc9c38ff5d2136343f1b32d9200c3"} pod="openshift-machine-config-operator/machine-config-daemon-6blv7" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 08:22:49 crc kubenswrapper[4492]: I1126 08:22:49.443122 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" containerID="cri-o://0bef70ebc6b8e4ed62ac66ad1ca221d9d32cc9c38ff5d2136343f1b32d9200c3" gracePeriod=600 Nov 26 08:22:49 crc kubenswrapper[4492]: E1126 08:22:49.564123 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:22:50 crc kubenswrapper[4492]: I1126 08:22:50.404369 4492 
generic.go:334] "Generic (PLEG): container finished" podID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerID="0bef70ebc6b8e4ed62ac66ad1ca221d9d32cc9c38ff5d2136343f1b32d9200c3" exitCode=0 Nov 26 08:22:50 crc kubenswrapper[4492]: I1126 08:22:50.404461 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" event={"ID":"04bf18ad-d2a1-4b30-a3fa-2b6247363c82","Type":"ContainerDied","Data":"0bef70ebc6b8e4ed62ac66ad1ca221d9d32cc9c38ff5d2136343f1b32d9200c3"} Nov 26 08:22:50 crc kubenswrapper[4492]: I1126 08:22:50.404887 4492 scope.go:117] "RemoveContainer" containerID="b02586568611ce8b448b597d243cccc021cccf20898a15d2405c71ed42813d9a" Nov 26 08:22:50 crc kubenswrapper[4492]: I1126 08:22:50.405517 4492 scope.go:117] "RemoveContainer" containerID="0bef70ebc6b8e4ed62ac66ad1ca221d9d32cc9c38ff5d2136343f1b32d9200c3" Nov 26 08:22:50 crc kubenswrapper[4492]: E1126 08:22:50.405988 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:23:02 crc kubenswrapper[4492]: I1126 08:23:02.439783 4492 scope.go:117] "RemoveContainer" containerID="0bef70ebc6b8e4ed62ac66ad1ca221d9d32cc9c38ff5d2136343f1b32d9200c3" Nov 26 08:23:02 crc kubenswrapper[4492]: E1126 08:23:02.440944 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:23:16 crc kubenswrapper[4492]: I1126 08:23:16.438993 4492 scope.go:117] "RemoveContainer" containerID="0bef70ebc6b8e4ed62ac66ad1ca221d9d32cc9c38ff5d2136343f1b32d9200c3" Nov 26 08:23:16 crc kubenswrapper[4492]: E1126 08:23:16.440204 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:23:28 crc kubenswrapper[4492]: I1126 08:23:28.439552 4492 scope.go:117] "RemoveContainer" containerID="0bef70ebc6b8e4ed62ac66ad1ca221d9d32cc9c38ff5d2136343f1b32d9200c3" Nov 26 08:23:28 crc kubenswrapper[4492]: E1126 08:23:28.440509 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:23:39 crc kubenswrapper[4492]: I1126 08:23:39.438707 4492 scope.go:117] "RemoveContainer" 
containerID="0bef70ebc6b8e4ed62ac66ad1ca221d9d32cc9c38ff5d2136343f1b32d9200c3" Nov 26 08:23:39 crc kubenswrapper[4492]: E1126 08:23:39.439479 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:23:54 crc kubenswrapper[4492]: I1126 08:23:54.445522 4492 scope.go:117] "RemoveContainer" containerID="0bef70ebc6b8e4ed62ac66ad1ca221d9d32cc9c38ff5d2136343f1b32d9200c3" Nov 26 08:23:54 crc kubenswrapper[4492]: E1126 08:23:54.446528 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:24:08 crc kubenswrapper[4492]: I1126 08:24:08.442494 4492 scope.go:117] "RemoveContainer" containerID="0bef70ebc6b8e4ed62ac66ad1ca221d9d32cc9c38ff5d2136343f1b32d9200c3" Nov 26 08:24:08 crc kubenswrapper[4492]: E1126 08:24:08.443481 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:24:23 crc kubenswrapper[4492]: I1126 08:24:23.439312 4492 scope.go:117] "RemoveContainer" containerID="0bef70ebc6b8e4ed62ac66ad1ca221d9d32cc9c38ff5d2136343f1b32d9200c3" Nov 26 08:24:23 crc kubenswrapper[4492]: E1126 08:24:23.440711 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:24:37 crc kubenswrapper[4492]: I1126 08:24:37.438589 4492 scope.go:117] "RemoveContainer" containerID="0bef70ebc6b8e4ed62ac66ad1ca221d9d32cc9c38ff5d2136343f1b32d9200c3" Nov 26 08:24:37 crc kubenswrapper[4492]: E1126 08:24:37.439577 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:24:49 crc kubenswrapper[4492]: I1126 08:24:49.438673 4492 scope.go:117] "RemoveContainer" containerID="0bef70ebc6b8e4ed62ac66ad1ca221d9d32cc9c38ff5d2136343f1b32d9200c3" Nov 26 08:24:49 crc kubenswrapper[4492]: E1126 08:24:49.440616 4492 pod_workers.go:1301] "Error syncing pod, 
skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:25:00 crc kubenswrapper[4492]: I1126 08:25:00.439729 4492 scope.go:117] "RemoveContainer" containerID="0bef70ebc6b8e4ed62ac66ad1ca221d9d32cc9c38ff5d2136343f1b32d9200c3" Nov 26 08:25:00 crc kubenswrapper[4492]: E1126 08:25:00.440707 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:25:14 crc kubenswrapper[4492]: I1126 08:25:14.444353 4492 scope.go:117] "RemoveContainer" containerID="0bef70ebc6b8e4ed62ac66ad1ca221d9d32cc9c38ff5d2136343f1b32d9200c3" Nov 26 08:25:14 crc kubenswrapper[4492]: E1126 08:25:14.455081 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:25:29 crc kubenswrapper[4492]: I1126 08:25:29.438781 4492 scope.go:117] "RemoveContainer" containerID="0bef70ebc6b8e4ed62ac66ad1ca221d9d32cc9c38ff5d2136343f1b32d9200c3" Nov 26 08:25:29 crc kubenswrapper[4492]: E1126 08:25:29.440812 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:25:40 crc kubenswrapper[4492]: I1126 08:25:40.439020 4492 scope.go:117] "RemoveContainer" containerID="0bef70ebc6b8e4ed62ac66ad1ca221d9d32cc9c38ff5d2136343f1b32d9200c3" Nov 26 08:25:40 crc kubenswrapper[4492]: E1126 08:25:40.440182 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:25:51 crc kubenswrapper[4492]: I1126 08:25:51.439274 4492 scope.go:117] "RemoveContainer" containerID="0bef70ebc6b8e4ed62ac66ad1ca221d9d32cc9c38ff5d2136343f1b32d9200c3" Nov 26 08:25:51 crc kubenswrapper[4492]: E1126 08:25:51.440124 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:26:01 crc kubenswrapper[4492]: I1126 08:26:01.163574 4492 scope.go:117] "RemoveContainer" containerID="7cd3e2d9ea51c3ab7cf85274b82b01c2ce4de9ad6f4c00c945d9f53ae164611e" Nov 26 08:26:01 crc kubenswrapper[4492]: I1126 08:26:01.198521 4492 scope.go:117] "RemoveContainer" containerID="3277d3b976bd41a96514cf901677093070ca94e05288f1853d2b7f87a25ef2e3" Nov 26 08:26:01 crc kubenswrapper[4492]: I1126 08:26:01.230857 4492 scope.go:117] "RemoveContainer" containerID="6ecfaab3ecd10f05c211298a38bd031875e6b647ed50744db01c896c9aeeb3cf" Nov 26 08:26:03 crc kubenswrapper[4492]: I1126 08:26:03.439752 4492 scope.go:117] "RemoveContainer" containerID="0bef70ebc6b8e4ed62ac66ad1ca221d9d32cc9c38ff5d2136343f1b32d9200c3" Nov 26 08:26:03 crc kubenswrapper[4492]: E1126 08:26:03.440653 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:26:14 crc kubenswrapper[4492]: I1126 08:26:14.445675 4492 scope.go:117] "RemoveContainer" containerID="0bef70ebc6b8e4ed62ac66ad1ca221d9d32cc9c38ff5d2136343f1b32d9200c3" Nov 26 08:26:14 crc kubenswrapper[4492]: E1126 08:26:14.446753 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:26:28 crc kubenswrapper[4492]: I1126 08:26:28.439002 4492 scope.go:117] "RemoveContainer" containerID="0bef70ebc6b8e4ed62ac66ad1ca221d9d32cc9c38ff5d2136343f1b32d9200c3" Nov 26 08:26:28 crc kubenswrapper[4492]: E1126 08:26:28.440064 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:26:42 crc kubenswrapper[4492]: I1126 08:26:42.439941 4492 scope.go:117] "RemoveContainer" containerID="0bef70ebc6b8e4ed62ac66ad1ca221d9d32cc9c38ff5d2136343f1b32d9200c3" Nov 26 08:26:42 crc kubenswrapper[4492]: E1126 08:26:42.441107 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:26:51 crc kubenswrapper[4492]: E1126 08:26:51.686973 4492 upgradeaware.go:427] Error proxying data 
from client to backend: readfrom tcp 192.168.25.180:39922->192.168.25.180:45641: write tcp 192.168.25.180:39922->192.168.25.180:45641: write: connection reset by peer Nov 26 08:26:56 crc kubenswrapper[4492]: I1126 08:26:56.453789 4492 scope.go:117] "RemoveContainer" containerID="0bef70ebc6b8e4ed62ac66ad1ca221d9d32cc9c38ff5d2136343f1b32d9200c3" Nov 26 08:26:56 crc kubenswrapper[4492]: E1126 08:26:56.454831 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:27:07 crc kubenswrapper[4492]: I1126 08:27:07.439033 4492 scope.go:117] "RemoveContainer" containerID="0bef70ebc6b8e4ed62ac66ad1ca221d9d32cc9c38ff5d2136343f1b32d9200c3" Nov 26 08:27:07 crc kubenswrapper[4492]: E1126 08:27:07.440022 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:27:22 crc kubenswrapper[4492]: I1126 08:27:22.438390 4492 scope.go:117] "RemoveContainer" containerID="0bef70ebc6b8e4ed62ac66ad1ca221d9d32cc9c38ff5d2136343f1b32d9200c3" Nov 26 08:27:22 crc kubenswrapper[4492]: E1126 08:27:22.439202 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:27:37 crc kubenswrapper[4492]: I1126 08:27:37.438562 4492 scope.go:117] "RemoveContainer" containerID="0bef70ebc6b8e4ed62ac66ad1ca221d9d32cc9c38ff5d2136343f1b32d9200c3" Nov 26 08:27:37 crc kubenswrapper[4492]: E1126 08:27:37.439771 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:27:50 crc kubenswrapper[4492]: I1126 08:27:50.440151 4492 scope.go:117] "RemoveContainer" containerID="0bef70ebc6b8e4ed62ac66ad1ca221d9d32cc9c38ff5d2136343f1b32d9200c3" Nov 26 08:27:51 crc kubenswrapper[4492]: I1126 08:27:51.343829 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" event={"ID":"04bf18ad-d2a1-4b30-a3fa-2b6247363c82","Type":"ContainerStarted","Data":"bf8b387c53b3dc1535b509352a21b747c1878da29b1b6246819a0c84ffa21202"} Nov 26 08:29:24 crc kubenswrapper[4492]: I1126 08:29:24.525778 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-kzsk4"] Nov 26 
08:29:24 crc kubenswrapper[4492]: E1126 08:29:24.528852 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fc6ade89-1311-472d-8ba8-535d1ee9b766" containerName="extract-content" Nov 26 08:29:24 crc kubenswrapper[4492]: I1126 08:29:24.528888 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="fc6ade89-1311-472d-8ba8-535d1ee9b766" containerName="extract-content" Nov 26 08:29:24 crc kubenswrapper[4492]: E1126 08:29:24.528922 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fc6ade89-1311-472d-8ba8-535d1ee9b766" containerName="extract-utilities" Nov 26 08:29:24 crc kubenswrapper[4492]: I1126 08:29:24.528928 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="fc6ade89-1311-472d-8ba8-535d1ee9b766" containerName="extract-utilities" Nov 26 08:29:24 crc kubenswrapper[4492]: E1126 08:29:24.528956 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fc6ade89-1311-472d-8ba8-535d1ee9b766" containerName="registry-server" Nov 26 08:29:24 crc kubenswrapper[4492]: I1126 08:29:24.528961 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="fc6ade89-1311-472d-8ba8-535d1ee9b766" containerName="registry-server" Nov 26 08:29:24 crc kubenswrapper[4492]: I1126 08:29:24.530044 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="fc6ade89-1311-472d-8ba8-535d1ee9b766" containerName="registry-server" Nov 26 08:29:24 crc kubenswrapper[4492]: I1126 08:29:24.533645 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-kzsk4" Nov 26 08:29:24 crc kubenswrapper[4492]: I1126 08:29:24.548162 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-kzsk4"] Nov 26 08:29:24 crc kubenswrapper[4492]: I1126 08:29:24.583609 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8b01d3de-eb1b-463b-801b-6b6112de1a7f-catalog-content\") pod \"community-operators-kzsk4\" (UID: \"8b01d3de-eb1b-463b-801b-6b6112de1a7f\") " pod="openshift-marketplace/community-operators-kzsk4" Nov 26 08:29:24 crc kubenswrapper[4492]: I1126 08:29:24.584240 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sm7rd\" (UniqueName: \"kubernetes.io/projected/8b01d3de-eb1b-463b-801b-6b6112de1a7f-kube-api-access-sm7rd\") pod \"community-operators-kzsk4\" (UID: \"8b01d3de-eb1b-463b-801b-6b6112de1a7f\") " pod="openshift-marketplace/community-operators-kzsk4" Nov 26 08:29:24 crc kubenswrapper[4492]: I1126 08:29:24.584425 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8b01d3de-eb1b-463b-801b-6b6112de1a7f-utilities\") pod \"community-operators-kzsk4\" (UID: \"8b01d3de-eb1b-463b-801b-6b6112de1a7f\") " pod="openshift-marketplace/community-operators-kzsk4" Nov 26 08:29:24 crc kubenswrapper[4492]: I1126 08:29:24.686886 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sm7rd\" (UniqueName: \"kubernetes.io/projected/8b01d3de-eb1b-463b-801b-6b6112de1a7f-kube-api-access-sm7rd\") pod \"community-operators-kzsk4\" (UID: \"8b01d3de-eb1b-463b-801b-6b6112de1a7f\") " pod="openshift-marketplace/community-operators-kzsk4" Nov 26 08:29:24 crc kubenswrapper[4492]: I1126 08:29:24.687068 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8b01d3de-eb1b-463b-801b-6b6112de1a7f-utilities\") pod \"community-operators-kzsk4\" (UID: \"8b01d3de-eb1b-463b-801b-6b6112de1a7f\") " pod="openshift-marketplace/community-operators-kzsk4" Nov 26 08:29:24 crc kubenswrapper[4492]: I1126 08:29:24.688480 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8b01d3de-eb1b-463b-801b-6b6112de1a7f-utilities\") pod \"community-operators-kzsk4\" (UID: \"8b01d3de-eb1b-463b-801b-6b6112de1a7f\") " pod="openshift-marketplace/community-operators-kzsk4" Nov 26 08:29:24 crc kubenswrapper[4492]: I1126 08:29:24.689690 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8b01d3de-eb1b-463b-801b-6b6112de1a7f-catalog-content\") pod \"community-operators-kzsk4\" (UID: \"8b01d3de-eb1b-463b-801b-6b6112de1a7f\") " pod="openshift-marketplace/community-operators-kzsk4" Nov 26 08:29:24 crc kubenswrapper[4492]: I1126 08:29:24.690702 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8b01d3de-eb1b-463b-801b-6b6112de1a7f-catalog-content\") pod \"community-operators-kzsk4\" (UID: \"8b01d3de-eb1b-463b-801b-6b6112de1a7f\") " pod="openshift-marketplace/community-operators-kzsk4" Nov 26 08:29:24 crc kubenswrapper[4492]: I1126 08:29:24.709163 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sm7rd\" (UniqueName: \"kubernetes.io/projected/8b01d3de-eb1b-463b-801b-6b6112de1a7f-kube-api-access-sm7rd\") pod \"community-operators-kzsk4\" (UID: \"8b01d3de-eb1b-463b-801b-6b6112de1a7f\") " pod="openshift-marketplace/community-operators-kzsk4" Nov 26 08:29:24 crc kubenswrapper[4492]: I1126 08:29:24.863211 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-kzsk4" Nov 26 08:29:25 crc kubenswrapper[4492]: I1126 08:29:25.577285 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-kzsk4"] Nov 26 08:29:25 crc kubenswrapper[4492]: W1126 08:29:25.591101 4492 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8b01d3de_eb1b_463b_801b_6b6112de1a7f.slice/crio-89c6b11bd96f700a2f69061b347ec55f2df1a68dae7b558d16788185b10122f1 WatchSource:0}: Error finding container 89c6b11bd96f700a2f69061b347ec55f2df1a68dae7b558d16788185b10122f1: Status 404 returned error can't find the container with id 89c6b11bd96f700a2f69061b347ec55f2df1a68dae7b558d16788185b10122f1 Nov 26 08:29:26 crc kubenswrapper[4492]: I1126 08:29:26.270540 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kzsk4" event={"ID":"8b01d3de-eb1b-463b-801b-6b6112de1a7f","Type":"ContainerDied","Data":"7e109a572a81c03803ac1fbb9f5969a76fb79d8e2fe259e58f11bb51f4d0ac13"} Nov 26 08:29:26 crc kubenswrapper[4492]: I1126 08:29:26.271038 4492 generic.go:334] "Generic (PLEG): container finished" podID="8b01d3de-eb1b-463b-801b-6b6112de1a7f" containerID="7e109a572a81c03803ac1fbb9f5969a76fb79d8e2fe259e58f11bb51f4d0ac13" exitCode=0 Nov 26 08:29:26 crc kubenswrapper[4492]: I1126 08:29:26.271824 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kzsk4" event={"ID":"8b01d3de-eb1b-463b-801b-6b6112de1a7f","Type":"ContainerStarted","Data":"89c6b11bd96f700a2f69061b347ec55f2df1a68dae7b558d16788185b10122f1"} Nov 26 08:29:26 crc kubenswrapper[4492]: I1126 08:29:26.276383 4492 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 26 08:29:27 crc kubenswrapper[4492]: I1126 08:29:27.299201 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kzsk4" event={"ID":"8b01d3de-eb1b-463b-801b-6b6112de1a7f","Type":"ContainerStarted","Data":"fe9fb8e22f7509047bc81af203cb8836ed5b93579e09ec3438ed8e3cbf708cc0"} Nov 26 08:29:29 crc kubenswrapper[4492]: I1126 08:29:29.319095 4492 generic.go:334] "Generic (PLEG): container finished" podID="8b01d3de-eb1b-463b-801b-6b6112de1a7f" containerID="fe9fb8e22f7509047bc81af203cb8836ed5b93579e09ec3438ed8e3cbf708cc0" exitCode=0 Nov 26 08:29:29 crc kubenswrapper[4492]: I1126 08:29:29.319217 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kzsk4" event={"ID":"8b01d3de-eb1b-463b-801b-6b6112de1a7f","Type":"ContainerDied","Data":"fe9fb8e22f7509047bc81af203cb8836ed5b93579e09ec3438ed8e3cbf708cc0"} Nov 26 08:29:30 crc kubenswrapper[4492]: I1126 08:29:30.374410 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kzsk4" event={"ID":"8b01d3de-eb1b-463b-801b-6b6112de1a7f","Type":"ContainerStarted","Data":"6309387e8f002751e934f48d01f190b8e1febf31e1772ddc0211986c0c486e12"} Nov 26 08:29:30 crc kubenswrapper[4492]: I1126 08:29:30.405398 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-kzsk4" podStartSLOduration=2.860347187 podStartE2EDuration="6.40179737s" podCreationTimestamp="2025-11-26 08:29:24 +0000 UTC" firstStartedPulling="2025-11-26 08:29:26.272840537 +0000 UTC m=+6062.156728825" lastFinishedPulling="2025-11-26 08:29:29.814290709 +0000 UTC m=+6065.698179008" 
observedRunningTime="2025-11-26 08:29:30.396535867 +0000 UTC m=+6066.280424165" watchObservedRunningTime="2025-11-26 08:29:30.40179737 +0000 UTC m=+6066.285685668" Nov 26 08:29:34 crc kubenswrapper[4492]: I1126 08:29:34.864006 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-kzsk4" Nov 26 08:29:34 crc kubenswrapper[4492]: I1126 08:29:34.864774 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-kzsk4" Nov 26 08:29:34 crc kubenswrapper[4492]: I1126 08:29:34.910553 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-kzsk4" Nov 26 08:29:35 crc kubenswrapper[4492]: I1126 08:29:35.469142 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-kzsk4" Nov 26 08:29:35 crc kubenswrapper[4492]: I1126 08:29:35.525432 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-kzsk4"] Nov 26 08:29:37 crc kubenswrapper[4492]: I1126 08:29:37.458137 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-kzsk4" podUID="8b01d3de-eb1b-463b-801b-6b6112de1a7f" containerName="registry-server" containerID="cri-o://6309387e8f002751e934f48d01f190b8e1febf31e1772ddc0211986c0c486e12" gracePeriod=2 Nov 26 08:29:37 crc kubenswrapper[4492]: I1126 08:29:37.924312 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-kzsk4" Nov 26 08:29:38 crc kubenswrapper[4492]: I1126 08:29:38.039743 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8b01d3de-eb1b-463b-801b-6b6112de1a7f-utilities\") pod \"8b01d3de-eb1b-463b-801b-6b6112de1a7f\" (UID: \"8b01d3de-eb1b-463b-801b-6b6112de1a7f\") " Nov 26 08:29:38 crc kubenswrapper[4492]: I1126 08:29:38.039822 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sm7rd\" (UniqueName: \"kubernetes.io/projected/8b01d3de-eb1b-463b-801b-6b6112de1a7f-kube-api-access-sm7rd\") pod \"8b01d3de-eb1b-463b-801b-6b6112de1a7f\" (UID: \"8b01d3de-eb1b-463b-801b-6b6112de1a7f\") " Nov 26 08:29:38 crc kubenswrapper[4492]: I1126 08:29:38.039861 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8b01d3de-eb1b-463b-801b-6b6112de1a7f-catalog-content\") pod \"8b01d3de-eb1b-463b-801b-6b6112de1a7f\" (UID: \"8b01d3de-eb1b-463b-801b-6b6112de1a7f\") " Nov 26 08:29:38 crc kubenswrapper[4492]: I1126 08:29:38.041651 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8b01d3de-eb1b-463b-801b-6b6112de1a7f-utilities" (OuterVolumeSpecName: "utilities") pod "8b01d3de-eb1b-463b-801b-6b6112de1a7f" (UID: "8b01d3de-eb1b-463b-801b-6b6112de1a7f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 08:29:38 crc kubenswrapper[4492]: I1126 08:29:38.062012 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8b01d3de-eb1b-463b-801b-6b6112de1a7f-kube-api-access-sm7rd" (OuterVolumeSpecName: "kube-api-access-sm7rd") pod "8b01d3de-eb1b-463b-801b-6b6112de1a7f" (UID: "8b01d3de-eb1b-463b-801b-6b6112de1a7f"). 
InnerVolumeSpecName "kube-api-access-sm7rd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:29:38 crc kubenswrapper[4492]: I1126 08:29:38.083960 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8b01d3de-eb1b-463b-801b-6b6112de1a7f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8b01d3de-eb1b-463b-801b-6b6112de1a7f" (UID: "8b01d3de-eb1b-463b-801b-6b6112de1a7f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 08:29:38 crc kubenswrapper[4492]: I1126 08:29:38.149432 4492 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8b01d3de-eb1b-463b-801b-6b6112de1a7f-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 08:29:38 crc kubenswrapper[4492]: I1126 08:29:38.149469 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sm7rd\" (UniqueName: \"kubernetes.io/projected/8b01d3de-eb1b-463b-801b-6b6112de1a7f-kube-api-access-sm7rd\") on node \"crc\" DevicePath \"\"" Nov 26 08:29:38 crc kubenswrapper[4492]: I1126 08:29:38.149483 4492 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8b01d3de-eb1b-463b-801b-6b6112de1a7f-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 08:29:38 crc kubenswrapper[4492]: I1126 08:29:38.466897 4492 generic.go:334] "Generic (PLEG): container finished" podID="8b01d3de-eb1b-463b-801b-6b6112de1a7f" containerID="6309387e8f002751e934f48d01f190b8e1febf31e1772ddc0211986c0c486e12" exitCode=0 Nov 26 08:29:38 crc kubenswrapper[4492]: I1126 08:29:38.466955 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kzsk4" event={"ID":"8b01d3de-eb1b-463b-801b-6b6112de1a7f","Type":"ContainerDied","Data":"6309387e8f002751e934f48d01f190b8e1febf31e1772ddc0211986c0c486e12"} Nov 26 08:29:38 crc kubenswrapper[4492]: I1126 08:29:38.466984 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kzsk4" event={"ID":"8b01d3de-eb1b-463b-801b-6b6112de1a7f","Type":"ContainerDied","Data":"89c6b11bd96f700a2f69061b347ec55f2df1a68dae7b558d16788185b10122f1"} Nov 26 08:29:38 crc kubenswrapper[4492]: I1126 08:29:38.467006 4492 scope.go:117] "RemoveContainer" containerID="6309387e8f002751e934f48d01f190b8e1febf31e1772ddc0211986c0c486e12" Nov 26 08:29:38 crc kubenswrapper[4492]: I1126 08:29:38.467149 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-kzsk4" Nov 26 08:29:38 crc kubenswrapper[4492]: I1126 08:29:38.488146 4492 scope.go:117] "RemoveContainer" containerID="fe9fb8e22f7509047bc81af203cb8836ed5b93579e09ec3438ed8e3cbf708cc0" Nov 26 08:29:38 crc kubenswrapper[4492]: I1126 08:29:38.500616 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-kzsk4"] Nov 26 08:29:38 crc kubenswrapper[4492]: I1126 08:29:38.509444 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-kzsk4"] Nov 26 08:29:38 crc kubenswrapper[4492]: I1126 08:29:38.510307 4492 scope.go:117] "RemoveContainer" containerID="7e109a572a81c03803ac1fbb9f5969a76fb79d8e2fe259e58f11bb51f4d0ac13" Nov 26 08:29:38 crc kubenswrapper[4492]: I1126 08:29:38.552009 4492 scope.go:117] "RemoveContainer" containerID="6309387e8f002751e934f48d01f190b8e1febf31e1772ddc0211986c0c486e12" Nov 26 08:29:38 crc kubenswrapper[4492]: E1126 08:29:38.553050 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6309387e8f002751e934f48d01f190b8e1febf31e1772ddc0211986c0c486e12\": container with ID starting with 6309387e8f002751e934f48d01f190b8e1febf31e1772ddc0211986c0c486e12 not found: ID does not exist" containerID="6309387e8f002751e934f48d01f190b8e1febf31e1772ddc0211986c0c486e12" Nov 26 08:29:38 crc kubenswrapper[4492]: I1126 08:29:38.554672 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6309387e8f002751e934f48d01f190b8e1febf31e1772ddc0211986c0c486e12"} err="failed to get container status \"6309387e8f002751e934f48d01f190b8e1febf31e1772ddc0211986c0c486e12\": rpc error: code = NotFound desc = could not find container \"6309387e8f002751e934f48d01f190b8e1febf31e1772ddc0211986c0c486e12\": container with ID starting with 6309387e8f002751e934f48d01f190b8e1febf31e1772ddc0211986c0c486e12 not found: ID does not exist" Nov 26 08:29:38 crc kubenswrapper[4492]: I1126 08:29:38.554709 4492 scope.go:117] "RemoveContainer" containerID="fe9fb8e22f7509047bc81af203cb8836ed5b93579e09ec3438ed8e3cbf708cc0" Nov 26 08:29:38 crc kubenswrapper[4492]: E1126 08:29:38.555093 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fe9fb8e22f7509047bc81af203cb8836ed5b93579e09ec3438ed8e3cbf708cc0\": container with ID starting with fe9fb8e22f7509047bc81af203cb8836ed5b93579e09ec3438ed8e3cbf708cc0 not found: ID does not exist" containerID="fe9fb8e22f7509047bc81af203cb8836ed5b93579e09ec3438ed8e3cbf708cc0" Nov 26 08:29:38 crc kubenswrapper[4492]: I1126 08:29:38.555119 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fe9fb8e22f7509047bc81af203cb8836ed5b93579e09ec3438ed8e3cbf708cc0"} err="failed to get container status \"fe9fb8e22f7509047bc81af203cb8836ed5b93579e09ec3438ed8e3cbf708cc0\": rpc error: code = NotFound desc = could not find container \"fe9fb8e22f7509047bc81af203cb8836ed5b93579e09ec3438ed8e3cbf708cc0\": container with ID starting with fe9fb8e22f7509047bc81af203cb8836ed5b93579e09ec3438ed8e3cbf708cc0 not found: ID does not exist" Nov 26 08:29:38 crc kubenswrapper[4492]: I1126 08:29:38.555134 4492 scope.go:117] "RemoveContainer" containerID="7e109a572a81c03803ac1fbb9f5969a76fb79d8e2fe259e58f11bb51f4d0ac13" Nov 26 08:29:38 crc kubenswrapper[4492]: E1126 08:29:38.555495 4492 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"7e109a572a81c03803ac1fbb9f5969a76fb79d8e2fe259e58f11bb51f4d0ac13\": container with ID starting with 7e109a572a81c03803ac1fbb9f5969a76fb79d8e2fe259e58f11bb51f4d0ac13 not found: ID does not exist" containerID="7e109a572a81c03803ac1fbb9f5969a76fb79d8e2fe259e58f11bb51f4d0ac13" Nov 26 08:29:38 crc kubenswrapper[4492]: I1126 08:29:38.555525 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7e109a572a81c03803ac1fbb9f5969a76fb79d8e2fe259e58f11bb51f4d0ac13"} err="failed to get container status \"7e109a572a81c03803ac1fbb9f5969a76fb79d8e2fe259e58f11bb51f4d0ac13\": rpc error: code = NotFound desc = could not find container \"7e109a572a81c03803ac1fbb9f5969a76fb79d8e2fe259e58f11bb51f4d0ac13\": container with ID starting with 7e109a572a81c03803ac1fbb9f5969a76fb79d8e2fe259e58f11bb51f4d0ac13 not found: ID does not exist" Nov 26 08:29:40 crc kubenswrapper[4492]: I1126 08:29:40.451700 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8b01d3de-eb1b-463b-801b-6b6112de1a7f" path="/var/lib/kubelet/pods/8b01d3de-eb1b-463b-801b-6b6112de1a7f/volumes" Nov 26 08:30:00 crc kubenswrapper[4492]: I1126 08:30:00.202469 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402430-mx6lp"] Nov 26 08:30:00 crc kubenswrapper[4492]: E1126 08:30:00.203509 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8b01d3de-eb1b-463b-801b-6b6112de1a7f" containerName="extract-utilities" Nov 26 08:30:00 crc kubenswrapper[4492]: I1126 08:30:00.203525 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="8b01d3de-eb1b-463b-801b-6b6112de1a7f" containerName="extract-utilities" Nov 26 08:30:00 crc kubenswrapper[4492]: E1126 08:30:00.203543 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8b01d3de-eb1b-463b-801b-6b6112de1a7f" containerName="registry-server" Nov 26 08:30:00 crc kubenswrapper[4492]: I1126 08:30:00.203549 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="8b01d3de-eb1b-463b-801b-6b6112de1a7f" containerName="registry-server" Nov 26 08:30:00 crc kubenswrapper[4492]: E1126 08:30:00.203585 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8b01d3de-eb1b-463b-801b-6b6112de1a7f" containerName="extract-content" Nov 26 08:30:00 crc kubenswrapper[4492]: I1126 08:30:00.203591 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="8b01d3de-eb1b-463b-801b-6b6112de1a7f" containerName="extract-content" Nov 26 08:30:00 crc kubenswrapper[4492]: I1126 08:30:00.203835 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="8b01d3de-eb1b-463b-801b-6b6112de1a7f" containerName="registry-server" Nov 26 08:30:00 crc kubenswrapper[4492]: I1126 08:30:00.205209 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402430-mx6lp" Nov 26 08:30:00 crc kubenswrapper[4492]: I1126 08:30:00.209212 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402430-mx6lp"] Nov 26 08:30:00 crc kubenswrapper[4492]: I1126 08:30:00.215882 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 26 08:30:00 crc kubenswrapper[4492]: I1126 08:30:00.218984 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 26 08:30:00 crc kubenswrapper[4492]: I1126 08:30:00.282363 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/7ee38011-3540-4709-a768-21452d49f874-secret-volume\") pod \"collect-profiles-29402430-mx6lp\" (UID: \"7ee38011-3540-4709-a768-21452d49f874\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402430-mx6lp" Nov 26 08:30:00 crc kubenswrapper[4492]: I1126 08:30:00.282420 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l89t9\" (UniqueName: \"kubernetes.io/projected/7ee38011-3540-4709-a768-21452d49f874-kube-api-access-l89t9\") pod \"collect-profiles-29402430-mx6lp\" (UID: \"7ee38011-3540-4709-a768-21452d49f874\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402430-mx6lp" Nov 26 08:30:00 crc kubenswrapper[4492]: I1126 08:30:00.282766 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/7ee38011-3540-4709-a768-21452d49f874-config-volume\") pod \"collect-profiles-29402430-mx6lp\" (UID: \"7ee38011-3540-4709-a768-21452d49f874\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402430-mx6lp" Nov 26 08:30:00 crc kubenswrapper[4492]: I1126 08:30:00.385838 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/7ee38011-3540-4709-a768-21452d49f874-config-volume\") pod \"collect-profiles-29402430-mx6lp\" (UID: \"7ee38011-3540-4709-a768-21452d49f874\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402430-mx6lp" Nov 26 08:30:00 crc kubenswrapper[4492]: I1126 08:30:00.385988 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/7ee38011-3540-4709-a768-21452d49f874-secret-volume\") pod \"collect-profiles-29402430-mx6lp\" (UID: \"7ee38011-3540-4709-a768-21452d49f874\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402430-mx6lp" Nov 26 08:30:00 crc kubenswrapper[4492]: I1126 08:30:00.386064 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l89t9\" (UniqueName: \"kubernetes.io/projected/7ee38011-3540-4709-a768-21452d49f874-kube-api-access-l89t9\") pod \"collect-profiles-29402430-mx6lp\" (UID: \"7ee38011-3540-4709-a768-21452d49f874\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402430-mx6lp" Nov 26 08:30:00 crc kubenswrapper[4492]: I1126 08:30:00.387090 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/7ee38011-3540-4709-a768-21452d49f874-config-volume\") pod 
\"collect-profiles-29402430-mx6lp\" (UID: \"7ee38011-3540-4709-a768-21452d49f874\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402430-mx6lp" Nov 26 08:30:00 crc kubenswrapper[4492]: I1126 08:30:00.395401 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/7ee38011-3540-4709-a768-21452d49f874-secret-volume\") pod \"collect-profiles-29402430-mx6lp\" (UID: \"7ee38011-3540-4709-a768-21452d49f874\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402430-mx6lp" Nov 26 08:30:00 crc kubenswrapper[4492]: I1126 08:30:00.401885 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l89t9\" (UniqueName: \"kubernetes.io/projected/7ee38011-3540-4709-a768-21452d49f874-kube-api-access-l89t9\") pod \"collect-profiles-29402430-mx6lp\" (UID: \"7ee38011-3540-4709-a768-21452d49f874\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402430-mx6lp" Nov 26 08:30:00 crc kubenswrapper[4492]: I1126 08:30:00.526309 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402430-mx6lp" Nov 26 08:30:01 crc kubenswrapper[4492]: I1126 08:30:01.005880 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402430-mx6lp"] Nov 26 08:30:01 crc kubenswrapper[4492]: I1126 08:30:01.731890 4492 generic.go:334] "Generic (PLEG): container finished" podID="7ee38011-3540-4709-a768-21452d49f874" containerID="d1ad6767069d8d3223479814797fbd8ecd7efddefcde715eeb67b67216877a2d" exitCode=0 Nov 26 08:30:01 crc kubenswrapper[4492]: I1126 08:30:01.732025 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402430-mx6lp" event={"ID":"7ee38011-3540-4709-a768-21452d49f874","Type":"ContainerDied","Data":"d1ad6767069d8d3223479814797fbd8ecd7efddefcde715eeb67b67216877a2d"} Nov 26 08:30:01 crc kubenswrapper[4492]: I1126 08:30:01.732297 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402430-mx6lp" event={"ID":"7ee38011-3540-4709-a768-21452d49f874","Type":"ContainerStarted","Data":"d818ec515229fda2140df9bbda2e3dd15e463786b1b6ea8de68814e7f19915ff"} Nov 26 08:30:03 crc kubenswrapper[4492]: I1126 08:30:03.084846 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402430-mx6lp" Nov 26 08:30:03 crc kubenswrapper[4492]: I1126 08:30:03.269226 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/7ee38011-3540-4709-a768-21452d49f874-secret-volume\") pod \"7ee38011-3540-4709-a768-21452d49f874\" (UID: \"7ee38011-3540-4709-a768-21452d49f874\") " Nov 26 08:30:03 crc kubenswrapper[4492]: I1126 08:30:03.269974 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l89t9\" (UniqueName: \"kubernetes.io/projected/7ee38011-3540-4709-a768-21452d49f874-kube-api-access-l89t9\") pod \"7ee38011-3540-4709-a768-21452d49f874\" (UID: \"7ee38011-3540-4709-a768-21452d49f874\") " Nov 26 08:30:03 crc kubenswrapper[4492]: I1126 08:30:03.270120 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/7ee38011-3540-4709-a768-21452d49f874-config-volume\") pod \"7ee38011-3540-4709-a768-21452d49f874\" (UID: \"7ee38011-3540-4709-a768-21452d49f874\") " Nov 26 08:30:03 crc kubenswrapper[4492]: I1126 08:30:03.270641 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7ee38011-3540-4709-a768-21452d49f874-config-volume" (OuterVolumeSpecName: "config-volume") pod "7ee38011-3540-4709-a768-21452d49f874" (UID: "7ee38011-3540-4709-a768-21452d49f874"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 08:30:03 crc kubenswrapper[4492]: I1126 08:30:03.271191 4492 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/7ee38011-3540-4709-a768-21452d49f874-config-volume\") on node \"crc\" DevicePath \"\"" Nov 26 08:30:03 crc kubenswrapper[4492]: I1126 08:30:03.277224 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7ee38011-3540-4709-a768-21452d49f874-kube-api-access-l89t9" (OuterVolumeSpecName: "kube-api-access-l89t9") pod "7ee38011-3540-4709-a768-21452d49f874" (UID: "7ee38011-3540-4709-a768-21452d49f874"). InnerVolumeSpecName "kube-api-access-l89t9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:30:03 crc kubenswrapper[4492]: I1126 08:30:03.277524 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7ee38011-3540-4709-a768-21452d49f874-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "7ee38011-3540-4709-a768-21452d49f874" (UID: "7ee38011-3540-4709-a768-21452d49f874"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:30:03 crc kubenswrapper[4492]: I1126 08:30:03.374144 4492 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/7ee38011-3540-4709-a768-21452d49f874-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 26 08:30:03 crc kubenswrapper[4492]: I1126 08:30:03.374242 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l89t9\" (UniqueName: \"kubernetes.io/projected/7ee38011-3540-4709-a768-21452d49f874-kube-api-access-l89t9\") on node \"crc\" DevicePath \"\"" Nov 26 08:30:03 crc kubenswrapper[4492]: I1126 08:30:03.713036 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-mr9s4"] Nov 26 08:30:03 crc kubenswrapper[4492]: E1126 08:30:03.721958 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7ee38011-3540-4709-a768-21452d49f874" containerName="collect-profiles" Nov 26 08:30:03 crc kubenswrapper[4492]: I1126 08:30:03.721992 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="7ee38011-3540-4709-a768-21452d49f874" containerName="collect-profiles" Nov 26 08:30:03 crc kubenswrapper[4492]: I1126 08:30:03.722533 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="7ee38011-3540-4709-a768-21452d49f874" containerName="collect-profiles" Nov 26 08:30:03 crc kubenswrapper[4492]: I1126 08:30:03.723817 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-mr9s4"] Nov 26 08:30:03 crc kubenswrapper[4492]: I1126 08:30:03.723910 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mr9s4" Nov 26 08:30:03 crc kubenswrapper[4492]: I1126 08:30:03.755925 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402430-mx6lp" event={"ID":"7ee38011-3540-4709-a768-21452d49f874","Type":"ContainerDied","Data":"d818ec515229fda2140df9bbda2e3dd15e463786b1b6ea8de68814e7f19915ff"} Nov 26 08:30:03 crc kubenswrapper[4492]: I1126 08:30:03.755970 4492 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d818ec515229fda2140df9bbda2e3dd15e463786b1b6ea8de68814e7f19915ff" Nov 26 08:30:03 crc kubenswrapper[4492]: I1126 08:30:03.755992 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402430-mx6lp" Nov 26 08:30:03 crc kubenswrapper[4492]: I1126 08:30:03.884893 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0c0626b7-ce33-42dc-97d9-d417ae3c3b89-catalog-content\") pod \"redhat-marketplace-mr9s4\" (UID: \"0c0626b7-ce33-42dc-97d9-d417ae3c3b89\") " pod="openshift-marketplace/redhat-marketplace-mr9s4" Nov 26 08:30:03 crc kubenswrapper[4492]: I1126 08:30:03.885025 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9mf5l\" (UniqueName: \"kubernetes.io/projected/0c0626b7-ce33-42dc-97d9-d417ae3c3b89-kube-api-access-9mf5l\") pod \"redhat-marketplace-mr9s4\" (UID: \"0c0626b7-ce33-42dc-97d9-d417ae3c3b89\") " pod="openshift-marketplace/redhat-marketplace-mr9s4" Nov 26 08:30:03 crc kubenswrapper[4492]: I1126 08:30:03.885108 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0c0626b7-ce33-42dc-97d9-d417ae3c3b89-utilities\") pod \"redhat-marketplace-mr9s4\" (UID: \"0c0626b7-ce33-42dc-97d9-d417ae3c3b89\") " pod="openshift-marketplace/redhat-marketplace-mr9s4" Nov 26 08:30:03 crc kubenswrapper[4492]: I1126 08:30:03.986819 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0c0626b7-ce33-42dc-97d9-d417ae3c3b89-catalog-content\") pod \"redhat-marketplace-mr9s4\" (UID: \"0c0626b7-ce33-42dc-97d9-d417ae3c3b89\") " pod="openshift-marketplace/redhat-marketplace-mr9s4" Nov 26 08:30:03 crc kubenswrapper[4492]: I1126 08:30:03.986871 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9mf5l\" (UniqueName: \"kubernetes.io/projected/0c0626b7-ce33-42dc-97d9-d417ae3c3b89-kube-api-access-9mf5l\") pod \"redhat-marketplace-mr9s4\" (UID: \"0c0626b7-ce33-42dc-97d9-d417ae3c3b89\") " pod="openshift-marketplace/redhat-marketplace-mr9s4" Nov 26 08:30:03 crc kubenswrapper[4492]: I1126 08:30:03.986916 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0c0626b7-ce33-42dc-97d9-d417ae3c3b89-utilities\") pod \"redhat-marketplace-mr9s4\" (UID: \"0c0626b7-ce33-42dc-97d9-d417ae3c3b89\") " pod="openshift-marketplace/redhat-marketplace-mr9s4" Nov 26 08:30:03 crc kubenswrapper[4492]: I1126 08:30:03.987394 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0c0626b7-ce33-42dc-97d9-d417ae3c3b89-utilities\") pod \"redhat-marketplace-mr9s4\" (UID: \"0c0626b7-ce33-42dc-97d9-d417ae3c3b89\") " pod="openshift-marketplace/redhat-marketplace-mr9s4" Nov 26 08:30:03 crc kubenswrapper[4492]: I1126 08:30:03.987609 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0c0626b7-ce33-42dc-97d9-d417ae3c3b89-catalog-content\") pod \"redhat-marketplace-mr9s4\" (UID: \"0c0626b7-ce33-42dc-97d9-d417ae3c3b89\") " pod="openshift-marketplace/redhat-marketplace-mr9s4" Nov 26 08:30:04 crc kubenswrapper[4492]: I1126 08:30:04.011131 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9mf5l\" (UniqueName: \"kubernetes.io/projected/0c0626b7-ce33-42dc-97d9-d417ae3c3b89-kube-api-access-9mf5l\") 
pod \"redhat-marketplace-mr9s4\" (UID: \"0c0626b7-ce33-42dc-97d9-d417ae3c3b89\") " pod="openshift-marketplace/redhat-marketplace-mr9s4" Nov 26 08:30:04 crc kubenswrapper[4492]: I1126 08:30:04.039824 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mr9s4" Nov 26 08:30:04 crc kubenswrapper[4492]: I1126 08:30:04.202317 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402385-mvfvd"] Nov 26 08:30:04 crc kubenswrapper[4492]: I1126 08:30:04.212977 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402385-mvfvd"] Nov 26 08:30:04 crc kubenswrapper[4492]: I1126 08:30:04.478347 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="476ba433-3e71-45dc-9e94-bfb9ca40a73b" path="/var/lib/kubelet/pods/476ba433-3e71-45dc-9e94-bfb9ca40a73b/volumes" Nov 26 08:30:04 crc kubenswrapper[4492]: I1126 08:30:04.528365 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-mr9s4"] Nov 26 08:30:04 crc kubenswrapper[4492]: I1126 08:30:04.766139 4492 generic.go:334] "Generic (PLEG): container finished" podID="0c0626b7-ce33-42dc-97d9-d417ae3c3b89" containerID="f74d9defd0e13fd6e8ef25797c966de0b47712b03def070495debca5126b6588" exitCode=0 Nov 26 08:30:04 crc kubenswrapper[4492]: I1126 08:30:04.766340 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mr9s4" event={"ID":"0c0626b7-ce33-42dc-97d9-d417ae3c3b89","Type":"ContainerDied","Data":"f74d9defd0e13fd6e8ef25797c966de0b47712b03def070495debca5126b6588"} Nov 26 08:30:04 crc kubenswrapper[4492]: I1126 08:30:04.766419 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mr9s4" event={"ID":"0c0626b7-ce33-42dc-97d9-d417ae3c3b89","Type":"ContainerStarted","Data":"40b4f25b58395303f9560c9ecdb92e262b04fe0c01c6367a16aef56bfeee4d54"} Nov 26 08:30:05 crc kubenswrapper[4492]: I1126 08:30:05.783630 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mr9s4" event={"ID":"0c0626b7-ce33-42dc-97d9-d417ae3c3b89","Type":"ContainerStarted","Data":"a94cacdc2ef67601ebabbf7d3b20307e0967893a52236ed12ddf285056b43203"} Nov 26 08:30:06 crc kubenswrapper[4492]: I1126 08:30:06.796985 4492 generic.go:334] "Generic (PLEG): container finished" podID="0c0626b7-ce33-42dc-97d9-d417ae3c3b89" containerID="a94cacdc2ef67601ebabbf7d3b20307e0967893a52236ed12ddf285056b43203" exitCode=0 Nov 26 08:30:06 crc kubenswrapper[4492]: I1126 08:30:06.797054 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mr9s4" event={"ID":"0c0626b7-ce33-42dc-97d9-d417ae3c3b89","Type":"ContainerDied","Data":"a94cacdc2ef67601ebabbf7d3b20307e0967893a52236ed12ddf285056b43203"} Nov 26 08:30:07 crc kubenswrapper[4492]: I1126 08:30:07.810139 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mr9s4" event={"ID":"0c0626b7-ce33-42dc-97d9-d417ae3c3b89","Type":"ContainerStarted","Data":"b84cc69c36378874bd87a8d1adddcbdcc2ee7abb1b4eba4f578cf44fdf65fabc"} Nov 26 08:30:07 crc kubenswrapper[4492]: I1126 08:30:07.837561 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-mr9s4" podStartSLOduration=2.32292264 podStartE2EDuration="4.837537858s" podCreationTimestamp="2025-11-26 
08:30:03 +0000 UTC" firstStartedPulling="2025-11-26 08:30:04.767707244 +0000 UTC m=+6100.651595542" lastFinishedPulling="2025-11-26 08:30:07.282322462 +0000 UTC m=+6103.166210760" observedRunningTime="2025-11-26 08:30:07.827389266 +0000 UTC m=+6103.711277564" watchObservedRunningTime="2025-11-26 08:30:07.837537858 +0000 UTC m=+6103.721426156" Nov 26 08:30:14 crc kubenswrapper[4492]: I1126 08:30:14.040073 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-mr9s4" Nov 26 08:30:14 crc kubenswrapper[4492]: I1126 08:30:14.040770 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-mr9s4" Nov 26 08:30:14 crc kubenswrapper[4492]: I1126 08:30:14.087987 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-mr9s4" Nov 26 08:30:14 crc kubenswrapper[4492]: I1126 08:30:14.926652 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-mr9s4" Nov 26 08:30:14 crc kubenswrapper[4492]: I1126 08:30:14.973453 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-mr9s4"] Nov 26 08:30:16 crc kubenswrapper[4492]: I1126 08:30:16.903418 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-mr9s4" podUID="0c0626b7-ce33-42dc-97d9-d417ae3c3b89" containerName="registry-server" containerID="cri-o://b84cc69c36378874bd87a8d1adddcbdcc2ee7abb1b4eba4f578cf44fdf65fabc" gracePeriod=2 Nov 26 08:30:17 crc kubenswrapper[4492]: I1126 08:30:17.369722 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mr9s4" Nov 26 08:30:17 crc kubenswrapper[4492]: I1126 08:30:17.432715 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9mf5l\" (UniqueName: \"kubernetes.io/projected/0c0626b7-ce33-42dc-97d9-d417ae3c3b89-kube-api-access-9mf5l\") pod \"0c0626b7-ce33-42dc-97d9-d417ae3c3b89\" (UID: \"0c0626b7-ce33-42dc-97d9-d417ae3c3b89\") " Nov 26 08:30:17 crc kubenswrapper[4492]: I1126 08:30:17.432874 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0c0626b7-ce33-42dc-97d9-d417ae3c3b89-utilities\") pod \"0c0626b7-ce33-42dc-97d9-d417ae3c3b89\" (UID: \"0c0626b7-ce33-42dc-97d9-d417ae3c3b89\") " Nov 26 08:30:17 crc kubenswrapper[4492]: I1126 08:30:17.433229 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0c0626b7-ce33-42dc-97d9-d417ae3c3b89-catalog-content\") pod \"0c0626b7-ce33-42dc-97d9-d417ae3c3b89\" (UID: \"0c0626b7-ce33-42dc-97d9-d417ae3c3b89\") " Nov 26 08:30:17 crc kubenswrapper[4492]: I1126 08:30:17.446131 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0c0626b7-ce33-42dc-97d9-d417ae3c3b89-utilities" (OuterVolumeSpecName: "utilities") pod "0c0626b7-ce33-42dc-97d9-d417ae3c3b89" (UID: "0c0626b7-ce33-42dc-97d9-d417ae3c3b89"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 08:30:17 crc kubenswrapper[4492]: I1126 08:30:17.447556 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0c0626b7-ce33-42dc-97d9-d417ae3c3b89-kube-api-access-9mf5l" (OuterVolumeSpecName: "kube-api-access-9mf5l") pod "0c0626b7-ce33-42dc-97d9-d417ae3c3b89" (UID: "0c0626b7-ce33-42dc-97d9-d417ae3c3b89"). InnerVolumeSpecName "kube-api-access-9mf5l". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:30:17 crc kubenswrapper[4492]: I1126 08:30:17.463437 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0c0626b7-ce33-42dc-97d9-d417ae3c3b89-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0c0626b7-ce33-42dc-97d9-d417ae3c3b89" (UID: "0c0626b7-ce33-42dc-97d9-d417ae3c3b89"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 08:30:17 crc kubenswrapper[4492]: I1126 08:30:17.536709 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9mf5l\" (UniqueName: \"kubernetes.io/projected/0c0626b7-ce33-42dc-97d9-d417ae3c3b89-kube-api-access-9mf5l\") on node \"crc\" DevicePath \"\"" Nov 26 08:30:17 crc kubenswrapper[4492]: I1126 08:30:17.536741 4492 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0c0626b7-ce33-42dc-97d9-d417ae3c3b89-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 08:30:17 crc kubenswrapper[4492]: I1126 08:30:17.536753 4492 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0c0626b7-ce33-42dc-97d9-d417ae3c3b89-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 08:30:17 crc kubenswrapper[4492]: I1126 08:30:17.918843 4492 generic.go:334] "Generic (PLEG): container finished" podID="0c0626b7-ce33-42dc-97d9-d417ae3c3b89" containerID="b84cc69c36378874bd87a8d1adddcbdcc2ee7abb1b4eba4f578cf44fdf65fabc" exitCode=0 Nov 26 08:30:17 crc kubenswrapper[4492]: I1126 08:30:17.918923 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mr9s4" event={"ID":"0c0626b7-ce33-42dc-97d9-d417ae3c3b89","Type":"ContainerDied","Data":"b84cc69c36378874bd87a8d1adddcbdcc2ee7abb1b4eba4f578cf44fdf65fabc"} Nov 26 08:30:17 crc kubenswrapper[4492]: I1126 08:30:17.918986 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mr9s4" event={"ID":"0c0626b7-ce33-42dc-97d9-d417ae3c3b89","Type":"ContainerDied","Data":"40b4f25b58395303f9560c9ecdb92e262b04fe0c01c6367a16aef56bfeee4d54"} Nov 26 08:30:17 crc kubenswrapper[4492]: I1126 08:30:17.919025 4492 scope.go:117] "RemoveContainer" containerID="b84cc69c36378874bd87a8d1adddcbdcc2ee7abb1b4eba4f578cf44fdf65fabc" Nov 26 08:30:17 crc kubenswrapper[4492]: I1126 08:30:17.919314 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mr9s4" Nov 26 08:30:17 crc kubenswrapper[4492]: I1126 08:30:17.947858 4492 scope.go:117] "RemoveContainer" containerID="a94cacdc2ef67601ebabbf7d3b20307e0967893a52236ed12ddf285056b43203" Nov 26 08:30:17 crc kubenswrapper[4492]: I1126 08:30:17.970494 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-mr9s4"] Nov 26 08:30:17 crc kubenswrapper[4492]: I1126 08:30:17.984658 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-mr9s4"] Nov 26 08:30:17 crc kubenswrapper[4492]: I1126 08:30:17.996977 4492 scope.go:117] "RemoveContainer" containerID="f74d9defd0e13fd6e8ef25797c966de0b47712b03def070495debca5126b6588" Nov 26 08:30:18 crc kubenswrapper[4492]: I1126 08:30:18.019086 4492 scope.go:117] "RemoveContainer" containerID="b84cc69c36378874bd87a8d1adddcbdcc2ee7abb1b4eba4f578cf44fdf65fabc" Nov 26 08:30:18 crc kubenswrapper[4492]: E1126 08:30:18.019427 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b84cc69c36378874bd87a8d1adddcbdcc2ee7abb1b4eba4f578cf44fdf65fabc\": container with ID starting with b84cc69c36378874bd87a8d1adddcbdcc2ee7abb1b4eba4f578cf44fdf65fabc not found: ID does not exist" containerID="b84cc69c36378874bd87a8d1adddcbdcc2ee7abb1b4eba4f578cf44fdf65fabc" Nov 26 08:30:18 crc kubenswrapper[4492]: I1126 08:30:18.019469 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b84cc69c36378874bd87a8d1adddcbdcc2ee7abb1b4eba4f578cf44fdf65fabc"} err="failed to get container status \"b84cc69c36378874bd87a8d1adddcbdcc2ee7abb1b4eba4f578cf44fdf65fabc\": rpc error: code = NotFound desc = could not find container \"b84cc69c36378874bd87a8d1adddcbdcc2ee7abb1b4eba4f578cf44fdf65fabc\": container with ID starting with b84cc69c36378874bd87a8d1adddcbdcc2ee7abb1b4eba4f578cf44fdf65fabc not found: ID does not exist" Nov 26 08:30:18 crc kubenswrapper[4492]: I1126 08:30:18.019494 4492 scope.go:117] "RemoveContainer" containerID="a94cacdc2ef67601ebabbf7d3b20307e0967893a52236ed12ddf285056b43203" Nov 26 08:30:18 crc kubenswrapper[4492]: E1126 08:30:18.019830 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a94cacdc2ef67601ebabbf7d3b20307e0967893a52236ed12ddf285056b43203\": container with ID starting with a94cacdc2ef67601ebabbf7d3b20307e0967893a52236ed12ddf285056b43203 not found: ID does not exist" containerID="a94cacdc2ef67601ebabbf7d3b20307e0967893a52236ed12ddf285056b43203" Nov 26 08:30:18 crc kubenswrapper[4492]: I1126 08:30:18.019861 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a94cacdc2ef67601ebabbf7d3b20307e0967893a52236ed12ddf285056b43203"} err="failed to get container status \"a94cacdc2ef67601ebabbf7d3b20307e0967893a52236ed12ddf285056b43203\": rpc error: code = NotFound desc = could not find container \"a94cacdc2ef67601ebabbf7d3b20307e0967893a52236ed12ddf285056b43203\": container with ID starting with a94cacdc2ef67601ebabbf7d3b20307e0967893a52236ed12ddf285056b43203 not found: ID does not exist" Nov 26 08:30:18 crc kubenswrapper[4492]: I1126 08:30:18.019877 4492 scope.go:117] "RemoveContainer" containerID="f74d9defd0e13fd6e8ef25797c966de0b47712b03def070495debca5126b6588" Nov 26 08:30:18 crc kubenswrapper[4492]: E1126 08:30:18.020305 4492 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"f74d9defd0e13fd6e8ef25797c966de0b47712b03def070495debca5126b6588\": container with ID starting with f74d9defd0e13fd6e8ef25797c966de0b47712b03def070495debca5126b6588 not found: ID does not exist" containerID="f74d9defd0e13fd6e8ef25797c966de0b47712b03def070495debca5126b6588" Nov 26 08:30:18 crc kubenswrapper[4492]: I1126 08:30:18.020345 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f74d9defd0e13fd6e8ef25797c966de0b47712b03def070495debca5126b6588"} err="failed to get container status \"f74d9defd0e13fd6e8ef25797c966de0b47712b03def070495debca5126b6588\": rpc error: code = NotFound desc = could not find container \"f74d9defd0e13fd6e8ef25797c966de0b47712b03def070495debca5126b6588\": container with ID starting with f74d9defd0e13fd6e8ef25797c966de0b47712b03def070495debca5126b6588 not found: ID does not exist" Nov 26 08:30:18 crc kubenswrapper[4492]: I1126 08:30:18.452477 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0c0626b7-ce33-42dc-97d9-d417ae3c3b89" path="/var/lib/kubelet/pods/0c0626b7-ce33-42dc-97d9-d417ae3c3b89/volumes" Nov 26 08:30:19 crc kubenswrapper[4492]: I1126 08:30:19.442132 4492 patch_prober.go:28] interesting pod/machine-config-daemon-6blv7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 08:30:19 crc kubenswrapper[4492]: I1126 08:30:19.442300 4492 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 08:30:49 crc kubenswrapper[4492]: I1126 08:30:49.441301 4492 patch_prober.go:28] interesting pod/machine-config-daemon-6blv7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 08:30:49 crc kubenswrapper[4492]: I1126 08:30:49.442064 4492 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 08:31:01 crc kubenswrapper[4492]: I1126 08:31:01.383616 4492 scope.go:117] "RemoveContainer" containerID="8c2e245a131afeb8ff951a0d0027918abe07b3ee0ff33130bf9611156ddd7171" Nov 26 08:31:05 crc kubenswrapper[4492]: I1126 08:31:05.770225 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-9vnqj"] Nov 26 08:31:05 crc kubenswrapper[4492]: E1126 08:31:05.771193 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0c0626b7-ce33-42dc-97d9-d417ae3c3b89" containerName="registry-server" Nov 26 08:31:05 crc kubenswrapper[4492]: I1126 08:31:05.771209 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="0c0626b7-ce33-42dc-97d9-d417ae3c3b89" containerName="registry-server" Nov 26 08:31:05 crc kubenswrapper[4492]: E1126 08:31:05.771236 4492 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="0c0626b7-ce33-42dc-97d9-d417ae3c3b89" containerName="extract-content" Nov 26 08:31:05 crc kubenswrapper[4492]: I1126 08:31:05.771241 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="0c0626b7-ce33-42dc-97d9-d417ae3c3b89" containerName="extract-content" Nov 26 08:31:05 crc kubenswrapper[4492]: E1126 08:31:05.771272 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0c0626b7-ce33-42dc-97d9-d417ae3c3b89" containerName="extract-utilities" Nov 26 08:31:05 crc kubenswrapper[4492]: I1126 08:31:05.771278 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="0c0626b7-ce33-42dc-97d9-d417ae3c3b89" containerName="extract-utilities" Nov 26 08:31:05 crc kubenswrapper[4492]: I1126 08:31:05.771492 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="0c0626b7-ce33-42dc-97d9-d417ae3c3b89" containerName="registry-server" Nov 26 08:31:05 crc kubenswrapper[4492]: I1126 08:31:05.776326 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-9vnqj" Nov 26 08:31:05 crc kubenswrapper[4492]: I1126 08:31:05.784126 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-9vnqj"] Nov 26 08:31:05 crc kubenswrapper[4492]: I1126 08:31:05.805902 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bf7ca456-5942-4afd-80f5-abafd240f511-catalog-content\") pod \"redhat-operators-9vnqj\" (UID: \"bf7ca456-5942-4afd-80f5-abafd240f511\") " pod="openshift-marketplace/redhat-operators-9vnqj" Nov 26 08:31:05 crc kubenswrapper[4492]: I1126 08:31:05.806129 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bf7ca456-5942-4afd-80f5-abafd240f511-utilities\") pod \"redhat-operators-9vnqj\" (UID: \"bf7ca456-5942-4afd-80f5-abafd240f511\") " pod="openshift-marketplace/redhat-operators-9vnqj" Nov 26 08:31:05 crc kubenswrapper[4492]: I1126 08:31:05.806227 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q8ps7\" (UniqueName: \"kubernetes.io/projected/bf7ca456-5942-4afd-80f5-abafd240f511-kube-api-access-q8ps7\") pod \"redhat-operators-9vnqj\" (UID: \"bf7ca456-5942-4afd-80f5-abafd240f511\") " pod="openshift-marketplace/redhat-operators-9vnqj" Nov 26 08:31:05 crc kubenswrapper[4492]: I1126 08:31:05.908231 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bf7ca456-5942-4afd-80f5-abafd240f511-utilities\") pod \"redhat-operators-9vnqj\" (UID: \"bf7ca456-5942-4afd-80f5-abafd240f511\") " pod="openshift-marketplace/redhat-operators-9vnqj" Nov 26 08:31:05 crc kubenswrapper[4492]: I1126 08:31:05.908319 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q8ps7\" (UniqueName: \"kubernetes.io/projected/bf7ca456-5942-4afd-80f5-abafd240f511-kube-api-access-q8ps7\") pod \"redhat-operators-9vnqj\" (UID: \"bf7ca456-5942-4afd-80f5-abafd240f511\") " pod="openshift-marketplace/redhat-operators-9vnqj" Nov 26 08:31:05 crc kubenswrapper[4492]: I1126 08:31:05.908378 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bf7ca456-5942-4afd-80f5-abafd240f511-catalog-content\") pod \"redhat-operators-9vnqj\" (UID: 
\"bf7ca456-5942-4afd-80f5-abafd240f511\") " pod="openshift-marketplace/redhat-operators-9vnqj" Nov 26 08:31:05 crc kubenswrapper[4492]: I1126 08:31:05.908794 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bf7ca456-5942-4afd-80f5-abafd240f511-utilities\") pod \"redhat-operators-9vnqj\" (UID: \"bf7ca456-5942-4afd-80f5-abafd240f511\") " pod="openshift-marketplace/redhat-operators-9vnqj" Nov 26 08:31:05 crc kubenswrapper[4492]: I1126 08:31:05.908863 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bf7ca456-5942-4afd-80f5-abafd240f511-catalog-content\") pod \"redhat-operators-9vnqj\" (UID: \"bf7ca456-5942-4afd-80f5-abafd240f511\") " pod="openshift-marketplace/redhat-operators-9vnqj" Nov 26 08:31:05 crc kubenswrapper[4492]: I1126 08:31:05.927692 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q8ps7\" (UniqueName: \"kubernetes.io/projected/bf7ca456-5942-4afd-80f5-abafd240f511-kube-api-access-q8ps7\") pod \"redhat-operators-9vnqj\" (UID: \"bf7ca456-5942-4afd-80f5-abafd240f511\") " pod="openshift-marketplace/redhat-operators-9vnqj" Nov 26 08:31:06 crc kubenswrapper[4492]: I1126 08:31:06.099230 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-9vnqj" Nov 26 08:31:06 crc kubenswrapper[4492]: I1126 08:31:06.724053 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-9vnqj"] Nov 26 08:31:07 crc kubenswrapper[4492]: I1126 08:31:07.498045 4492 generic.go:334] "Generic (PLEG): container finished" podID="bf7ca456-5942-4afd-80f5-abafd240f511" containerID="43aab4d519180dcfd78c69d1b01c2bf8302470c0ad736cddd419c69d99a3ca05" exitCode=0 Nov 26 08:31:07 crc kubenswrapper[4492]: I1126 08:31:07.498147 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9vnqj" event={"ID":"bf7ca456-5942-4afd-80f5-abafd240f511","Type":"ContainerDied","Data":"43aab4d519180dcfd78c69d1b01c2bf8302470c0ad736cddd419c69d99a3ca05"} Nov 26 08:31:07 crc kubenswrapper[4492]: I1126 08:31:07.498541 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9vnqj" event={"ID":"bf7ca456-5942-4afd-80f5-abafd240f511","Type":"ContainerStarted","Data":"d20db1661e6c363a76362df98c5e84261ed15a7c0a4d849ea6d8a46c83f401d0"} Nov 26 08:31:08 crc kubenswrapper[4492]: I1126 08:31:08.526588 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9vnqj" event={"ID":"bf7ca456-5942-4afd-80f5-abafd240f511","Type":"ContainerStarted","Data":"4e4a4db6e51d2f7a55627a67ca626e4ffa3355101567ab15cdf88b87add464a8"} Nov 26 08:31:11 crc kubenswrapper[4492]: I1126 08:31:11.554759 4492 generic.go:334] "Generic (PLEG): container finished" podID="bf7ca456-5942-4afd-80f5-abafd240f511" containerID="4e4a4db6e51d2f7a55627a67ca626e4ffa3355101567ab15cdf88b87add464a8" exitCode=0 Nov 26 08:31:11 crc kubenswrapper[4492]: I1126 08:31:11.554807 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9vnqj" event={"ID":"bf7ca456-5942-4afd-80f5-abafd240f511","Type":"ContainerDied","Data":"4e4a4db6e51d2f7a55627a67ca626e4ffa3355101567ab15cdf88b87add464a8"} Nov 26 08:31:12 crc kubenswrapper[4492]: I1126 08:31:12.573611 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/redhat-operators-9vnqj" event={"ID":"bf7ca456-5942-4afd-80f5-abafd240f511","Type":"ContainerStarted","Data":"505f557e8fcbd605062eb1d135f422de37823bff2eaf4838ffe8a872eb574bdd"} Nov 26 08:31:12 crc kubenswrapper[4492]: I1126 08:31:12.604456 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-9vnqj" podStartSLOduration=3.072115536 podStartE2EDuration="7.601242726s" podCreationTimestamp="2025-11-26 08:31:05 +0000 UTC" firstStartedPulling="2025-11-26 08:31:07.502032946 +0000 UTC m=+6163.385921234" lastFinishedPulling="2025-11-26 08:31:12.031160126 +0000 UTC m=+6167.915048424" observedRunningTime="2025-11-26 08:31:12.588863331 +0000 UTC m=+6168.472751628" watchObservedRunningTime="2025-11-26 08:31:12.601242726 +0000 UTC m=+6168.485131024" Nov 26 08:31:16 crc kubenswrapper[4492]: I1126 08:31:16.101266 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-9vnqj" Nov 26 08:31:16 crc kubenswrapper[4492]: I1126 08:31:16.102051 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-9vnqj" Nov 26 08:31:16 crc kubenswrapper[4492]: I1126 08:31:16.619899 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-dn5ft"] Nov 26 08:31:16 crc kubenswrapper[4492]: I1126 08:31:16.622252 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-dn5ft" Nov 26 08:31:16 crc kubenswrapper[4492]: I1126 08:31:16.629256 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-dn5ft"] Nov 26 08:31:16 crc kubenswrapper[4492]: I1126 08:31:16.666468 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/560f776b-4180-4a1d-921c-277378e3eb27-utilities\") pod \"certified-operators-dn5ft\" (UID: \"560f776b-4180-4a1d-921c-277378e3eb27\") " pod="openshift-marketplace/certified-operators-dn5ft" Nov 26 08:31:16 crc kubenswrapper[4492]: I1126 08:31:16.666514 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/560f776b-4180-4a1d-921c-277378e3eb27-catalog-content\") pod \"certified-operators-dn5ft\" (UID: \"560f776b-4180-4a1d-921c-277378e3eb27\") " pod="openshift-marketplace/certified-operators-dn5ft" Nov 26 08:31:16 crc kubenswrapper[4492]: I1126 08:31:16.666579 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kbpmp\" (UniqueName: \"kubernetes.io/projected/560f776b-4180-4a1d-921c-277378e3eb27-kube-api-access-kbpmp\") pod \"certified-operators-dn5ft\" (UID: \"560f776b-4180-4a1d-921c-277378e3eb27\") " pod="openshift-marketplace/certified-operators-dn5ft" Nov 26 08:31:16 crc kubenswrapper[4492]: I1126 08:31:16.768631 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/560f776b-4180-4a1d-921c-277378e3eb27-utilities\") pod \"certified-operators-dn5ft\" (UID: \"560f776b-4180-4a1d-921c-277378e3eb27\") " pod="openshift-marketplace/certified-operators-dn5ft" Nov 26 08:31:16 crc kubenswrapper[4492]: I1126 08:31:16.768683 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/560f776b-4180-4a1d-921c-277378e3eb27-catalog-content\") pod \"certified-operators-dn5ft\" (UID: \"560f776b-4180-4a1d-921c-277378e3eb27\") " pod="openshift-marketplace/certified-operators-dn5ft" Nov 26 08:31:16 crc kubenswrapper[4492]: I1126 08:31:16.768728 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kbpmp\" (UniqueName: \"kubernetes.io/projected/560f776b-4180-4a1d-921c-277378e3eb27-kube-api-access-kbpmp\") pod \"certified-operators-dn5ft\" (UID: \"560f776b-4180-4a1d-921c-277378e3eb27\") " pod="openshift-marketplace/certified-operators-dn5ft" Nov 26 08:31:16 crc kubenswrapper[4492]: I1126 08:31:16.769454 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/560f776b-4180-4a1d-921c-277378e3eb27-catalog-content\") pod \"certified-operators-dn5ft\" (UID: \"560f776b-4180-4a1d-921c-277378e3eb27\") " pod="openshift-marketplace/certified-operators-dn5ft" Nov 26 08:31:16 crc kubenswrapper[4492]: I1126 08:31:16.769956 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/560f776b-4180-4a1d-921c-277378e3eb27-utilities\") pod \"certified-operators-dn5ft\" (UID: \"560f776b-4180-4a1d-921c-277378e3eb27\") " pod="openshift-marketplace/certified-operators-dn5ft" Nov 26 08:31:16 crc kubenswrapper[4492]: I1126 08:31:16.798967 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kbpmp\" (UniqueName: \"kubernetes.io/projected/560f776b-4180-4a1d-921c-277378e3eb27-kube-api-access-kbpmp\") pod \"certified-operators-dn5ft\" (UID: \"560f776b-4180-4a1d-921c-277378e3eb27\") " pod="openshift-marketplace/certified-operators-dn5ft" Nov 26 08:31:16 crc kubenswrapper[4492]: I1126 08:31:16.939558 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-dn5ft" Nov 26 08:31:17 crc kubenswrapper[4492]: I1126 08:31:17.146989 4492 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-9vnqj" podUID="bf7ca456-5942-4afd-80f5-abafd240f511" containerName="registry-server" probeResult="failure" output=< Nov 26 08:31:17 crc kubenswrapper[4492]: timeout: failed to connect service ":50051" within 1s Nov 26 08:31:17 crc kubenswrapper[4492]: > Nov 26 08:31:17 crc kubenswrapper[4492]: I1126 08:31:17.596091 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-dn5ft"] Nov 26 08:31:18 crc kubenswrapper[4492]: I1126 08:31:18.642220 4492 generic.go:334] "Generic (PLEG): container finished" podID="560f776b-4180-4a1d-921c-277378e3eb27" containerID="352cad0bf3f43a4ae8c12a5c4284379d135cb3ac0a189d2a74e71aeef293da96" exitCode=0 Nov 26 08:31:18 crc kubenswrapper[4492]: I1126 08:31:18.642292 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dn5ft" event={"ID":"560f776b-4180-4a1d-921c-277378e3eb27","Type":"ContainerDied","Data":"352cad0bf3f43a4ae8c12a5c4284379d135cb3ac0a189d2a74e71aeef293da96"} Nov 26 08:31:18 crc kubenswrapper[4492]: I1126 08:31:18.642549 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dn5ft" event={"ID":"560f776b-4180-4a1d-921c-277378e3eb27","Type":"ContainerStarted","Data":"9f17d7a85c841e6301191696ddbd6c5953a11767c8d01f59add52e05bc7ea345"} Nov 26 08:31:19 crc kubenswrapper[4492]: I1126 08:31:19.441152 4492 patch_prober.go:28] interesting pod/machine-config-daemon-6blv7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 08:31:19 crc kubenswrapper[4492]: I1126 08:31:19.441358 4492 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 08:31:19 crc kubenswrapper[4492]: I1126 08:31:19.441431 4492 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" Nov 26 08:31:19 crc kubenswrapper[4492]: I1126 08:31:19.441968 4492 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"bf8b387c53b3dc1535b509352a21b747c1878da29b1b6246819a0c84ffa21202"} pod="openshift-machine-config-operator/machine-config-daemon-6blv7" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 08:31:19 crc kubenswrapper[4492]: I1126 08:31:19.442049 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" containerID="cri-o://bf8b387c53b3dc1535b509352a21b747c1878da29b1b6246819a0c84ffa21202" gracePeriod=600 Nov 26 08:31:19 crc kubenswrapper[4492]: I1126 08:31:19.661463 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dn5ft" 
event={"ID":"560f776b-4180-4a1d-921c-277378e3eb27","Type":"ContainerStarted","Data":"cc8df3a2876e4aed31139c256be6d5ac09c032e556c33d21d03d5debee238e5b"} Nov 26 08:31:19 crc kubenswrapper[4492]: I1126 08:31:19.666272 4492 generic.go:334] "Generic (PLEG): container finished" podID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerID="bf8b387c53b3dc1535b509352a21b747c1878da29b1b6246819a0c84ffa21202" exitCode=0 Nov 26 08:31:19 crc kubenswrapper[4492]: I1126 08:31:19.666415 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" event={"ID":"04bf18ad-d2a1-4b30-a3fa-2b6247363c82","Type":"ContainerDied","Data":"bf8b387c53b3dc1535b509352a21b747c1878da29b1b6246819a0c84ffa21202"} Nov 26 08:31:19 crc kubenswrapper[4492]: I1126 08:31:19.666525 4492 scope.go:117] "RemoveContainer" containerID="0bef70ebc6b8e4ed62ac66ad1ca221d9d32cc9c38ff5d2136343f1b32d9200c3" Nov 26 08:31:20 crc kubenswrapper[4492]: I1126 08:31:20.678121 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" event={"ID":"04bf18ad-d2a1-4b30-a3fa-2b6247363c82","Type":"ContainerStarted","Data":"95526513ee74b3dc6b8fbded68d7f89552fca80fa1e15c8363724e3a610dc7fc"} Nov 26 08:31:20 crc kubenswrapper[4492]: I1126 08:31:20.680729 4492 generic.go:334] "Generic (PLEG): container finished" podID="560f776b-4180-4a1d-921c-277378e3eb27" containerID="cc8df3a2876e4aed31139c256be6d5ac09c032e556c33d21d03d5debee238e5b" exitCode=0 Nov 26 08:31:20 crc kubenswrapper[4492]: I1126 08:31:20.680808 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dn5ft" event={"ID":"560f776b-4180-4a1d-921c-277378e3eb27","Type":"ContainerDied","Data":"cc8df3a2876e4aed31139c256be6d5ac09c032e556c33d21d03d5debee238e5b"} Nov 26 08:31:21 crc kubenswrapper[4492]: I1126 08:31:21.714014 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dn5ft" event={"ID":"560f776b-4180-4a1d-921c-277378e3eb27","Type":"ContainerStarted","Data":"e607e12a67573ae3bc7e1fbad4d60e42a4eb7f79f01a8f4d46427be7fc1c6efc"} Nov 26 08:31:21 crc kubenswrapper[4492]: I1126 08:31:21.746254 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-dn5ft" podStartSLOduration=3.205258112 podStartE2EDuration="5.746228363s" podCreationTimestamp="2025-11-26 08:31:16 +0000 UTC" firstStartedPulling="2025-11-26 08:31:18.644579624 +0000 UTC m=+6174.528467922" lastFinishedPulling="2025-11-26 08:31:21.185549874 +0000 UTC m=+6177.069438173" observedRunningTime="2025-11-26 08:31:21.737100349 +0000 UTC m=+6177.620988648" watchObservedRunningTime="2025-11-26 08:31:21.746228363 +0000 UTC m=+6177.630116660" Nov 26 08:31:26 crc kubenswrapper[4492]: I1126 08:31:26.940660 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-dn5ft" Nov 26 08:31:26 crc kubenswrapper[4492]: I1126 08:31:26.941524 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-dn5ft" Nov 26 08:31:26 crc kubenswrapper[4492]: I1126 08:31:26.990047 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-dn5ft" Nov 26 08:31:27 crc kubenswrapper[4492]: I1126 08:31:27.182832 4492 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-9vnqj" 
podUID="bf7ca456-5942-4afd-80f5-abafd240f511" containerName="registry-server" probeResult="failure" output=< Nov 26 08:31:27 crc kubenswrapper[4492]: timeout: failed to connect service ":50051" within 1s Nov 26 08:31:27 crc kubenswrapper[4492]: > Nov 26 08:31:27 crc kubenswrapper[4492]: I1126 08:31:27.819849 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-dn5ft" Nov 26 08:31:30 crc kubenswrapper[4492]: I1126 08:31:30.608422 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-dn5ft"] Nov 26 08:31:30 crc kubenswrapper[4492]: I1126 08:31:30.610323 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-dn5ft" podUID="560f776b-4180-4a1d-921c-277378e3eb27" containerName="registry-server" containerID="cri-o://e607e12a67573ae3bc7e1fbad4d60e42a4eb7f79f01a8f4d46427be7fc1c6efc" gracePeriod=2 Nov 26 08:31:30 crc kubenswrapper[4492]: I1126 08:31:30.817991 4492 generic.go:334] "Generic (PLEG): container finished" podID="560f776b-4180-4a1d-921c-277378e3eb27" containerID="e607e12a67573ae3bc7e1fbad4d60e42a4eb7f79f01a8f4d46427be7fc1c6efc" exitCode=0 Nov 26 08:31:30 crc kubenswrapper[4492]: I1126 08:31:30.818161 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dn5ft" event={"ID":"560f776b-4180-4a1d-921c-277378e3eb27","Type":"ContainerDied","Data":"e607e12a67573ae3bc7e1fbad4d60e42a4eb7f79f01a8f4d46427be7fc1c6efc"} Nov 26 08:31:31 crc kubenswrapper[4492]: I1126 08:31:31.163814 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-dn5ft" Nov 26 08:31:31 crc kubenswrapper[4492]: I1126 08:31:31.179498 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/560f776b-4180-4a1d-921c-277378e3eb27-utilities\") pod \"560f776b-4180-4a1d-921c-277378e3eb27\" (UID: \"560f776b-4180-4a1d-921c-277378e3eb27\") " Nov 26 08:31:31 crc kubenswrapper[4492]: I1126 08:31:31.179570 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/560f776b-4180-4a1d-921c-277378e3eb27-catalog-content\") pod \"560f776b-4180-4a1d-921c-277378e3eb27\" (UID: \"560f776b-4180-4a1d-921c-277378e3eb27\") " Nov 26 08:31:31 crc kubenswrapper[4492]: I1126 08:31:31.179645 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kbpmp\" (UniqueName: \"kubernetes.io/projected/560f776b-4180-4a1d-921c-277378e3eb27-kube-api-access-kbpmp\") pod \"560f776b-4180-4a1d-921c-277378e3eb27\" (UID: \"560f776b-4180-4a1d-921c-277378e3eb27\") " Nov 26 08:31:31 crc kubenswrapper[4492]: I1126 08:31:31.182100 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/560f776b-4180-4a1d-921c-277378e3eb27-utilities" (OuterVolumeSpecName: "utilities") pod "560f776b-4180-4a1d-921c-277378e3eb27" (UID: "560f776b-4180-4a1d-921c-277378e3eb27"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 08:31:31 crc kubenswrapper[4492]: I1126 08:31:31.189646 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/560f776b-4180-4a1d-921c-277378e3eb27-kube-api-access-kbpmp" (OuterVolumeSpecName: "kube-api-access-kbpmp") pod "560f776b-4180-4a1d-921c-277378e3eb27" (UID: "560f776b-4180-4a1d-921c-277378e3eb27"). InnerVolumeSpecName "kube-api-access-kbpmp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:31:31 crc kubenswrapper[4492]: I1126 08:31:31.219198 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/560f776b-4180-4a1d-921c-277378e3eb27-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "560f776b-4180-4a1d-921c-277378e3eb27" (UID: "560f776b-4180-4a1d-921c-277378e3eb27"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 08:31:31 crc kubenswrapper[4492]: I1126 08:31:31.283102 4492 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/560f776b-4180-4a1d-921c-277378e3eb27-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 08:31:31 crc kubenswrapper[4492]: I1126 08:31:31.283136 4492 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/560f776b-4180-4a1d-921c-277378e3eb27-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 08:31:31 crc kubenswrapper[4492]: I1126 08:31:31.283151 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kbpmp\" (UniqueName: \"kubernetes.io/projected/560f776b-4180-4a1d-921c-277378e3eb27-kube-api-access-kbpmp\") on node \"crc\" DevicePath \"\"" Nov 26 08:31:31 crc kubenswrapper[4492]: I1126 08:31:31.847899 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dn5ft" event={"ID":"560f776b-4180-4a1d-921c-277378e3eb27","Type":"ContainerDied","Data":"9f17d7a85c841e6301191696ddbd6c5953a11767c8d01f59add52e05bc7ea345"} Nov 26 08:31:31 crc kubenswrapper[4492]: I1126 08:31:31.848350 4492 scope.go:117] "RemoveContainer" containerID="e607e12a67573ae3bc7e1fbad4d60e42a4eb7f79f01a8f4d46427be7fc1c6efc" Nov 26 08:31:31 crc kubenswrapper[4492]: I1126 08:31:31.850912 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-dn5ft" Nov 26 08:31:31 crc kubenswrapper[4492]: I1126 08:31:31.894216 4492 scope.go:117] "RemoveContainer" containerID="cc8df3a2876e4aed31139c256be6d5ac09c032e556c33d21d03d5debee238e5b" Nov 26 08:31:31 crc kubenswrapper[4492]: I1126 08:31:31.915462 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-dn5ft"] Nov 26 08:31:31 crc kubenswrapper[4492]: I1126 08:31:31.925515 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-dn5ft"] Nov 26 08:31:31 crc kubenswrapper[4492]: I1126 08:31:31.933513 4492 scope.go:117] "RemoveContainer" containerID="352cad0bf3f43a4ae8c12a5c4284379d135cb3ac0a189d2a74e71aeef293da96" Nov 26 08:31:32 crc kubenswrapper[4492]: I1126 08:31:32.451455 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="560f776b-4180-4a1d-921c-277378e3eb27" path="/var/lib/kubelet/pods/560f776b-4180-4a1d-921c-277378e3eb27/volumes" Nov 26 08:31:36 crc kubenswrapper[4492]: I1126 08:31:36.157799 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-9vnqj" Nov 26 08:31:36 crc kubenswrapper[4492]: I1126 08:31:36.201420 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-9vnqj" Nov 26 08:31:37 crc kubenswrapper[4492]: I1126 08:31:37.407727 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-9vnqj"] Nov 26 08:31:37 crc kubenswrapper[4492]: I1126 08:31:37.914077 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-9vnqj" podUID="bf7ca456-5942-4afd-80f5-abafd240f511" containerName="registry-server" containerID="cri-o://505f557e8fcbd605062eb1d135f422de37823bff2eaf4838ffe8a872eb574bdd" gracePeriod=2 Nov 26 08:31:38 crc kubenswrapper[4492]: I1126 08:31:38.393115 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-9vnqj" Nov 26 08:31:38 crc kubenswrapper[4492]: I1126 08:31:38.458454 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q8ps7\" (UniqueName: \"kubernetes.io/projected/bf7ca456-5942-4afd-80f5-abafd240f511-kube-api-access-q8ps7\") pod \"bf7ca456-5942-4afd-80f5-abafd240f511\" (UID: \"bf7ca456-5942-4afd-80f5-abafd240f511\") " Nov 26 08:31:38 crc kubenswrapper[4492]: I1126 08:31:38.458879 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bf7ca456-5942-4afd-80f5-abafd240f511-catalog-content\") pod \"bf7ca456-5942-4afd-80f5-abafd240f511\" (UID: \"bf7ca456-5942-4afd-80f5-abafd240f511\") " Nov 26 08:31:38 crc kubenswrapper[4492]: I1126 08:31:38.458906 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bf7ca456-5942-4afd-80f5-abafd240f511-utilities\") pod \"bf7ca456-5942-4afd-80f5-abafd240f511\" (UID: \"bf7ca456-5942-4afd-80f5-abafd240f511\") " Nov 26 08:31:38 crc kubenswrapper[4492]: I1126 08:31:38.459615 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bf7ca456-5942-4afd-80f5-abafd240f511-utilities" (OuterVolumeSpecName: "utilities") pod "bf7ca456-5942-4afd-80f5-abafd240f511" (UID: "bf7ca456-5942-4afd-80f5-abafd240f511"). 
InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 08:31:38 crc kubenswrapper[4492]: I1126 08:31:38.466367 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf7ca456-5942-4afd-80f5-abafd240f511-kube-api-access-q8ps7" (OuterVolumeSpecName: "kube-api-access-q8ps7") pod "bf7ca456-5942-4afd-80f5-abafd240f511" (UID: "bf7ca456-5942-4afd-80f5-abafd240f511"). InnerVolumeSpecName "kube-api-access-q8ps7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:31:38 crc kubenswrapper[4492]: I1126 08:31:38.534921 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bf7ca456-5942-4afd-80f5-abafd240f511-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "bf7ca456-5942-4afd-80f5-abafd240f511" (UID: "bf7ca456-5942-4afd-80f5-abafd240f511"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 08:31:38 crc kubenswrapper[4492]: I1126 08:31:38.563523 4492 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bf7ca456-5942-4afd-80f5-abafd240f511-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 08:31:38 crc kubenswrapper[4492]: I1126 08:31:38.563857 4492 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bf7ca456-5942-4afd-80f5-abafd240f511-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 08:31:38 crc kubenswrapper[4492]: I1126 08:31:38.563898 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q8ps7\" (UniqueName: \"kubernetes.io/projected/bf7ca456-5942-4afd-80f5-abafd240f511-kube-api-access-q8ps7\") on node \"crc\" DevicePath \"\"" Nov 26 08:31:38 crc kubenswrapper[4492]: I1126 08:31:38.927881 4492 generic.go:334] "Generic (PLEG): container finished" podID="bf7ca456-5942-4afd-80f5-abafd240f511" containerID="505f557e8fcbd605062eb1d135f422de37823bff2eaf4838ffe8a872eb574bdd" exitCode=0 Nov 26 08:31:38 crc kubenswrapper[4492]: I1126 08:31:38.928255 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9vnqj" event={"ID":"bf7ca456-5942-4afd-80f5-abafd240f511","Type":"ContainerDied","Data":"505f557e8fcbd605062eb1d135f422de37823bff2eaf4838ffe8a872eb574bdd"} Nov 26 08:31:38 crc kubenswrapper[4492]: I1126 08:31:38.928302 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9vnqj" event={"ID":"bf7ca456-5942-4afd-80f5-abafd240f511","Type":"ContainerDied","Data":"d20db1661e6c363a76362df98c5e84261ed15a7c0a4d849ea6d8a46c83f401d0"} Nov 26 08:31:38 crc kubenswrapper[4492]: I1126 08:31:38.928327 4492 scope.go:117] "RemoveContainer" containerID="505f557e8fcbd605062eb1d135f422de37823bff2eaf4838ffe8a872eb574bdd" Nov 26 08:31:38 crc kubenswrapper[4492]: I1126 08:31:38.928525 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-9vnqj" Nov 26 08:31:38 crc kubenswrapper[4492]: I1126 08:31:38.971010 4492 scope.go:117] "RemoveContainer" containerID="4e4a4db6e51d2f7a55627a67ca626e4ffa3355101567ab15cdf88b87add464a8" Nov 26 08:31:38 crc kubenswrapper[4492]: I1126 08:31:38.974418 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-9vnqj"] Nov 26 08:31:38 crc kubenswrapper[4492]: I1126 08:31:38.985810 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-9vnqj"] Nov 26 08:31:38 crc kubenswrapper[4492]: I1126 08:31:38.993868 4492 scope.go:117] "RemoveContainer" containerID="43aab4d519180dcfd78c69d1b01c2bf8302470c0ad736cddd419c69d99a3ca05" Nov 26 08:31:39 crc kubenswrapper[4492]: I1126 08:31:39.034660 4492 scope.go:117] "RemoveContainer" containerID="505f557e8fcbd605062eb1d135f422de37823bff2eaf4838ffe8a872eb574bdd" Nov 26 08:31:39 crc kubenswrapper[4492]: E1126 08:31:39.036498 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"505f557e8fcbd605062eb1d135f422de37823bff2eaf4838ffe8a872eb574bdd\": container with ID starting with 505f557e8fcbd605062eb1d135f422de37823bff2eaf4838ffe8a872eb574bdd not found: ID does not exist" containerID="505f557e8fcbd605062eb1d135f422de37823bff2eaf4838ffe8a872eb574bdd" Nov 26 08:31:39 crc kubenswrapper[4492]: I1126 08:31:39.036540 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"505f557e8fcbd605062eb1d135f422de37823bff2eaf4838ffe8a872eb574bdd"} err="failed to get container status \"505f557e8fcbd605062eb1d135f422de37823bff2eaf4838ffe8a872eb574bdd\": rpc error: code = NotFound desc = could not find container \"505f557e8fcbd605062eb1d135f422de37823bff2eaf4838ffe8a872eb574bdd\": container with ID starting with 505f557e8fcbd605062eb1d135f422de37823bff2eaf4838ffe8a872eb574bdd not found: ID does not exist" Nov 26 08:31:39 crc kubenswrapper[4492]: I1126 08:31:39.036569 4492 scope.go:117] "RemoveContainer" containerID="4e4a4db6e51d2f7a55627a67ca626e4ffa3355101567ab15cdf88b87add464a8" Nov 26 08:31:39 crc kubenswrapper[4492]: E1126 08:31:39.037040 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4e4a4db6e51d2f7a55627a67ca626e4ffa3355101567ab15cdf88b87add464a8\": container with ID starting with 4e4a4db6e51d2f7a55627a67ca626e4ffa3355101567ab15cdf88b87add464a8 not found: ID does not exist" containerID="4e4a4db6e51d2f7a55627a67ca626e4ffa3355101567ab15cdf88b87add464a8" Nov 26 08:31:39 crc kubenswrapper[4492]: I1126 08:31:39.037081 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4e4a4db6e51d2f7a55627a67ca626e4ffa3355101567ab15cdf88b87add464a8"} err="failed to get container status \"4e4a4db6e51d2f7a55627a67ca626e4ffa3355101567ab15cdf88b87add464a8\": rpc error: code = NotFound desc = could not find container \"4e4a4db6e51d2f7a55627a67ca626e4ffa3355101567ab15cdf88b87add464a8\": container with ID starting with 4e4a4db6e51d2f7a55627a67ca626e4ffa3355101567ab15cdf88b87add464a8 not found: ID does not exist" Nov 26 08:31:39 crc kubenswrapper[4492]: I1126 08:31:39.037112 4492 scope.go:117] "RemoveContainer" containerID="43aab4d519180dcfd78c69d1b01c2bf8302470c0ad736cddd419c69d99a3ca05" Nov 26 08:31:39 crc kubenswrapper[4492]: E1126 08:31:39.037542 4492 log.go:32] "ContainerStatus from runtime service failed" 
err="rpc error: code = NotFound desc = could not find container \"43aab4d519180dcfd78c69d1b01c2bf8302470c0ad736cddd419c69d99a3ca05\": container with ID starting with 43aab4d519180dcfd78c69d1b01c2bf8302470c0ad736cddd419c69d99a3ca05 not found: ID does not exist" containerID="43aab4d519180dcfd78c69d1b01c2bf8302470c0ad736cddd419c69d99a3ca05" Nov 26 08:31:39 crc kubenswrapper[4492]: I1126 08:31:39.037573 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"43aab4d519180dcfd78c69d1b01c2bf8302470c0ad736cddd419c69d99a3ca05"} err="failed to get container status \"43aab4d519180dcfd78c69d1b01c2bf8302470c0ad736cddd419c69d99a3ca05\": rpc error: code = NotFound desc = could not find container \"43aab4d519180dcfd78c69d1b01c2bf8302470c0ad736cddd419c69d99a3ca05\": container with ID starting with 43aab4d519180dcfd78c69d1b01c2bf8302470c0ad736cddd419c69d99a3ca05 not found: ID does not exist" Nov 26 08:31:40 crc kubenswrapper[4492]: I1126 08:31:40.452042 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf7ca456-5942-4afd-80f5-abafd240f511" path="/var/lib/kubelet/pods/bf7ca456-5942-4afd-80f5-abafd240f511/volumes" Nov 26 08:33:19 crc kubenswrapper[4492]: I1126 08:33:19.441515 4492 patch_prober.go:28] interesting pod/machine-config-daemon-6blv7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 08:33:19 crc kubenswrapper[4492]: I1126 08:33:19.442243 4492 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 08:33:49 crc kubenswrapper[4492]: I1126 08:33:49.441592 4492 patch_prober.go:28] interesting pod/machine-config-daemon-6blv7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 08:33:49 crc kubenswrapper[4492]: I1126 08:33:49.442353 4492 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 08:34:19 crc kubenswrapper[4492]: I1126 08:34:19.441325 4492 patch_prober.go:28] interesting pod/machine-config-daemon-6blv7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 08:34:19 crc kubenswrapper[4492]: I1126 08:34:19.442019 4492 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 08:34:19 crc kubenswrapper[4492]: I1126 08:34:19.442090 4492 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" 
status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" Nov 26 08:34:19 crc kubenswrapper[4492]: I1126 08:34:19.442686 4492 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"95526513ee74b3dc6b8fbded68d7f89552fca80fa1e15c8363724e3a610dc7fc"} pod="openshift-machine-config-operator/machine-config-daemon-6blv7" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 08:34:19 crc kubenswrapper[4492]: I1126 08:34:19.442749 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" containerID="cri-o://95526513ee74b3dc6b8fbded68d7f89552fca80fa1e15c8363724e3a610dc7fc" gracePeriod=600 Nov 26 08:34:19 crc kubenswrapper[4492]: E1126 08:34:19.574156 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:34:20 crc kubenswrapper[4492]: I1126 08:34:20.547617 4492 generic.go:334] "Generic (PLEG): container finished" podID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerID="95526513ee74b3dc6b8fbded68d7f89552fca80fa1e15c8363724e3a610dc7fc" exitCode=0 Nov 26 08:34:20 crc kubenswrapper[4492]: I1126 08:34:20.547682 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" event={"ID":"04bf18ad-d2a1-4b30-a3fa-2b6247363c82","Type":"ContainerDied","Data":"95526513ee74b3dc6b8fbded68d7f89552fca80fa1e15c8363724e3a610dc7fc"} Nov 26 08:34:20 crc kubenswrapper[4492]: I1126 08:34:20.547745 4492 scope.go:117] "RemoveContainer" containerID="bf8b387c53b3dc1535b509352a21b747c1878da29b1b6246819a0c84ffa21202" Nov 26 08:34:20 crc kubenswrapper[4492]: I1126 08:34:20.548894 4492 scope.go:117] "RemoveContainer" containerID="95526513ee74b3dc6b8fbded68d7f89552fca80fa1e15c8363724e3a610dc7fc" Nov 26 08:34:20 crc kubenswrapper[4492]: E1126 08:34:20.549416 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:34:34 crc kubenswrapper[4492]: I1126 08:34:34.445360 4492 scope.go:117] "RemoveContainer" containerID="95526513ee74b3dc6b8fbded68d7f89552fca80fa1e15c8363724e3a610dc7fc" Nov 26 08:34:34 crc kubenswrapper[4492]: E1126 08:34:34.446705 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:34:47 crc 
kubenswrapper[4492]: I1126 08:34:47.439234 4492 scope.go:117] "RemoveContainer" containerID="95526513ee74b3dc6b8fbded68d7f89552fca80fa1e15c8363724e3a610dc7fc" Nov 26 08:34:47 crc kubenswrapper[4492]: E1126 08:34:47.440147 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:35:00 crc kubenswrapper[4492]: I1126 08:35:00.439408 4492 scope.go:117] "RemoveContainer" containerID="95526513ee74b3dc6b8fbded68d7f89552fca80fa1e15c8363724e3a610dc7fc" Nov 26 08:35:00 crc kubenswrapper[4492]: E1126 08:35:00.441048 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:35:11 crc kubenswrapper[4492]: I1126 08:35:11.439393 4492 scope.go:117] "RemoveContainer" containerID="95526513ee74b3dc6b8fbded68d7f89552fca80fa1e15c8363724e3a610dc7fc" Nov 26 08:35:11 crc kubenswrapper[4492]: E1126 08:35:11.440725 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:35:26 crc kubenswrapper[4492]: I1126 08:35:26.439199 4492 scope.go:117] "RemoveContainer" containerID="95526513ee74b3dc6b8fbded68d7f89552fca80fa1e15c8363724e3a610dc7fc" Nov 26 08:35:26 crc kubenswrapper[4492]: E1126 08:35:26.440373 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:35:38 crc kubenswrapper[4492]: I1126 08:35:38.438791 4492 scope.go:117] "RemoveContainer" containerID="95526513ee74b3dc6b8fbded68d7f89552fca80fa1e15c8363724e3a610dc7fc" Nov 26 08:35:38 crc kubenswrapper[4492]: E1126 08:35:38.439872 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:35:52 crc kubenswrapper[4492]: I1126 08:35:52.438281 4492 scope.go:117] "RemoveContainer" containerID="95526513ee74b3dc6b8fbded68d7f89552fca80fa1e15c8363724e3a610dc7fc" Nov 26 08:35:52 crc 
kubenswrapper[4492]: E1126 08:35:52.439293 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:36:06 crc kubenswrapper[4492]: I1126 08:36:06.439702 4492 scope.go:117] "RemoveContainer" containerID="95526513ee74b3dc6b8fbded68d7f89552fca80fa1e15c8363724e3a610dc7fc" Nov 26 08:36:06 crc kubenswrapper[4492]: E1126 08:36:06.440525 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:36:19 crc kubenswrapper[4492]: I1126 08:36:19.438909 4492 scope.go:117] "RemoveContainer" containerID="95526513ee74b3dc6b8fbded68d7f89552fca80fa1e15c8363724e3a610dc7fc" Nov 26 08:36:19 crc kubenswrapper[4492]: E1126 08:36:19.439948 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:36:30 crc kubenswrapper[4492]: I1126 08:36:30.438359 4492 scope.go:117] "RemoveContainer" containerID="95526513ee74b3dc6b8fbded68d7f89552fca80fa1e15c8363724e3a610dc7fc" Nov 26 08:36:30 crc kubenswrapper[4492]: E1126 08:36:30.440870 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:36:42 crc kubenswrapper[4492]: I1126 08:36:42.439678 4492 scope.go:117] "RemoveContainer" containerID="95526513ee74b3dc6b8fbded68d7f89552fca80fa1e15c8363724e3a610dc7fc" Nov 26 08:36:42 crc kubenswrapper[4492]: E1126 08:36:42.440867 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:36:54 crc kubenswrapper[4492]: I1126 08:36:54.439486 4492 scope.go:117] "RemoveContainer" containerID="95526513ee74b3dc6b8fbded68d7f89552fca80fa1e15c8363724e3a610dc7fc" Nov 26 08:36:54 crc kubenswrapper[4492]: E1126 08:36:54.440438 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: 
\"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:37:07 crc kubenswrapper[4492]: I1126 08:37:07.438844 4492 scope.go:117] "RemoveContainer" containerID="95526513ee74b3dc6b8fbded68d7f89552fca80fa1e15c8363724e3a610dc7fc" Nov 26 08:37:07 crc kubenswrapper[4492]: E1126 08:37:07.439888 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:37:21 crc kubenswrapper[4492]: I1126 08:37:21.438010 4492 scope.go:117] "RemoveContainer" containerID="95526513ee74b3dc6b8fbded68d7f89552fca80fa1e15c8363724e3a610dc7fc" Nov 26 08:37:21 crc kubenswrapper[4492]: E1126 08:37:21.438950 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:37:33 crc kubenswrapper[4492]: I1126 08:37:33.439131 4492 scope.go:117] "RemoveContainer" containerID="95526513ee74b3dc6b8fbded68d7f89552fca80fa1e15c8363724e3a610dc7fc" Nov 26 08:37:33 crc kubenswrapper[4492]: E1126 08:37:33.440546 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:37:46 crc kubenswrapper[4492]: I1126 08:37:46.439641 4492 scope.go:117] "RemoveContainer" containerID="95526513ee74b3dc6b8fbded68d7f89552fca80fa1e15c8363724e3a610dc7fc" Nov 26 08:37:46 crc kubenswrapper[4492]: E1126 08:37:46.440786 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:37:59 crc kubenswrapper[4492]: I1126 08:37:59.439448 4492 scope.go:117] "RemoveContainer" containerID="95526513ee74b3dc6b8fbded68d7f89552fca80fa1e15c8363724e3a610dc7fc" Nov 26 08:37:59 crc kubenswrapper[4492]: E1126 08:37:59.440671 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:38:13 crc kubenswrapper[4492]: I1126 08:38:13.439624 4492 scope.go:117] "RemoveContainer" containerID="95526513ee74b3dc6b8fbded68d7f89552fca80fa1e15c8363724e3a610dc7fc" Nov 26 08:38:13 crc kubenswrapper[4492]: E1126 08:38:13.440508 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:38:26 crc kubenswrapper[4492]: I1126 08:38:26.440145 4492 scope.go:117] "RemoveContainer" containerID="95526513ee74b3dc6b8fbded68d7f89552fca80fa1e15c8363724e3a610dc7fc" Nov 26 08:38:26 crc kubenswrapper[4492]: E1126 08:38:26.441411 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:38:38 crc kubenswrapper[4492]: I1126 08:38:38.439166 4492 scope.go:117] "RemoveContainer" containerID="95526513ee74b3dc6b8fbded68d7f89552fca80fa1e15c8363724e3a610dc7fc" Nov 26 08:38:38 crc kubenswrapper[4492]: E1126 08:38:38.440479 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:38:49 crc kubenswrapper[4492]: I1126 08:38:49.439203 4492 scope.go:117] "RemoveContainer" containerID="95526513ee74b3dc6b8fbded68d7f89552fca80fa1e15c8363724e3a610dc7fc" Nov 26 08:38:49 crc kubenswrapper[4492]: E1126 08:38:49.440320 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:39:04 crc kubenswrapper[4492]: I1126 08:39:04.445769 4492 scope.go:117] "RemoveContainer" containerID="95526513ee74b3dc6b8fbded68d7f89552fca80fa1e15c8363724e3a610dc7fc" Nov 26 08:39:04 crc kubenswrapper[4492]: E1126 08:39:04.446621 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:39:17 crc kubenswrapper[4492]: I1126 08:39:17.438723 4492 
scope.go:117] "RemoveContainer" containerID="95526513ee74b3dc6b8fbded68d7f89552fca80fa1e15c8363724e3a610dc7fc" Nov 26 08:39:17 crc kubenswrapper[4492]: E1126 08:39:17.439748 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:39:28 crc kubenswrapper[4492]: I1126 08:39:28.438944 4492 scope.go:117] "RemoveContainer" containerID="95526513ee74b3dc6b8fbded68d7f89552fca80fa1e15c8363724e3a610dc7fc" Nov 26 08:39:29 crc kubenswrapper[4492]: I1126 08:39:29.612863 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" event={"ID":"04bf18ad-d2a1-4b30-a3fa-2b6247363c82","Type":"ContainerStarted","Data":"0fc0037b699acf411d41be0850c2d0d066d14797772966cdf096215720cf6e18"} Nov 26 08:39:30 crc kubenswrapper[4492]: I1126 08:39:30.245382 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-4fvwc"] Nov 26 08:39:30 crc kubenswrapper[4492]: E1126 08:39:30.248265 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bf7ca456-5942-4afd-80f5-abafd240f511" containerName="extract-utilities" Nov 26 08:39:30 crc kubenswrapper[4492]: I1126 08:39:30.248292 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="bf7ca456-5942-4afd-80f5-abafd240f511" containerName="extract-utilities" Nov 26 08:39:30 crc kubenswrapper[4492]: E1126 08:39:30.248312 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bf7ca456-5942-4afd-80f5-abafd240f511" containerName="registry-server" Nov 26 08:39:30 crc kubenswrapper[4492]: I1126 08:39:30.248318 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="bf7ca456-5942-4afd-80f5-abafd240f511" containerName="registry-server" Nov 26 08:39:30 crc kubenswrapper[4492]: E1126 08:39:30.248330 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bf7ca456-5942-4afd-80f5-abafd240f511" containerName="extract-content" Nov 26 08:39:30 crc kubenswrapper[4492]: I1126 08:39:30.248336 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="bf7ca456-5942-4afd-80f5-abafd240f511" containerName="extract-content" Nov 26 08:39:30 crc kubenswrapper[4492]: E1126 08:39:30.248344 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="560f776b-4180-4a1d-921c-277378e3eb27" containerName="extract-utilities" Nov 26 08:39:30 crc kubenswrapper[4492]: I1126 08:39:30.248349 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="560f776b-4180-4a1d-921c-277378e3eb27" containerName="extract-utilities" Nov 26 08:39:30 crc kubenswrapper[4492]: E1126 08:39:30.248376 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="560f776b-4180-4a1d-921c-277378e3eb27" containerName="registry-server" Nov 26 08:39:30 crc kubenswrapper[4492]: I1126 08:39:30.248382 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="560f776b-4180-4a1d-921c-277378e3eb27" containerName="registry-server" Nov 26 08:39:30 crc kubenswrapper[4492]: E1126 08:39:30.248392 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="560f776b-4180-4a1d-921c-277378e3eb27" containerName="extract-content" Nov 26 08:39:30 crc kubenswrapper[4492]: I1126 08:39:30.248398 
Nov 26 08:39:30 crc kubenswrapper[4492]: I1126 08:39:30.245382 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-4fvwc"]
Nov 26 08:39:30 crc kubenswrapper[4492]: E1126 08:39:30.248265 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bf7ca456-5942-4afd-80f5-abafd240f511" containerName="extract-utilities"
Nov 26 08:39:30 crc kubenswrapper[4492]: I1126 08:39:30.248292 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="bf7ca456-5942-4afd-80f5-abafd240f511" containerName="extract-utilities"
Nov 26 08:39:30 crc kubenswrapper[4492]: E1126 08:39:30.248312 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bf7ca456-5942-4afd-80f5-abafd240f511" containerName="registry-server"
Nov 26 08:39:30 crc kubenswrapper[4492]: I1126 08:39:30.248318 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="bf7ca456-5942-4afd-80f5-abafd240f511" containerName="registry-server"
Nov 26 08:39:30 crc kubenswrapper[4492]: E1126 08:39:30.248330 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bf7ca456-5942-4afd-80f5-abafd240f511" containerName="extract-content"
Nov 26 08:39:30 crc kubenswrapper[4492]: I1126 08:39:30.248336 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="bf7ca456-5942-4afd-80f5-abafd240f511" containerName="extract-content"
Nov 26 08:39:30 crc kubenswrapper[4492]: E1126 08:39:30.248344 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="560f776b-4180-4a1d-921c-277378e3eb27" containerName="extract-utilities"
Nov 26 08:39:30 crc kubenswrapper[4492]: I1126 08:39:30.248349 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="560f776b-4180-4a1d-921c-277378e3eb27" containerName="extract-utilities"
Nov 26 08:39:30 crc kubenswrapper[4492]: E1126 08:39:30.248376 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="560f776b-4180-4a1d-921c-277378e3eb27" containerName="registry-server"
Nov 26 08:39:30 crc kubenswrapper[4492]: I1126 08:39:30.248382 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="560f776b-4180-4a1d-921c-277378e3eb27" containerName="registry-server"
Nov 26 08:39:30 crc kubenswrapper[4492]: E1126 08:39:30.248392 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="560f776b-4180-4a1d-921c-277378e3eb27" containerName="extract-content"
Nov 26 08:39:30 crc kubenswrapper[4492]: I1126 08:39:30.248398 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="560f776b-4180-4a1d-921c-277378e3eb27" containerName="extract-content"
Nov 26 08:39:30 crc kubenswrapper[4492]: I1126 08:39:30.248649 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="bf7ca456-5942-4afd-80f5-abafd240f511" containerName="registry-server"
Nov 26 08:39:30 crc kubenswrapper[4492]: I1126 08:39:30.248662 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="560f776b-4180-4a1d-921c-277378e3eb27" containerName="registry-server"
Nov 26 08:39:30 crc kubenswrapper[4492]: I1126 08:39:30.253145 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-4fvwc"
Nov 26 08:39:30 crc kubenswrapper[4492]: I1126 08:39:30.261314 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-4fvwc"]
Nov 26 08:39:30 crc kubenswrapper[4492]: I1126 08:39:30.338845 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/66a3c3f1-33da-46c7-956c-e33253f79a55-catalog-content\") pod \"community-operators-4fvwc\" (UID: \"66a3c3f1-33da-46c7-956c-e33253f79a55\") " pod="openshift-marketplace/community-operators-4fvwc"
Nov 26 08:39:30 crc kubenswrapper[4492]: I1126 08:39:30.339202 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/66a3c3f1-33da-46c7-956c-e33253f79a55-utilities\") pod \"community-operators-4fvwc\" (UID: \"66a3c3f1-33da-46c7-956c-e33253f79a55\") " pod="openshift-marketplace/community-operators-4fvwc"
Nov 26 08:39:30 crc kubenswrapper[4492]: I1126 08:39:30.339328 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rbm8s\" (UniqueName: \"kubernetes.io/projected/66a3c3f1-33da-46c7-956c-e33253f79a55-kube-api-access-rbm8s\") pod \"community-operators-4fvwc\" (UID: \"66a3c3f1-33da-46c7-956c-e33253f79a55\") " pod="openshift-marketplace/community-operators-4fvwc"
Nov 26 08:39:30 crc kubenswrapper[4492]: I1126 08:39:30.441202 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/66a3c3f1-33da-46c7-956c-e33253f79a55-catalog-content\") pod \"community-operators-4fvwc\" (UID: \"66a3c3f1-33da-46c7-956c-e33253f79a55\") " pod="openshift-marketplace/community-operators-4fvwc"
Nov 26 08:39:30 crc kubenswrapper[4492]: I1126 08:39:30.441538 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/66a3c3f1-33da-46c7-956c-e33253f79a55-utilities\") pod \"community-operators-4fvwc\" (UID: \"66a3c3f1-33da-46c7-956c-e33253f79a55\") " pod="openshift-marketplace/community-operators-4fvwc"
Nov 26 08:39:30 crc kubenswrapper[4492]: I1126 08:39:30.441650 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rbm8s\" (UniqueName: \"kubernetes.io/projected/66a3c3f1-33da-46c7-956c-e33253f79a55-kube-api-access-rbm8s\") pod \"community-operators-4fvwc\" (UID: \"66a3c3f1-33da-46c7-956c-e33253f79a55\") " pod="openshift-marketplace/community-operators-4fvwc"
Nov 26 08:39:30 crc kubenswrapper[4492]: I1126 08:39:30.443667 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/66a3c3f1-33da-46c7-956c-e33253f79a55-catalog-content\") pod \"community-operators-4fvwc\" (UID: \"66a3c3f1-33da-46c7-956c-e33253f79a55\") " pod="openshift-marketplace/community-operators-4fvwc"
Nov 26 08:39:30 crc kubenswrapper[4492]: I1126 08:39:30.444272 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/66a3c3f1-33da-46c7-956c-e33253f79a55-utilities\") pod \"community-operators-4fvwc\" (UID: \"66a3c3f1-33da-46c7-956c-e33253f79a55\") " pod="openshift-marketplace/community-operators-4fvwc"
Nov 26 08:39:30 crc kubenswrapper[4492]: I1126 08:39:30.467918 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rbm8s\" (UniqueName: \"kubernetes.io/projected/66a3c3f1-33da-46c7-956c-e33253f79a55-kube-api-access-rbm8s\") pod \"community-operators-4fvwc\" (UID: \"66a3c3f1-33da-46c7-956c-e33253f79a55\") " pod="openshift-marketplace/community-operators-4fvwc"
Nov 26 08:39:30 crc kubenswrapper[4492]: I1126 08:39:30.577454 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-4fvwc"
Nov 26 08:39:31 crc kubenswrapper[4492]: I1126 08:39:31.609560 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-4fvwc"]
Nov 26 08:39:31 crc kubenswrapper[4492]: W1126 08:39:31.631838 4492 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod66a3c3f1_33da_46c7_956c_e33253f79a55.slice/crio-5ce36840c42e5a3f9464fefb28f7874c0af587d39b3bf8b692ef3c32d09db1d9 WatchSource:0}: Error finding container 5ce36840c42e5a3f9464fefb28f7874c0af587d39b3bf8b692ef3c32d09db1d9: Status 404 returned error can't find the container with id 5ce36840c42e5a3f9464fefb28f7874c0af587d39b3bf8b692ef3c32d09db1d9
Nov 26 08:39:32 crc kubenswrapper[4492]: I1126 08:39:32.651913 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4fvwc" event={"ID":"66a3c3f1-33da-46c7-956c-e33253f79a55","Type":"ContainerDied","Data":"7eeb2d22adf4b12a312e6b643690e649fe4a9e97cae2da1c61abe7ea02beb02e"}
Nov 26 08:39:32 crc kubenswrapper[4492]: I1126 08:39:32.652895 4492 generic.go:334] "Generic (PLEG): container finished" podID="66a3c3f1-33da-46c7-956c-e33253f79a55" containerID="7eeb2d22adf4b12a312e6b643690e649fe4a9e97cae2da1c61abe7ea02beb02e" exitCode=0
Nov 26 08:39:32 crc kubenswrapper[4492]: I1126 08:39:32.652970 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4fvwc" event={"ID":"66a3c3f1-33da-46c7-956c-e33253f79a55","Type":"ContainerStarted","Data":"5ce36840c42e5a3f9464fefb28f7874c0af587d39b3bf8b692ef3c32d09db1d9"}
Nov 26 08:39:32 crc kubenswrapper[4492]: I1126 08:39:32.657926 4492 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 26 08:39:33 crc kubenswrapper[4492]: I1126 08:39:33.701679 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4fvwc" event={"ID":"66a3c3f1-33da-46c7-956c-e33253f79a55","Type":"ContainerStarted","Data":"b4c7777b006e8b1b05a89973f6a1a48f07cc5a1c7268456cbec007305799f7ba"}
Nov 26 08:39:34 crc kubenswrapper[4492]: I1126 08:39:34.715148 4492 generic.go:334] "Generic (PLEG): container finished" podID="66a3c3f1-33da-46c7-956c-e33253f79a55" containerID="b4c7777b006e8b1b05a89973f6a1a48f07cc5a1c7268456cbec007305799f7ba" exitCode=0
Nov 26 08:39:34 crc kubenswrapper[4492]: I1126 08:39:34.715457 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4fvwc" event={"ID":"66a3c3f1-33da-46c7-956c-e33253f79a55","Type":"ContainerDied","Data":"b4c7777b006e8b1b05a89973f6a1a48f07cc5a1c7268456cbec007305799f7ba"}
Nov 26 08:39:35 crc kubenswrapper[4492]: I1126 08:39:35.735002 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4fvwc" event={"ID":"66a3c3f1-33da-46c7-956c-e33253f79a55","Type":"ContainerStarted","Data":"76198cb19294e4e981b0ab571adbf49b13f962cccdd7b5cbacf74982c545aa6a"}
Nov 26 08:39:40 crc kubenswrapper[4492]: I1126 08:39:40.578384 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-4fvwc"
Nov 26 08:39:40 crc kubenswrapper[4492]: I1126 08:39:40.579200 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-4fvwc"
Nov 26 08:39:40 crc kubenswrapper[4492]: I1126 08:39:40.626974 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-4fvwc"
Nov 26 08:39:40 crc kubenswrapper[4492]: I1126 08:39:40.657668 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-4fvwc" podStartSLOduration=8.077671252 podStartE2EDuration="10.656773843s" podCreationTimestamp="2025-11-26 08:39:30 +0000 UTC" firstStartedPulling="2025-11-26 08:39:32.654365326 +0000 UTC m=+6668.538253624" lastFinishedPulling="2025-11-26 08:39:35.233467917 +0000 UTC m=+6671.117356215" observedRunningTime="2025-11-26 08:39:35.760995833 +0000 UTC m=+6671.644884141" watchObservedRunningTime="2025-11-26 08:39:40.656773843 +0000 UTC m=+6676.540662141"
Nov 26 08:39:40 crc kubenswrapper[4492]: I1126 08:39:40.845353 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-4fvwc"
Nov 26 08:39:41 crc kubenswrapper[4492]: I1126 08:39:41.805567 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-4fvwc"]
Nov 26 08:39:42 crc kubenswrapper[4492]: I1126 08:39:42.825963 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-4fvwc" podUID="66a3c3f1-33da-46c7-956c-e33253f79a55" containerName="registry-server" containerID="cri-o://76198cb19294e4e981b0ab571adbf49b13f962cccdd7b5cbacf74982c545aa6a" gracePeriod=2
Nov 26 08:39:43 crc kubenswrapper[4492]: I1126 08:39:43.302876 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-4fvwc"
Nov 26 08:39:43 crc kubenswrapper[4492]: I1126 08:39:43.365903 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rbm8s\" (UniqueName: \"kubernetes.io/projected/66a3c3f1-33da-46c7-956c-e33253f79a55-kube-api-access-rbm8s\") pod \"66a3c3f1-33da-46c7-956c-e33253f79a55\" (UID: \"66a3c3f1-33da-46c7-956c-e33253f79a55\") "
Nov 26 08:39:43 crc kubenswrapper[4492]: I1126 08:39:43.366123 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/66a3c3f1-33da-46c7-956c-e33253f79a55-utilities\") pod \"66a3c3f1-33da-46c7-956c-e33253f79a55\" (UID: \"66a3c3f1-33da-46c7-956c-e33253f79a55\") "
Nov 26 08:39:43 crc kubenswrapper[4492]: I1126 08:39:43.366288 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/66a3c3f1-33da-46c7-956c-e33253f79a55-catalog-content\") pod \"66a3c3f1-33da-46c7-956c-e33253f79a55\" (UID: \"66a3c3f1-33da-46c7-956c-e33253f79a55\") "
Nov 26 08:39:43 crc kubenswrapper[4492]: I1126 08:39:43.368635 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/66a3c3f1-33da-46c7-956c-e33253f79a55-utilities" (OuterVolumeSpecName: "utilities") pod "66a3c3f1-33da-46c7-956c-e33253f79a55" (UID: "66a3c3f1-33da-46c7-956c-e33253f79a55"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 08:39:43 crc kubenswrapper[4492]: I1126 08:39:43.376596 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/66a3c3f1-33da-46c7-956c-e33253f79a55-kube-api-access-rbm8s" (OuterVolumeSpecName: "kube-api-access-rbm8s") pod "66a3c3f1-33da-46c7-956c-e33253f79a55" (UID: "66a3c3f1-33da-46c7-956c-e33253f79a55"). InnerVolumeSpecName "kube-api-access-rbm8s". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 08:39:43 crc kubenswrapper[4492]: I1126 08:39:43.428580 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/66a3c3f1-33da-46c7-956c-e33253f79a55-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "66a3c3f1-33da-46c7-956c-e33253f79a55" (UID: "66a3c3f1-33da-46c7-956c-e33253f79a55"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 08:39:43 crc kubenswrapper[4492]: I1126 08:39:43.469327 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rbm8s\" (UniqueName: \"kubernetes.io/projected/66a3c3f1-33da-46c7-956c-e33253f79a55-kube-api-access-rbm8s\") on node \"crc\" DevicePath \"\""
Nov 26 08:39:43 crc kubenswrapper[4492]: I1126 08:39:43.469358 4492 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/66a3c3f1-33da-46c7-956c-e33253f79a55-utilities\") on node \"crc\" DevicePath \"\""
Nov 26 08:39:43 crc kubenswrapper[4492]: I1126 08:39:43.469368 4492 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/66a3c3f1-33da-46c7-956c-e33253f79a55-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 26 08:39:43 crc kubenswrapper[4492]: I1126 08:39:43.845644 4492 generic.go:334] "Generic (PLEG): container finished" podID="66a3c3f1-33da-46c7-956c-e33253f79a55" containerID="76198cb19294e4e981b0ab571adbf49b13f962cccdd7b5cbacf74982c545aa6a" exitCode=0
Nov 26 08:39:43 crc kubenswrapper[4492]: I1126 08:39:43.845707 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4fvwc" event={"ID":"66a3c3f1-33da-46c7-956c-e33253f79a55","Type":"ContainerDied","Data":"76198cb19294e4e981b0ab571adbf49b13f962cccdd7b5cbacf74982c545aa6a"}
Nov 26 08:39:43 crc kubenswrapper[4492]: I1126 08:39:43.845721 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-4fvwc"
Nov 26 08:39:43 crc kubenswrapper[4492]: I1126 08:39:43.845746 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4fvwc" event={"ID":"66a3c3f1-33da-46c7-956c-e33253f79a55","Type":"ContainerDied","Data":"5ce36840c42e5a3f9464fefb28f7874c0af587d39b3bf8b692ef3c32d09db1d9"}
Nov 26 08:39:43 crc kubenswrapper[4492]: I1126 08:39:43.845778 4492 scope.go:117] "RemoveContainer" containerID="76198cb19294e4e981b0ab571adbf49b13f962cccdd7b5cbacf74982c545aa6a"
Nov 26 08:39:43 crc kubenswrapper[4492]: I1126 08:39:43.882094 4492 scope.go:117] "RemoveContainer" containerID="b4c7777b006e8b1b05a89973f6a1a48f07cc5a1c7268456cbec007305799f7ba"
Nov 26 08:39:43 crc kubenswrapper[4492]: I1126 08:39:43.882276 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-4fvwc"]
Nov 26 08:39:43 crc kubenswrapper[4492]: I1126 08:39:43.890780 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-4fvwc"]
Nov 26 08:39:43 crc kubenswrapper[4492]: I1126 08:39:43.903919 4492 scope.go:117] "RemoveContainer" containerID="7eeb2d22adf4b12a312e6b643690e649fe4a9e97cae2da1c61abe7ea02beb02e"
Nov 26 08:39:43 crc kubenswrapper[4492]: I1126 08:39:43.943056 4492 scope.go:117] "RemoveContainer" containerID="76198cb19294e4e981b0ab571adbf49b13f962cccdd7b5cbacf74982c545aa6a"
Nov 26 08:39:43 crc kubenswrapper[4492]: E1126 08:39:43.944851 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"76198cb19294e4e981b0ab571adbf49b13f962cccdd7b5cbacf74982c545aa6a\": container with ID starting with 76198cb19294e4e981b0ab571adbf49b13f962cccdd7b5cbacf74982c545aa6a not found: ID does not exist" containerID="76198cb19294e4e981b0ab571adbf49b13f962cccdd7b5cbacf74982c545aa6a"
Nov 26 08:39:43 crc kubenswrapper[4492]: I1126 08:39:43.945296 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"76198cb19294e4e981b0ab571adbf49b13f962cccdd7b5cbacf74982c545aa6a"} err="failed to get container status \"76198cb19294e4e981b0ab571adbf49b13f962cccdd7b5cbacf74982c545aa6a\": rpc error: code = NotFound desc = could not find container \"76198cb19294e4e981b0ab571adbf49b13f962cccdd7b5cbacf74982c545aa6a\": container with ID starting with 76198cb19294e4e981b0ab571adbf49b13f962cccdd7b5cbacf74982c545aa6a not found: ID does not exist"
Nov 26 08:39:43 crc kubenswrapper[4492]: I1126 08:39:43.945331 4492 scope.go:117] "RemoveContainer" containerID="b4c7777b006e8b1b05a89973f6a1a48f07cc5a1c7268456cbec007305799f7ba"
Nov 26 08:39:43 crc kubenswrapper[4492]: E1126 08:39:43.945682 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b4c7777b006e8b1b05a89973f6a1a48f07cc5a1c7268456cbec007305799f7ba\": container with ID starting with b4c7777b006e8b1b05a89973f6a1a48f07cc5a1c7268456cbec007305799f7ba not found: ID does not exist" containerID="b4c7777b006e8b1b05a89973f6a1a48f07cc5a1c7268456cbec007305799f7ba"
Nov 26 08:39:43 crc kubenswrapper[4492]: I1126 08:39:43.945717 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b4c7777b006e8b1b05a89973f6a1a48f07cc5a1c7268456cbec007305799f7ba"} err="failed to get container status \"b4c7777b006e8b1b05a89973f6a1a48f07cc5a1c7268456cbec007305799f7ba\": rpc error: code = NotFound desc = could not find container \"b4c7777b006e8b1b05a89973f6a1a48f07cc5a1c7268456cbec007305799f7ba\": container with ID starting with b4c7777b006e8b1b05a89973f6a1a48f07cc5a1c7268456cbec007305799f7ba not found: ID does not exist"
Nov 26 08:39:43 crc kubenswrapper[4492]: I1126 08:39:43.945746 4492 scope.go:117] "RemoveContainer" containerID="7eeb2d22adf4b12a312e6b643690e649fe4a9e97cae2da1c61abe7ea02beb02e"
Nov 26 08:39:43 crc kubenswrapper[4492]: E1126 08:39:43.946115 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7eeb2d22adf4b12a312e6b643690e649fe4a9e97cae2da1c61abe7ea02beb02e\": container with ID starting with 7eeb2d22adf4b12a312e6b643690e649fe4a9e97cae2da1c61abe7ea02beb02e not found: ID does not exist" containerID="7eeb2d22adf4b12a312e6b643690e649fe4a9e97cae2da1c61abe7ea02beb02e"
Nov 26 08:39:43 crc kubenswrapper[4492]: I1126 08:39:43.946166 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7eeb2d22adf4b12a312e6b643690e649fe4a9e97cae2da1c61abe7ea02beb02e"} err="failed to get container status \"7eeb2d22adf4b12a312e6b643690e649fe4a9e97cae2da1c61abe7ea02beb02e\": rpc error: code = NotFound desc = could not find container \"7eeb2d22adf4b12a312e6b643690e649fe4a9e97cae2da1c61abe7ea02beb02e\": container with ID starting with 7eeb2d22adf4b12a312e6b643690e649fe4a9e97cae2da1c61abe7ea02beb02e not found: ID does not exist"
Nov 26 08:39:44 crc kubenswrapper[4492]: I1126 08:39:44.450271 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="66a3c3f1-33da-46c7-956c-e33253f79a55" path="/var/lib/kubelet/pods/66a3c3f1-33da-46c7-956c-e33253f79a55/volumes"
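The community-operators-4fvwc lifecycle above (extract-utilities, then extract-content, then registry-server, then teardown) can be reconstructed entirely from the "SyncLoop (PLEG)" lines: the event={...} payload is plain JSON carrying the pod UID, the event type, and the container or sandbox ID. A stdlib-only sketch that pulls those payloads out of a log fed on stdin; the field names are copied from the entries above:

```go
package main

import (
	"bufio"
	"encoding/json"
	"fmt"
	"os"
	"regexp"
)

// plegEvent mirrors the event={...} payload of "SyncLoop (PLEG)" lines.
type plegEvent struct {
	ID   string `json:"ID"`   // pod UID
	Type string `json:"Type"` // ContainerStarted / ContainerDied / ...
	Data string `json:"Data"` // container (or sandbox) ID
}

var eventRe = regexp.MustCompile(`event=(\{[^}]*\})`)

func main() {
	sc := bufio.NewScanner(os.Stdin)
	sc.Buffer(make([]byte, 0, 1024*1024), 1024*1024) // kubelet lines can be long
	for sc.Scan() {
		m := eventRe.FindStringSubmatch(sc.Text())
		if m == nil {
			continue
		}
		var ev plegEvent
		if err := json.Unmarshal([]byte(m[1]), &ev); err != nil {
			continue // not a well-formed event payload
		}
		fmt.Printf("%-16s pod=%s container=%s\n", ev.Type, ev.ID, ev.Data)
	}
}
```

Piping this log through the sketch yields a per-pod start/die timeline, which is how the sandbox ID (the ContainerDied entry with no matching "container finished" line) can be told apart from the real containers.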
Nov 26 08:41:26 crc kubenswrapper[4492]: I1126 08:41:26.859611 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-t5lz5"]
Nov 26 08:41:26 crc kubenswrapper[4492]: E1126 08:41:26.862792 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="66a3c3f1-33da-46c7-956c-e33253f79a55" containerName="extract-utilities"
Nov 26 08:41:26 crc kubenswrapper[4492]: I1126 08:41:26.862813 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="66a3c3f1-33da-46c7-956c-e33253f79a55" containerName="extract-utilities"
Nov 26 08:41:26 crc kubenswrapper[4492]: E1126 08:41:26.862870 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="66a3c3f1-33da-46c7-956c-e33253f79a55" containerName="registry-server"
Nov 26 08:41:26 crc kubenswrapper[4492]: I1126 08:41:26.862877 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="66a3c3f1-33da-46c7-956c-e33253f79a55" containerName="registry-server"
Nov 26 08:41:26 crc kubenswrapper[4492]: E1126 08:41:26.862890 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="66a3c3f1-33da-46c7-956c-e33253f79a55" containerName="extract-content"
Nov 26 08:41:26 crc kubenswrapper[4492]: I1126 08:41:26.862897 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="66a3c3f1-33da-46c7-956c-e33253f79a55" containerName="extract-content"
Nov 26 08:41:26 crc kubenswrapper[4492]: I1126 08:41:26.863104 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="66a3c3f1-33da-46c7-956c-e33253f79a55" containerName="registry-server"
Nov 26 08:41:26 crc kubenswrapper[4492]: I1126 08:41:26.864912 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-t5lz5"
Nov 26 08:41:26 crc kubenswrapper[4492]: I1126 08:41:26.882019 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-t5lz5"]
Nov 26 08:41:27 crc kubenswrapper[4492]: I1126 08:41:27.005485 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/000cee64-1819-41b1-951b-ea0983ff0790-catalog-content\") pod \"certified-operators-t5lz5\" (UID: \"000cee64-1819-41b1-951b-ea0983ff0790\") " pod="openshift-marketplace/certified-operators-t5lz5"
Nov 26 08:41:27 crc kubenswrapper[4492]: I1126 08:41:27.005713 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/000cee64-1819-41b1-951b-ea0983ff0790-utilities\") pod \"certified-operators-t5lz5\" (UID: \"000cee64-1819-41b1-951b-ea0983ff0790\") " pod="openshift-marketplace/certified-operators-t5lz5"
Nov 26 08:41:27 crc kubenswrapper[4492]: I1126 08:41:27.005794 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kdvvz\" (UniqueName: \"kubernetes.io/projected/000cee64-1819-41b1-951b-ea0983ff0790-kube-api-access-kdvvz\") pod \"certified-operators-t5lz5\" (UID: \"000cee64-1819-41b1-951b-ea0983ff0790\") " pod="openshift-marketplace/certified-operators-t5lz5"
Nov 26 08:41:27 crc kubenswrapper[4492]: I1126 08:41:27.106593 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/000cee64-1819-41b1-951b-ea0983ff0790-catalog-content\") pod \"certified-operators-t5lz5\" (UID: \"000cee64-1819-41b1-951b-ea0983ff0790\") " pod="openshift-marketplace/certified-operators-t5lz5"
Nov 26 08:41:27 crc kubenswrapper[4492]: I1126 08:41:27.106724 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/000cee64-1819-41b1-951b-ea0983ff0790-utilities\") pod \"certified-operators-t5lz5\" (UID: \"000cee64-1819-41b1-951b-ea0983ff0790\") " pod="openshift-marketplace/certified-operators-t5lz5"
Nov 26 08:41:27 crc kubenswrapper[4492]: I1126 08:41:27.106786 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kdvvz\" (UniqueName: \"kubernetes.io/projected/000cee64-1819-41b1-951b-ea0983ff0790-kube-api-access-kdvvz\") pod \"certified-operators-t5lz5\" (UID: \"000cee64-1819-41b1-951b-ea0983ff0790\") " pod="openshift-marketplace/certified-operators-t5lz5"
Nov 26 08:41:27 crc kubenswrapper[4492]: I1126 08:41:27.107581 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/000cee64-1819-41b1-951b-ea0983ff0790-catalog-content\") pod \"certified-operators-t5lz5\" (UID: \"000cee64-1819-41b1-951b-ea0983ff0790\") " pod="openshift-marketplace/certified-operators-t5lz5"
Nov 26 08:41:27 crc kubenswrapper[4492]: I1126 08:41:27.107700 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/000cee64-1819-41b1-951b-ea0983ff0790-utilities\") pod \"certified-operators-t5lz5\" (UID: \"000cee64-1819-41b1-951b-ea0983ff0790\") " pod="openshift-marketplace/certified-operators-t5lz5"
Nov 26 08:41:27 crc kubenswrapper[4492]: I1126 08:41:27.127999 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kdvvz\" (UniqueName: \"kubernetes.io/projected/000cee64-1819-41b1-951b-ea0983ff0790-kube-api-access-kdvvz\") pod \"certified-operators-t5lz5\" (UID: \"000cee64-1819-41b1-951b-ea0983ff0790\") " pod="openshift-marketplace/certified-operators-t5lz5"
Nov 26 08:41:27 crc kubenswrapper[4492]: I1126 08:41:27.183345 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-t5lz5"
Nov 26 08:41:27 crc kubenswrapper[4492]: I1126 08:41:27.687623 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-t5lz5"]
Nov 26 08:41:27 crc kubenswrapper[4492]: I1126 08:41:27.910770 4492 generic.go:334] "Generic (PLEG): container finished" podID="000cee64-1819-41b1-951b-ea0983ff0790" containerID="d9d2a1bad45c54394212efb03fb46a2bce3cfd605426504422f3a7c918bc1a8a" exitCode=0
Nov 26 08:41:27 crc kubenswrapper[4492]: I1126 08:41:27.911129 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-t5lz5" event={"ID":"000cee64-1819-41b1-951b-ea0983ff0790","Type":"ContainerDied","Data":"d9d2a1bad45c54394212efb03fb46a2bce3cfd605426504422f3a7c918bc1a8a"}
Nov 26 08:41:27 crc kubenswrapper[4492]: I1126 08:41:27.911207 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-t5lz5" event={"ID":"000cee64-1819-41b1-951b-ea0983ff0790","Type":"ContainerStarted","Data":"0d3773e08bb659d9e0dfcc67fe866cce1991fc4d295c8c49e05f2fa5b0d68f6a"}
Nov 26 08:41:28 crc kubenswrapper[4492]: I1126 08:41:28.922895 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-t5lz5" event={"ID":"000cee64-1819-41b1-951b-ea0983ff0790","Type":"ContainerStarted","Data":"b655adde8527d3956b377ea972df55e62908ea9d376c1ff32c327121f29fe941"}
Nov 26 08:41:29 crc kubenswrapper[4492]: I1126 08:41:29.933802 4492 generic.go:334] "Generic (PLEG): container finished" podID="000cee64-1819-41b1-951b-ea0983ff0790" containerID="b655adde8527d3956b377ea972df55e62908ea9d376c1ff32c327121f29fe941" exitCode=0
Nov 26 08:41:29 crc kubenswrapper[4492]: I1126 08:41:29.933876 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-t5lz5" event={"ID":"000cee64-1819-41b1-951b-ea0983ff0790","Type":"ContainerDied","Data":"b655adde8527d3956b377ea972df55e62908ea9d376c1ff32c327121f29fe941"}
Nov 26 08:41:30 crc kubenswrapper[4492]: I1126 08:41:30.948166 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-t5lz5" event={"ID":"000cee64-1819-41b1-951b-ea0983ff0790","Type":"ContainerStarted","Data":"3cbaa83310fcef691737919dc54af98c8ab76834c41b33640ae8e38150b8ab6e"}
Nov 26 08:41:30 crc kubenswrapper[4492]: I1126 08:41:30.976623 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-t5lz5" podStartSLOduration=2.451544158 podStartE2EDuration="4.976596868s" podCreationTimestamp="2025-11-26 08:41:26 +0000 UTC" firstStartedPulling="2025-11-26 08:41:27.916466949 +0000 UTC m=+6783.800355247" lastFinishedPulling="2025-11-26 08:41:30.441519659 +0000 UTC m=+6786.325407957" observedRunningTime="2025-11-26 08:41:30.96721096 +0000 UTC m=+6786.851099268" watchObservedRunningTime="2025-11-26 08:41:30.976596868 +0000 UTC m=+6786.860485166"
Nov 26 08:41:37 crc kubenswrapper[4492]: I1126 08:41:37.183548 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-t5lz5"
Nov 26 08:41:37 crc kubenswrapper[4492]: I1126 08:41:37.184336 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-t5lz5"
Nov 26 08:41:37 crc kubenswrapper[4492]: I1126 08:41:37.227604 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-t5lz5"
Nov 26 08:41:38 crc kubenswrapper[4492]: I1126 08:41:38.060500 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-t5lz5"
Nov 26 08:41:38 crc kubenswrapper[4492]: I1126 08:41:38.128800 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-t5lz5"]
Nov 26 08:41:40 crc kubenswrapper[4492]: I1126 08:41:40.037968 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-t5lz5" podUID="000cee64-1819-41b1-951b-ea0983ff0790" containerName="registry-server" containerID="cri-o://3cbaa83310fcef691737919dc54af98c8ab76834c41b33640ae8e38150b8ab6e" gracePeriod=2
Nov 26 08:41:40 crc kubenswrapper[4492]: I1126 08:41:40.525577 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-t5lz5"
Nov 26 08:41:40 crc kubenswrapper[4492]: I1126 08:41:40.657240 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/000cee64-1819-41b1-951b-ea0983ff0790-utilities\") pod \"000cee64-1819-41b1-951b-ea0983ff0790\" (UID: \"000cee64-1819-41b1-951b-ea0983ff0790\") "
Nov 26 08:41:40 crc kubenswrapper[4492]: I1126 08:41:40.657460 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/000cee64-1819-41b1-951b-ea0983ff0790-catalog-content\") pod \"000cee64-1819-41b1-951b-ea0983ff0790\" (UID: \"000cee64-1819-41b1-951b-ea0983ff0790\") "
Nov 26 08:41:40 crc kubenswrapper[4492]: I1126 08:41:40.657580 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kdvvz\" (UniqueName: \"kubernetes.io/projected/000cee64-1819-41b1-951b-ea0983ff0790-kube-api-access-kdvvz\") pod \"000cee64-1819-41b1-951b-ea0983ff0790\" (UID: \"000cee64-1819-41b1-951b-ea0983ff0790\") "
Nov 26 08:41:40 crc kubenswrapper[4492]: I1126 08:41:40.658826 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/000cee64-1819-41b1-951b-ea0983ff0790-utilities" (OuterVolumeSpecName: "utilities") pod "000cee64-1819-41b1-951b-ea0983ff0790" (UID: "000cee64-1819-41b1-951b-ea0983ff0790"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 08:41:40 crc kubenswrapper[4492]: I1126 08:41:40.665593 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/000cee64-1819-41b1-951b-ea0983ff0790-kube-api-access-kdvvz" (OuterVolumeSpecName: "kube-api-access-kdvvz") pod "000cee64-1819-41b1-951b-ea0983ff0790" (UID: "000cee64-1819-41b1-951b-ea0983ff0790"). InnerVolumeSpecName "kube-api-access-kdvvz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 08:41:40 crc kubenswrapper[4492]: I1126 08:41:40.701872 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/000cee64-1819-41b1-951b-ea0983ff0790-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "000cee64-1819-41b1-951b-ea0983ff0790" (UID: "000cee64-1819-41b1-951b-ea0983ff0790"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 08:41:40 crc kubenswrapper[4492]: I1126 08:41:40.760732 4492 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/000cee64-1819-41b1-951b-ea0983ff0790-utilities\") on node \"crc\" DevicePath \"\""
Nov 26 08:41:40 crc kubenswrapper[4492]: I1126 08:41:40.760769 4492 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/000cee64-1819-41b1-951b-ea0983ff0790-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 26 08:41:40 crc kubenswrapper[4492]: I1126 08:41:40.760785 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kdvvz\" (UniqueName: \"kubernetes.io/projected/000cee64-1819-41b1-951b-ea0983ff0790-kube-api-access-kdvvz\") on node \"crc\" DevicePath \"\""
Nov 26 08:41:41 crc kubenswrapper[4492]: I1126 08:41:41.050467 4492 generic.go:334] "Generic (PLEG): container finished" podID="000cee64-1819-41b1-951b-ea0983ff0790" containerID="3cbaa83310fcef691737919dc54af98c8ab76834c41b33640ae8e38150b8ab6e" exitCode=0
Nov 26 08:41:41 crc kubenswrapper[4492]: I1126 08:41:41.050535 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-t5lz5" event={"ID":"000cee64-1819-41b1-951b-ea0983ff0790","Type":"ContainerDied","Data":"3cbaa83310fcef691737919dc54af98c8ab76834c41b33640ae8e38150b8ab6e"}
Nov 26 08:41:41 crc kubenswrapper[4492]: I1126 08:41:41.050538 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-t5lz5"
Nov 26 08:41:41 crc kubenswrapper[4492]: I1126 08:41:41.050589 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-t5lz5" event={"ID":"000cee64-1819-41b1-951b-ea0983ff0790","Type":"ContainerDied","Data":"0d3773e08bb659d9e0dfcc67fe866cce1991fc4d295c8c49e05f2fa5b0d68f6a"}
Nov 26 08:41:41 crc kubenswrapper[4492]: I1126 08:41:41.050623 4492 scope.go:117] "RemoveContainer" containerID="3cbaa83310fcef691737919dc54af98c8ab76834c41b33640ae8e38150b8ab6e"
Nov 26 08:41:41 crc kubenswrapper[4492]: I1126 08:41:41.074283 4492 scope.go:117] "RemoveContainer" containerID="b655adde8527d3956b377ea972df55e62908ea9d376c1ff32c327121f29fe941"
Nov 26 08:41:41 crc kubenswrapper[4492]: I1126 08:41:41.109185 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-t5lz5"]
Nov 26 08:41:41 crc kubenswrapper[4492]: I1126 08:41:41.112567 4492 scope.go:117] "RemoveContainer" containerID="d9d2a1bad45c54394212efb03fb46a2bce3cfd605426504422f3a7c918bc1a8a"
Nov 26 08:41:41 crc kubenswrapper[4492]: I1126 08:41:41.115827 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-t5lz5"]
Nov 26 08:41:41 crc kubenswrapper[4492]: I1126 08:41:41.139483 4492 scope.go:117] "RemoveContainer" containerID="3cbaa83310fcef691737919dc54af98c8ab76834c41b33640ae8e38150b8ab6e"
Nov 26 08:41:41 crc kubenswrapper[4492]: E1126 08:41:41.139798 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3cbaa83310fcef691737919dc54af98c8ab76834c41b33640ae8e38150b8ab6e\": container with ID starting with 3cbaa83310fcef691737919dc54af98c8ab76834c41b33640ae8e38150b8ab6e not found: ID does not exist" containerID="3cbaa83310fcef691737919dc54af98c8ab76834c41b33640ae8e38150b8ab6e"
Nov 26 08:41:41 crc kubenswrapper[4492]: I1126 08:41:41.139832 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3cbaa83310fcef691737919dc54af98c8ab76834c41b33640ae8e38150b8ab6e"} err="failed to get container status \"3cbaa83310fcef691737919dc54af98c8ab76834c41b33640ae8e38150b8ab6e\": rpc error: code = NotFound desc = could not find container \"3cbaa83310fcef691737919dc54af98c8ab76834c41b33640ae8e38150b8ab6e\": container with ID starting with 3cbaa83310fcef691737919dc54af98c8ab76834c41b33640ae8e38150b8ab6e not found: ID does not exist"
Nov 26 08:41:41 crc kubenswrapper[4492]: I1126 08:41:41.139852 4492 scope.go:117] "RemoveContainer" containerID="b655adde8527d3956b377ea972df55e62908ea9d376c1ff32c327121f29fe941"
Nov 26 08:41:41 crc kubenswrapper[4492]: E1126 08:41:41.140302 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b655adde8527d3956b377ea972df55e62908ea9d376c1ff32c327121f29fe941\": container with ID starting with b655adde8527d3956b377ea972df55e62908ea9d376c1ff32c327121f29fe941 not found: ID does not exist" containerID="b655adde8527d3956b377ea972df55e62908ea9d376c1ff32c327121f29fe941"
Nov 26 08:41:41 crc kubenswrapper[4492]: I1126 08:41:41.140327 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b655adde8527d3956b377ea972df55e62908ea9d376c1ff32c327121f29fe941"} err="failed to get container status \"b655adde8527d3956b377ea972df55e62908ea9d376c1ff32c327121f29fe941\": rpc error: code = NotFound desc = could not find container \"b655adde8527d3956b377ea972df55e62908ea9d376c1ff32c327121f29fe941\": container with ID starting with b655adde8527d3956b377ea972df55e62908ea9d376c1ff32c327121f29fe941 not found: ID does not exist"
Nov 26 08:41:41 crc kubenswrapper[4492]: I1126 08:41:41.140344 4492 scope.go:117] "RemoveContainer" containerID="d9d2a1bad45c54394212efb03fb46a2bce3cfd605426504422f3a7c918bc1a8a"
Nov 26 08:41:41 crc kubenswrapper[4492]: E1126 08:41:41.140586 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d9d2a1bad45c54394212efb03fb46a2bce3cfd605426504422f3a7c918bc1a8a\": container with ID starting with d9d2a1bad45c54394212efb03fb46a2bce3cfd605426504422f3a7c918bc1a8a not found: ID does not exist" containerID="d9d2a1bad45c54394212efb03fb46a2bce3cfd605426504422f3a7c918bc1a8a"
Nov 26 08:41:41 crc kubenswrapper[4492]: I1126 08:41:41.140608 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d9d2a1bad45c54394212efb03fb46a2bce3cfd605426504422f3a7c918bc1a8a"} err="failed to get container status \"d9d2a1bad45c54394212efb03fb46a2bce3cfd605426504422f3a7c918bc1a8a\": rpc error: code = NotFound desc = could not find container \"d9d2a1bad45c54394212efb03fb46a2bce3cfd605426504422f3a7c918bc1a8a\": container with ID starting with d9d2a1bad45c54394212efb03fb46a2bce3cfd605426504422f3a7c918bc1a8a not found: ID does not exist"
Nov 26 08:41:42 crc kubenswrapper[4492]: I1126 08:41:42.449134 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="000cee64-1819-41b1-951b-ea0983ff0790" path="/var/lib/kubelet/pods/000cee64-1819-41b1-951b-ea0983ff0790/volumes"
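As in the previous teardown, each RemoveContainer above is followed by a NotFound error and a "DeleteContainer returned error" line, yet cleanup still completes: the containers were already removed along with the pod sandbox, so NotFound is effectively "already deleted". A minimal sketch of that idempotent-delete pattern, using the gRPC status codes visible in the messages; runtimeRemove is a hypothetical stand-in for the CRI RemoveContainer call, not the kubelet's actual signature:

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// removeContainer treats a NotFound from the runtime as success:
// the container is already gone, so there is nothing left to retry.
func removeContainer(runtimeRemove func(id string) error, id string) error {
	if err := runtimeRemove(id); err != nil {
		if status.Code(err) == codes.NotFound {
			return nil // already deleted along with the sandbox
		}
		return err
	}
	return nil
}

func main() {
	// Simulate a runtime that has already garbage-collected the container.
	gone := func(id string) error {
		return status.Errorf(codes.NotFound, "could not find container %q", id)
	}
	fmt.Println(removeContainer(gone, "b655adde8527")) // prints <nil>
}
```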
Nov 26 08:41:49 crc kubenswrapper[4492]: I1126 08:41:49.441267 4492 patch_prober.go:28] interesting pod/machine-config-daemon-6blv7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 26 08:41:49 crc kubenswrapper[4492]: I1126 08:41:49.443550 4492 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 26 08:42:19 crc kubenswrapper[4492]: I1126 08:42:19.441743 4492 patch_prober.go:28] interesting pod/machine-config-daemon-6blv7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 26 08:42:19 crc kubenswrapper[4492]: I1126 08:42:19.442473 4492 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 26 08:42:49 crc kubenswrapper[4492]: I1126 08:42:49.441752 4492 patch_prober.go:28] interesting pod/machine-config-daemon-6blv7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 26 08:42:49 crc kubenswrapper[4492]: I1126 08:42:49.443482 4492 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 26 08:42:49 crc kubenswrapper[4492]: I1126 08:42:49.443570 4492 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-6blv7"
Nov 26 08:42:49 crc kubenswrapper[4492]: I1126 08:42:49.444604 4492 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"0fc0037b699acf411d41be0850c2d0d066d14797772966cdf096215720cf6e18"} pod="openshift-machine-config-operator/machine-config-daemon-6blv7" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 26 08:42:49 crc kubenswrapper[4492]: I1126 08:42:49.444671 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" containerID="cri-o://0fc0037b699acf411d41be0850c2d0d066d14797772966cdf096215720cf6e18" gracePeriod=600
Nov 26 08:42:49 crc kubenswrapper[4492]: I1126 08:42:49.670901 4492 generic.go:334] "Generic (PLEG): container finished" podID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerID="0fc0037b699acf411d41be0850c2d0d066d14797772966cdf096215720cf6e18" exitCode=0
Nov 26 08:42:49 crc kubenswrapper[4492]: I1126 08:42:49.670972 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" event={"ID":"04bf18ad-d2a1-4b30-a3fa-2b6247363c82","Type":"ContainerDied","Data":"0fc0037b699acf411d41be0850c2d0d066d14797772966cdf096215720cf6e18"}
Nov 26 08:42:49 crc kubenswrapper[4492]: I1126 08:42:49.671119 4492 scope.go:117] "RemoveContainer" containerID="95526513ee74b3dc6b8fbded68d7f89552fca80fa1e15c8363724e3a610dc7fc"
Nov 26 08:42:50 crc kubenswrapper[4492]: I1126 08:42:50.680813 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" event={"ID":"04bf18ad-d2a1-4b30-a3fa-2b6247363c82","Type":"ContainerStarted","Data":"ef96c0d9cafae12ebbbc7e77ec2f7f4753b6a2e65b5ea75228b7ab77555ee739"}
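This is one full turn of the machine-config-daemon's crash loop: the liveness probe fails three times in a row, the container is killed with gracePeriod=600, and a replacement is started, which will in turn sit behind the "back-off 5m0s" seen at 08:39:17. The kubelet's documented restart back-off starts at 10s and doubles per restart, capped at five minutes; a sketch of that schedule with the documented defaults (the constants are not read from this cluster's configuration):

```go
package main

import (
	"fmt"
	"time"
)

// backoff returns the documented CrashLoopBackOff delay before the
// given restart attempt: 10s, doubling each restart, capped at 5m.
func backoff(restarts int) time.Duration {
	d := 10 * time.Second
	for i := 0; i < restarts; i++ {
		d *= 2
		if d >= 5*time.Minute {
			return 5 * time.Minute
		}
	}
	return d
}

func main() {
	for r := 0; r <= 6; r++ {
		fmt.Printf("restart %d -> wait %v\n", r, backoff(r))
	}
	// restart 5 and later wait the full 5m0s, matching the
	// "back-off 5m0s restarting failed container" message above.
}
```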
Nov 26 08:43:03 crc kubenswrapper[4492]: I1126 08:43:03.822575 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-jgnss"]
Nov 26 08:43:03 crc kubenswrapper[4492]: E1126 08:43:03.823898 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="000cee64-1819-41b1-951b-ea0983ff0790" containerName="extract-utilities"
Nov 26 08:43:03 crc kubenswrapper[4492]: I1126 08:43:03.823916 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="000cee64-1819-41b1-951b-ea0983ff0790" containerName="extract-utilities"
Nov 26 08:43:03 crc kubenswrapper[4492]: E1126 08:43:03.823952 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="000cee64-1819-41b1-951b-ea0983ff0790" containerName="registry-server"
Nov 26 08:43:03 crc kubenswrapper[4492]: I1126 08:43:03.823961 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="000cee64-1819-41b1-951b-ea0983ff0790" containerName="registry-server"
Nov 26 08:43:03 crc kubenswrapper[4492]: E1126 08:43:03.823994 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="000cee64-1819-41b1-951b-ea0983ff0790" containerName="extract-content"
Nov 26 08:43:03 crc kubenswrapper[4492]: I1126 08:43:03.824000 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="000cee64-1819-41b1-951b-ea0983ff0790" containerName="extract-content"
Nov 26 08:43:03 crc kubenswrapper[4492]: I1126 08:43:03.824283 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="000cee64-1819-41b1-951b-ea0983ff0790" containerName="registry-server"
Nov 26 08:43:03 crc kubenswrapper[4492]: I1126 08:43:03.828400 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-jgnss"
Nov 26 08:43:03 crc kubenswrapper[4492]: I1126 08:43:03.841026 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-jgnss"]
Nov 26 08:43:03 crc kubenswrapper[4492]: I1126 08:43:03.881960 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mdb79\" (UniqueName: \"kubernetes.io/projected/8379b2dd-5eb1-464d-9cea-b4df7d14f417-kube-api-access-mdb79\") pod \"redhat-operators-jgnss\" (UID: \"8379b2dd-5eb1-464d-9cea-b4df7d14f417\") " pod="openshift-marketplace/redhat-operators-jgnss"
Nov 26 08:43:03 crc kubenswrapper[4492]: I1126 08:43:03.882080 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8379b2dd-5eb1-464d-9cea-b4df7d14f417-catalog-content\") pod \"redhat-operators-jgnss\" (UID: \"8379b2dd-5eb1-464d-9cea-b4df7d14f417\") " pod="openshift-marketplace/redhat-operators-jgnss"
Nov 26 08:43:03 crc kubenswrapper[4492]: I1126 08:43:03.882563 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8379b2dd-5eb1-464d-9cea-b4df7d14f417-utilities\") pod \"redhat-operators-jgnss\" (UID: \"8379b2dd-5eb1-464d-9cea-b4df7d14f417\") " pod="openshift-marketplace/redhat-operators-jgnss"
Nov 26 08:43:03 crc kubenswrapper[4492]: I1126 08:43:03.984393 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mdb79\" (UniqueName: \"kubernetes.io/projected/8379b2dd-5eb1-464d-9cea-b4df7d14f417-kube-api-access-mdb79\") pod \"redhat-operators-jgnss\" (UID: \"8379b2dd-5eb1-464d-9cea-b4df7d14f417\") " pod="openshift-marketplace/redhat-operators-jgnss"
Nov 26 08:43:03 crc kubenswrapper[4492]: I1126 08:43:03.984840 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8379b2dd-5eb1-464d-9cea-b4df7d14f417-catalog-content\") pod \"redhat-operators-jgnss\" (UID: \"8379b2dd-5eb1-464d-9cea-b4df7d14f417\") " pod="openshift-marketplace/redhat-operators-jgnss"
Nov 26 08:43:03 crc kubenswrapper[4492]: I1126 08:43:03.985293 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8379b2dd-5eb1-464d-9cea-b4df7d14f417-catalog-content\") pod \"redhat-operators-jgnss\" (UID: \"8379b2dd-5eb1-464d-9cea-b4df7d14f417\") " pod="openshift-marketplace/redhat-operators-jgnss"
Nov 26 08:43:03 crc kubenswrapper[4492]: I1126 08:43:03.985604 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8379b2dd-5eb1-464d-9cea-b4df7d14f417-utilities\") pod \"redhat-operators-jgnss\" (UID: \"8379b2dd-5eb1-464d-9cea-b4df7d14f417\") " pod="openshift-marketplace/redhat-operators-jgnss"
Nov 26 08:43:03 crc kubenswrapper[4492]: I1126 08:43:03.985883 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8379b2dd-5eb1-464d-9cea-b4df7d14f417-utilities\") pod \"redhat-operators-jgnss\" (UID: \"8379b2dd-5eb1-464d-9cea-b4df7d14f417\") " pod="openshift-marketplace/redhat-operators-jgnss"
Nov 26 08:43:04 crc kubenswrapper[4492]: I1126 08:43:04.003986 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mdb79\" (UniqueName: \"kubernetes.io/projected/8379b2dd-5eb1-464d-9cea-b4df7d14f417-kube-api-access-mdb79\") pod \"redhat-operators-jgnss\" (UID: \"8379b2dd-5eb1-464d-9cea-b4df7d14f417\") " pod="openshift-marketplace/redhat-operators-jgnss"
Nov 26 08:43:04 crc kubenswrapper[4492]: I1126 08:43:04.149273 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-jgnss"
Nov 26 08:43:04 crc kubenswrapper[4492]: I1126 08:43:04.601391 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-jgnss"]
Nov 26 08:43:04 crc kubenswrapper[4492]: I1126 08:43:04.812459 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jgnss" event={"ID":"8379b2dd-5eb1-464d-9cea-b4df7d14f417","Type":"ContainerStarted","Data":"e88e40f3906d3f45d0e67fd1100c4bf9b37b0b7dcf4988463668f5976af5d6ea"}
Nov 26 08:43:04 crc kubenswrapper[4492]: I1126 08:43:04.812746 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jgnss" event={"ID":"8379b2dd-5eb1-464d-9cea-b4df7d14f417","Type":"ContainerStarted","Data":"0f7453dfabc217c35f082fa7ac50e0f156d7fb344d81a356d1cffa563ff1ecca"}
Nov 26 08:43:05 crc kubenswrapper[4492]: I1126 08:43:05.827499 4492 generic.go:334] "Generic (PLEG): container finished" podID="8379b2dd-5eb1-464d-9cea-b4df7d14f417" containerID="e88e40f3906d3f45d0e67fd1100c4bf9b37b0b7dcf4988463668f5976af5d6ea" exitCode=0
Nov 26 08:43:05 crc kubenswrapper[4492]: I1126 08:43:05.827724 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jgnss" event={"ID":"8379b2dd-5eb1-464d-9cea-b4df7d14f417","Type":"ContainerDied","Data":"e88e40f3906d3f45d0e67fd1100c4bf9b37b0b7dcf4988463668f5976af5d6ea"}
Nov 26 08:43:05 crc kubenswrapper[4492]: I1126 08:43:05.827822 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jgnss" event={"ID":"8379b2dd-5eb1-464d-9cea-b4df7d14f417","Type":"ContainerStarted","Data":"e267a84103e93de808db92d80a1ab79d3d666108852af70d2b416fb343f190db"}
Nov 26 08:43:08 crc kubenswrapper[4492]: I1126 08:43:08.862091 4492 generic.go:334] "Generic (PLEG): container finished" podID="8379b2dd-5eb1-464d-9cea-b4df7d14f417" containerID="e267a84103e93de808db92d80a1ab79d3d666108852af70d2b416fb343f190db" exitCode=0
Nov 26 08:43:08 crc kubenswrapper[4492]: I1126 08:43:08.862765 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jgnss" event={"ID":"8379b2dd-5eb1-464d-9cea-b4df7d14f417","Type":"ContainerDied","Data":"e267a84103e93de808db92d80a1ab79d3d666108852af70d2b416fb343f190db"}
Nov 26 08:43:09 crc kubenswrapper[4492]: I1126 08:43:09.875992 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jgnss" event={"ID":"8379b2dd-5eb1-464d-9cea-b4df7d14f417","Type":"ContainerStarted","Data":"afb55a97fea83e5ee662a1fa22b87965ce1a9c3597902f828790e7fccf4b96aa"}
Nov 26 08:43:09 crc kubenswrapper[4492]: I1126 08:43:09.898715 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-jgnss" podStartSLOduration=2.350429331 podStartE2EDuration="6.898676025s" podCreationTimestamp="2025-11-26 08:43:03 +0000 UTC" firstStartedPulling="2025-11-26 08:43:04.815112622 +0000 UTC m=+6880.699000920" lastFinishedPulling="2025-11-26 08:43:09.363359316 +0000 UTC m=+6885.247247614" observedRunningTime="2025-11-26 08:43:09.894409224 +0000 UTC m=+6885.778297522" watchObservedRunningTime="2025-11-26 08:43:09.898676025 +0000 UTC m=+6885.782564323"
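The pod_startup_latency_tracker line just above encodes a consistency check: podStartE2EDuration is watchObservedRunningTime minus podCreationTimestamp, and podStartSLOduration is that E2E figure minus the image-pull window (lastFinishedPulling minus firstStartedPulling). Recomputing from the redhat-operators-jgnss values reproduces both reported numbers, and the same relationship holds for the community-operators and certified-operators entries earlier. A stdlib-only sketch:

```go
package main

import (
	"fmt"
	"time"
)

// Timestamps copied verbatim from the redhat-operators-jgnss entry above.
const layout = "2006-01-02 15:04:05 -0700 MST" // Go accepts fractional seconds when parsing

func mustParse(s string) time.Time {
	t, err := time.Parse(layout, s)
	if err != nil {
		panic(err)
	}
	return t
}

func main() {
	created := mustParse("2025-11-26 08:43:03 +0000 UTC")
	firstPull := mustParse("2025-11-26 08:43:04.815112622 +0000 UTC")
	lastPull := mustParse("2025-11-26 08:43:09.363359316 +0000 UTC")
	watchRunning := mustParse("2025-11-26 08:43:09.898676025 +0000 UTC")

	e2e := watchRunning.Sub(created)
	slo := e2e - lastPull.Sub(firstPull) // exclude time spent pulling images
	fmt.Println("podStartE2EDuration:", e2e) // 6.898676025s
	fmt.Println("podStartSLOduration:", slo) // 2.350429331s
}
```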
Nov 26 08:43:14 crc kubenswrapper[4492]: I1126 08:43:14.150502 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-jgnss"
Nov 26 08:43:14 crc kubenswrapper[4492]: I1126 08:43:14.151004 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-jgnss"
Nov 26 08:43:15 crc kubenswrapper[4492]: I1126 08:43:15.192470 4492 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-jgnss" podUID="8379b2dd-5eb1-464d-9cea-b4df7d14f417" containerName="registry-server" probeResult="failure" output=<
Nov 26 08:43:15 crc kubenswrapper[4492]: timeout: failed to connect service ":50051" within 1s
Nov 26 08:43:15 crc kubenswrapper[4492]: >
Nov 26 08:43:24 crc kubenswrapper[4492]: I1126 08:43:24.196468 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-jgnss"
Nov 26 08:43:24 crc kubenswrapper[4492]: I1126 08:43:24.242798 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-jgnss"
Nov 26 08:43:24 crc kubenswrapper[4492]: I1126 08:43:24.304336 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-cdzrb"]
Nov 26 08:43:24 crc kubenswrapper[4492]: I1126 08:43:24.307800 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-cdzrb"
Nov 26 08:43:24 crc kubenswrapper[4492]: I1126 08:43:24.315378 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-cdzrb"]
Nov 26 08:43:24 crc kubenswrapper[4492]: I1126 08:43:24.369938 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0078ee35-89d1-4072-8df8-84c04074628d-utilities\") pod \"redhat-marketplace-cdzrb\" (UID: \"0078ee35-89d1-4072-8df8-84c04074628d\") " pod="openshift-marketplace/redhat-marketplace-cdzrb"
Nov 26 08:43:24 crc kubenswrapper[4492]: I1126 08:43:24.370042 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0078ee35-89d1-4072-8df8-84c04074628d-catalog-content\") pod \"redhat-marketplace-cdzrb\" (UID: \"0078ee35-89d1-4072-8df8-84c04074628d\") " pod="openshift-marketplace/redhat-marketplace-cdzrb"
Nov 26 08:43:24 crc kubenswrapper[4492]: I1126 08:43:24.370208 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8c7xs\" (UniqueName: \"kubernetes.io/projected/0078ee35-89d1-4072-8df8-84c04074628d-kube-api-access-8c7xs\") pod \"redhat-marketplace-cdzrb\" (UID: \"0078ee35-89d1-4072-8df8-84c04074628d\") " pod="openshift-marketplace/redhat-marketplace-cdzrb"
Nov 26 08:43:24 crc kubenswrapper[4492]: I1126 08:43:24.488482 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0078ee35-89d1-4072-8df8-84c04074628d-utilities\") pod \"redhat-marketplace-cdzrb\" (UID: \"0078ee35-89d1-4072-8df8-84c04074628d\") " pod="openshift-marketplace/redhat-marketplace-cdzrb"
Nov 26 08:43:24 crc kubenswrapper[4492]: I1126 08:43:24.489210 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0078ee35-89d1-4072-8df8-84c04074628d-catalog-content\") pod \"redhat-marketplace-cdzrb\" (UID: \"0078ee35-89d1-4072-8df8-84c04074628d\") " pod="openshift-marketplace/redhat-marketplace-cdzrb"
Nov 26 08:43:24 crc kubenswrapper[4492]: I1126 08:43:24.489436 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8c7xs\" (UniqueName: \"kubernetes.io/projected/0078ee35-89d1-4072-8df8-84c04074628d-kube-api-access-8c7xs\") pod \"redhat-marketplace-cdzrb\" (UID: \"0078ee35-89d1-4072-8df8-84c04074628d\") " pod="openshift-marketplace/redhat-marketplace-cdzrb"
Nov 26 08:43:24 crc kubenswrapper[4492]: I1126 08:43:24.491511 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0078ee35-89d1-4072-8df8-84c04074628d-utilities\") pod \"redhat-marketplace-cdzrb\" (UID: \"0078ee35-89d1-4072-8df8-84c04074628d\") " pod="openshift-marketplace/redhat-marketplace-cdzrb"
Nov 26 08:43:24 crc kubenswrapper[4492]: I1126 08:43:24.496957 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0078ee35-89d1-4072-8df8-84c04074628d-catalog-content\") pod \"redhat-marketplace-cdzrb\" (UID: \"0078ee35-89d1-4072-8df8-84c04074628d\") " pod="openshift-marketplace/redhat-marketplace-cdzrb"
Nov 26 08:43:24 crc kubenswrapper[4492]: I1126 08:43:24.538401 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8c7xs\" (UniqueName: \"kubernetes.io/projected/0078ee35-89d1-4072-8df8-84c04074628d-kube-api-access-8c7xs\") pod \"redhat-marketplace-cdzrb\" (UID: \"0078ee35-89d1-4072-8df8-84c04074628d\") " pod="openshift-marketplace/redhat-marketplace-cdzrb"
Nov 26 08:43:24 crc kubenswrapper[4492]: I1126 08:43:24.627919 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-cdzrb"
Nov 26 08:43:25 crc kubenswrapper[4492]: I1126 08:43:25.185073 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-cdzrb"]
Nov 26 08:43:26 crc kubenswrapper[4492]: I1126 08:43:26.054695 4492 generic.go:334] "Generic (PLEG): container finished" podID="0078ee35-89d1-4072-8df8-84c04074628d" containerID="884a177e0728b4c4c7f28e9f5f3e52fcc8c9e2732d1c0bbe0941d67ed964674f" exitCode=0
Nov 26 08:43:26 crc kubenswrapper[4492]: I1126 08:43:26.054800 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-cdzrb" event={"ID":"0078ee35-89d1-4072-8df8-84c04074628d","Type":"ContainerDied","Data":"884a177e0728b4c4c7f28e9f5f3e52fcc8c9e2732d1c0bbe0941d67ed964674f"}
Nov 26 08:43:26 crc kubenswrapper[4492]: I1126 08:43:26.055757 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-cdzrb" event={"ID":"0078ee35-89d1-4072-8df8-84c04074628d","Type":"ContainerStarted","Data":"1371a9fecab8cb839ae40254fb2a21cddf67b6c63429d1ed6df973a811f0a055"}
Nov 26 08:43:27 crc kubenswrapper[4492]: I1126 08:43:27.069140 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-cdzrb" event={"ID":"0078ee35-89d1-4072-8df8-84c04074628d","Type":"ContainerStarted","Data":"f4c9e844f50214fde3dfcd0c4ccc46f72eee74fb2ec8296083c756f4e3493e35"}
Nov 26 08:43:27 crc kubenswrapper[4492]: I1126 08:43:27.481051 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-jgnss"]
Nov 26 08:43:27 crc kubenswrapper[4492]: I1126 08:43:27.481681 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-jgnss" podUID="8379b2dd-5eb1-464d-9cea-b4df7d14f417" containerName="registry-server" containerID="cri-o://afb55a97fea83e5ee662a1fa22b87965ce1a9c3597902f828790e7fccf4b96aa" gracePeriod=2
Nov 26 08:43:27 crc kubenswrapper[4492]: I1126 08:43:27.908800 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-jgnss"
Nov 26 08:43:28 crc kubenswrapper[4492]: I1126 08:43:28.079524 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8379b2dd-5eb1-464d-9cea-b4df7d14f417-catalog-content\") pod \"8379b2dd-5eb1-464d-9cea-b4df7d14f417\" (UID: \"8379b2dd-5eb1-464d-9cea-b4df7d14f417\") "
Nov 26 08:43:28 crc kubenswrapper[4492]: I1126 08:43:28.080473 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8379b2dd-5eb1-464d-9cea-b4df7d14f417-utilities\") pod \"8379b2dd-5eb1-464d-9cea-b4df7d14f417\" (UID: \"8379b2dd-5eb1-464d-9cea-b4df7d14f417\") "
Nov 26 08:43:28 crc kubenswrapper[4492]: I1126 08:43:28.080745 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mdb79\" (UniqueName: \"kubernetes.io/projected/8379b2dd-5eb1-464d-9cea-b4df7d14f417-kube-api-access-mdb79\") pod \"8379b2dd-5eb1-464d-9cea-b4df7d14f417\" (UID: \"8379b2dd-5eb1-464d-9cea-b4df7d14f417\") "
Nov 26 08:43:28 crc kubenswrapper[4492]: I1126 08:43:28.081313 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8379b2dd-5eb1-464d-9cea-b4df7d14f417-utilities" (OuterVolumeSpecName: "utilities") pod "8379b2dd-5eb1-464d-9cea-b4df7d14f417" (UID: "8379b2dd-5eb1-464d-9cea-b4df7d14f417"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 08:43:28 crc kubenswrapper[4492]: I1126 08:43:28.082039 4492 generic.go:334] "Generic (PLEG): container finished" podID="0078ee35-89d1-4072-8df8-84c04074628d" containerID="f4c9e844f50214fde3dfcd0c4ccc46f72eee74fb2ec8296083c756f4e3493e35" exitCode=0
Nov 26 08:43:28 crc kubenswrapper[4492]: I1126 08:43:28.082205 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-cdzrb" event={"ID":"0078ee35-89d1-4072-8df8-84c04074628d","Type":"ContainerDied","Data":"f4c9e844f50214fde3dfcd0c4ccc46f72eee74fb2ec8296083c756f4e3493e35"}
Nov 26 08:43:28 crc kubenswrapper[4492]: I1126 08:43:28.091991 4492 generic.go:334] "Generic (PLEG): container finished" podID="8379b2dd-5eb1-464d-9cea-b4df7d14f417" containerID="afb55a97fea83e5ee662a1fa22b87965ce1a9c3597902f828790e7fccf4b96aa" exitCode=0
Nov 26 08:43:28 crc kubenswrapper[4492]: I1126 08:43:28.092042 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jgnss" event={"ID":"8379b2dd-5eb1-464d-9cea-b4df7d14f417","Type":"ContainerDied","Data":"afb55a97fea83e5ee662a1fa22b87965ce1a9c3597902f828790e7fccf4b96aa"}
Nov 26 08:43:28 crc kubenswrapper[4492]: I1126 08:43:28.092076 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jgnss" event={"ID":"8379b2dd-5eb1-464d-9cea-b4df7d14f417","Type":"ContainerDied","Data":"0f7453dfabc217c35f082fa7ac50e0f156d7fb344d81a356d1cffa563ff1ecca"}
Nov 26 08:43:28 crc kubenswrapper[4492]: I1126 08:43:28.093197 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-jgnss"
Nov 26 08:43:28 crc kubenswrapper[4492]: I1126 08:43:28.093506 4492 scope.go:117] "RemoveContainer" containerID="afb55a97fea83e5ee662a1fa22b87965ce1a9c3597902f828790e7fccf4b96aa"
Nov 26 08:43:28 crc kubenswrapper[4492]: I1126 08:43:28.098006 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8379b2dd-5eb1-464d-9cea-b4df7d14f417-kube-api-access-mdb79" (OuterVolumeSpecName: "kube-api-access-mdb79") pod "8379b2dd-5eb1-464d-9cea-b4df7d14f417" (UID: "8379b2dd-5eb1-464d-9cea-b4df7d14f417"). InnerVolumeSpecName "kube-api-access-mdb79". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 08:43:28 crc kubenswrapper[4492]: I1126 08:43:28.182072 4492 scope.go:117] "RemoveContainer" containerID="e267a84103e93de808db92d80a1ab79d3d666108852af70d2b416fb343f190db"
Nov 26 08:43:28 crc kubenswrapper[4492]: I1126 08:43:28.185583 4492 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8379b2dd-5eb1-464d-9cea-b4df7d14f417-utilities\") on node \"crc\" DevicePath \"\""
Nov 26 08:43:28 crc kubenswrapper[4492]: I1126 08:43:28.185702 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mdb79\" (UniqueName: \"kubernetes.io/projected/8379b2dd-5eb1-464d-9cea-b4df7d14f417-kube-api-access-mdb79\") on node \"crc\" DevicePath \"\""
Nov 26 08:43:28 crc kubenswrapper[4492]: I1126 08:43:28.200250 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8379b2dd-5eb1-464d-9cea-b4df7d14f417-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8379b2dd-5eb1-464d-9cea-b4df7d14f417" (UID: "8379b2dd-5eb1-464d-9cea-b4df7d14f417"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 08:43:28 crc kubenswrapper[4492]: I1126 08:43:28.220297 4492 scope.go:117] "RemoveContainer" containerID="e88e40f3906d3f45d0e67fd1100c4bf9b37b0b7dcf4988463668f5976af5d6ea"
Nov 26 08:43:28 crc kubenswrapper[4492]: I1126 08:43:28.247449 4492 scope.go:117] "RemoveContainer" containerID="afb55a97fea83e5ee662a1fa22b87965ce1a9c3597902f828790e7fccf4b96aa"
Nov 26 08:43:28 crc kubenswrapper[4492]: E1126 08:43:28.247795 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"afb55a97fea83e5ee662a1fa22b87965ce1a9c3597902f828790e7fccf4b96aa\": container with ID starting with afb55a97fea83e5ee662a1fa22b87965ce1a9c3597902f828790e7fccf4b96aa not found: ID does not exist" containerID="afb55a97fea83e5ee662a1fa22b87965ce1a9c3597902f828790e7fccf4b96aa"
Nov 26 08:43:28 crc kubenswrapper[4492]: I1126 08:43:28.247828 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"afb55a97fea83e5ee662a1fa22b87965ce1a9c3597902f828790e7fccf4b96aa"} err="failed to get container status \"afb55a97fea83e5ee662a1fa22b87965ce1a9c3597902f828790e7fccf4b96aa\": rpc error: code = NotFound desc = could not find container \"afb55a97fea83e5ee662a1fa22b87965ce1a9c3597902f828790e7fccf4b96aa\": container with ID starting with afb55a97fea83e5ee662a1fa22b87965ce1a9c3597902f828790e7fccf4b96aa not found: ID does not exist"
Nov 26 08:43:28 crc kubenswrapper[4492]: I1126 08:43:28.247854 4492 scope.go:117] "RemoveContainer" containerID="e267a84103e93de808db92d80a1ab79d3d666108852af70d2b416fb343f190db"
Nov 26 08:43:28 crc kubenswrapper[4492]: E1126 08:43:28.248095 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e267a84103e93de808db92d80a1ab79d3d666108852af70d2b416fb343f190db\": container with ID starting with e267a84103e93de808db92d80a1ab79d3d666108852af70d2b416fb343f190db not found: ID does not exist" containerID="e267a84103e93de808db92d80a1ab79d3d666108852af70d2b416fb343f190db"
Nov 26 08:43:28 crc kubenswrapper[4492]: I1126 08:43:28.248119 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e267a84103e93de808db92d80a1ab79d3d666108852af70d2b416fb343f190db"} err="failed to get container status \"e267a84103e93de808db92d80a1ab79d3d666108852af70d2b416fb343f190db\": rpc error: code = NotFound desc = could not find container \"e267a84103e93de808db92d80a1ab79d3d666108852af70d2b416fb343f190db\": container with ID starting with e267a84103e93de808db92d80a1ab79d3d666108852af70d2b416fb343f190db not found: ID does not exist"
Nov 26 08:43:28 crc kubenswrapper[4492]: I1126 08:43:28.248135 4492 scope.go:117] "RemoveContainer" containerID="e88e40f3906d3f45d0e67fd1100c4bf9b37b0b7dcf4988463668f5976af5d6ea"
Nov 26 08:43:28 crc kubenswrapper[4492]: E1126 08:43:28.248500 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e88e40f3906d3f45d0e67fd1100c4bf9b37b0b7dcf4988463668f5976af5d6ea\": container with ID starting with e88e40f3906d3f45d0e67fd1100c4bf9b37b0b7dcf4988463668f5976af5d6ea not found: ID does not exist" containerID="e88e40f3906d3f45d0e67fd1100c4bf9b37b0b7dcf4988463668f5976af5d6ea"
Nov 26 08:43:28 crc kubenswrapper[4492]: I1126 08:43:28.248520 4492 pod_container_deletor.go:53] "DeleteContainer returned error"
containerID={"Type":"cri-o","ID":"e88e40f3906d3f45d0e67fd1100c4bf9b37b0b7dcf4988463668f5976af5d6ea"} err="failed to get container status \"e88e40f3906d3f45d0e67fd1100c4bf9b37b0b7dcf4988463668f5976af5d6ea\": rpc error: code = NotFound desc = could not find container \"e88e40f3906d3f45d0e67fd1100c4bf9b37b0b7dcf4988463668f5976af5d6ea\": container with ID starting with e88e40f3906d3f45d0e67fd1100c4bf9b37b0b7dcf4988463668f5976af5d6ea not found: ID does not exist" Nov 26 08:43:28 crc kubenswrapper[4492]: I1126 08:43:28.287727 4492 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8379b2dd-5eb1-464d-9cea-b4df7d14f417-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 08:43:28 crc kubenswrapper[4492]: I1126 08:43:28.461665 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-jgnss"] Nov 26 08:43:28 crc kubenswrapper[4492]: I1126 08:43:28.476221 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-jgnss"] Nov 26 08:43:29 crc kubenswrapper[4492]: I1126 08:43:29.106962 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-cdzrb" event={"ID":"0078ee35-89d1-4072-8df8-84c04074628d","Type":"ContainerStarted","Data":"d02adfeb4775002c27c7ec6a765af2e2594e5e6123020463b4c9f77be3dd9087"} Nov 26 08:43:30 crc kubenswrapper[4492]: I1126 08:43:30.451985 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8379b2dd-5eb1-464d-9cea-b4df7d14f417" path="/var/lib/kubelet/pods/8379b2dd-5eb1-464d-9cea-b4df7d14f417/volumes" Nov 26 08:43:34 crc kubenswrapper[4492]: I1126 08:43:34.628916 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-cdzrb" Nov 26 08:43:34 crc kubenswrapper[4492]: I1126 08:43:34.629589 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-cdzrb" Nov 26 08:43:34 crc kubenswrapper[4492]: I1126 08:43:34.675911 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-cdzrb" Nov 26 08:43:34 crc kubenswrapper[4492]: I1126 08:43:34.703912 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-cdzrb" podStartSLOduration=8.112166966 podStartE2EDuration="10.703874194s" podCreationTimestamp="2025-11-26 08:43:24 +0000 UTC" firstStartedPulling="2025-11-26 08:43:26.057306359 +0000 UTC m=+6901.941194657" lastFinishedPulling="2025-11-26 08:43:28.649013588 +0000 UTC m=+6904.532901885" observedRunningTime="2025-11-26 08:43:29.13673453 +0000 UTC m=+6905.020622828" watchObservedRunningTime="2025-11-26 08:43:34.703874194 +0000 UTC m=+6910.587762492" Nov 26 08:43:35 crc kubenswrapper[4492]: I1126 08:43:35.214272 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-cdzrb" Nov 26 08:43:38 crc kubenswrapper[4492]: I1126 08:43:38.887497 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-cdzrb"] Nov 26 08:43:38 crc kubenswrapper[4492]: I1126 08:43:38.888259 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-cdzrb" podUID="0078ee35-89d1-4072-8df8-84c04074628d" containerName="registry-server" 
containerID="cri-o://d02adfeb4775002c27c7ec6a765af2e2594e5e6123020463b4c9f77be3dd9087" gracePeriod=2 Nov 26 08:43:39 crc kubenswrapper[4492]: I1126 08:43:39.252620 4492 generic.go:334] "Generic (PLEG): container finished" podID="0078ee35-89d1-4072-8df8-84c04074628d" containerID="d02adfeb4775002c27c7ec6a765af2e2594e5e6123020463b4c9f77be3dd9087" exitCode=0 Nov 26 08:43:39 crc kubenswrapper[4492]: I1126 08:43:39.252670 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-cdzrb" event={"ID":"0078ee35-89d1-4072-8df8-84c04074628d","Type":"ContainerDied","Data":"d02adfeb4775002c27c7ec6a765af2e2594e5e6123020463b4c9f77be3dd9087"} Nov 26 08:43:39 crc kubenswrapper[4492]: I1126 08:43:39.447331 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-cdzrb" Nov 26 08:43:39 crc kubenswrapper[4492]: I1126 08:43:39.556771 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0078ee35-89d1-4072-8df8-84c04074628d-catalog-content\") pod \"0078ee35-89d1-4072-8df8-84c04074628d\" (UID: \"0078ee35-89d1-4072-8df8-84c04074628d\") " Nov 26 08:43:39 crc kubenswrapper[4492]: I1126 08:43:39.556921 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0078ee35-89d1-4072-8df8-84c04074628d-utilities\") pod \"0078ee35-89d1-4072-8df8-84c04074628d\" (UID: \"0078ee35-89d1-4072-8df8-84c04074628d\") " Nov 26 08:43:39 crc kubenswrapper[4492]: I1126 08:43:39.557035 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8c7xs\" (UniqueName: \"kubernetes.io/projected/0078ee35-89d1-4072-8df8-84c04074628d-kube-api-access-8c7xs\") pod \"0078ee35-89d1-4072-8df8-84c04074628d\" (UID: \"0078ee35-89d1-4072-8df8-84c04074628d\") " Nov 26 08:43:39 crc kubenswrapper[4492]: I1126 08:43:39.557928 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0078ee35-89d1-4072-8df8-84c04074628d-utilities" (OuterVolumeSpecName: "utilities") pod "0078ee35-89d1-4072-8df8-84c04074628d" (UID: "0078ee35-89d1-4072-8df8-84c04074628d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 08:43:39 crc kubenswrapper[4492]: I1126 08:43:39.575712 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0078ee35-89d1-4072-8df8-84c04074628d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0078ee35-89d1-4072-8df8-84c04074628d" (UID: "0078ee35-89d1-4072-8df8-84c04074628d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 08:43:39 crc kubenswrapper[4492]: I1126 08:43:39.576297 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0078ee35-89d1-4072-8df8-84c04074628d-kube-api-access-8c7xs" (OuterVolumeSpecName: "kube-api-access-8c7xs") pod "0078ee35-89d1-4072-8df8-84c04074628d" (UID: "0078ee35-89d1-4072-8df8-84c04074628d"). InnerVolumeSpecName "kube-api-access-8c7xs". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:43:39 crc kubenswrapper[4492]: I1126 08:43:39.660633 4492 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0078ee35-89d1-4072-8df8-84c04074628d-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 08:43:39 crc kubenswrapper[4492]: I1126 08:43:39.660682 4492 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0078ee35-89d1-4072-8df8-84c04074628d-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 08:43:39 crc kubenswrapper[4492]: I1126 08:43:39.660694 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8c7xs\" (UniqueName: \"kubernetes.io/projected/0078ee35-89d1-4072-8df8-84c04074628d-kube-api-access-8c7xs\") on node \"crc\" DevicePath \"\"" Nov 26 08:43:40 crc kubenswrapper[4492]: I1126 08:43:40.263524 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-cdzrb" event={"ID":"0078ee35-89d1-4072-8df8-84c04074628d","Type":"ContainerDied","Data":"1371a9fecab8cb839ae40254fb2a21cddf67b6c63429d1ed6df973a811f0a055"} Nov 26 08:43:40 crc kubenswrapper[4492]: I1126 08:43:40.263592 4492 scope.go:117] "RemoveContainer" containerID="d02adfeb4775002c27c7ec6a765af2e2594e5e6123020463b4c9f77be3dd9087" Nov 26 08:43:40 crc kubenswrapper[4492]: I1126 08:43:40.263631 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-cdzrb" Nov 26 08:43:40 crc kubenswrapper[4492]: I1126 08:43:40.286860 4492 scope.go:117] "RemoveContainer" containerID="f4c9e844f50214fde3dfcd0c4ccc46f72eee74fb2ec8296083c756f4e3493e35" Nov 26 08:43:40 crc kubenswrapper[4492]: I1126 08:43:40.306908 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-cdzrb"] Nov 26 08:43:40 crc kubenswrapper[4492]: I1126 08:43:40.317315 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-cdzrb"] Nov 26 08:43:40 crc kubenswrapper[4492]: I1126 08:43:40.324436 4492 scope.go:117] "RemoveContainer" containerID="884a177e0728b4c4c7f28e9f5f3e52fcc8c9e2732d1c0bbe0941d67ed964674f" Nov 26 08:43:40 crc kubenswrapper[4492]: I1126 08:43:40.451080 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0078ee35-89d1-4072-8df8-84c04074628d" path="/var/lib/kubelet/pods/0078ee35-89d1-4072-8df8-84c04074628d/volumes" Nov 26 08:44:49 crc kubenswrapper[4492]: I1126 08:44:49.441135 4492 patch_prober.go:28] interesting pod/machine-config-daemon-6blv7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 08:44:49 crc kubenswrapper[4492]: I1126 08:44:49.441707 4492 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 08:45:00 crc kubenswrapper[4492]: I1126 08:45:00.397612 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402445-jcpn7"] Nov 26 08:45:00 crc kubenswrapper[4492]: E1126 08:45:00.400534 4492 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="8379b2dd-5eb1-464d-9cea-b4df7d14f417" containerName="extract-utilities" Nov 26 08:45:00 crc kubenswrapper[4492]: I1126 08:45:00.400577 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="8379b2dd-5eb1-464d-9cea-b4df7d14f417" containerName="extract-utilities" Nov 26 08:45:00 crc kubenswrapper[4492]: E1126 08:45:00.400594 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0078ee35-89d1-4072-8df8-84c04074628d" containerName="registry-server" Nov 26 08:45:00 crc kubenswrapper[4492]: I1126 08:45:00.400602 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="0078ee35-89d1-4072-8df8-84c04074628d" containerName="registry-server" Nov 26 08:45:00 crc kubenswrapper[4492]: E1126 08:45:00.400622 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0078ee35-89d1-4072-8df8-84c04074628d" containerName="extract-content" Nov 26 08:45:00 crc kubenswrapper[4492]: I1126 08:45:00.400630 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="0078ee35-89d1-4072-8df8-84c04074628d" containerName="extract-content" Nov 26 08:45:00 crc kubenswrapper[4492]: E1126 08:45:00.400639 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8379b2dd-5eb1-464d-9cea-b4df7d14f417" containerName="registry-server" Nov 26 08:45:00 crc kubenswrapper[4492]: I1126 08:45:00.400647 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="8379b2dd-5eb1-464d-9cea-b4df7d14f417" containerName="registry-server" Nov 26 08:45:00 crc kubenswrapper[4492]: E1126 08:45:00.400702 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8379b2dd-5eb1-464d-9cea-b4df7d14f417" containerName="extract-content" Nov 26 08:45:00 crc kubenswrapper[4492]: I1126 08:45:00.400717 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="8379b2dd-5eb1-464d-9cea-b4df7d14f417" containerName="extract-content" Nov 26 08:45:00 crc kubenswrapper[4492]: E1126 08:45:00.400750 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0078ee35-89d1-4072-8df8-84c04074628d" containerName="extract-utilities" Nov 26 08:45:00 crc kubenswrapper[4492]: I1126 08:45:00.400757 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="0078ee35-89d1-4072-8df8-84c04074628d" containerName="extract-utilities" Nov 26 08:45:00 crc kubenswrapper[4492]: I1126 08:45:00.401424 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="0078ee35-89d1-4072-8df8-84c04074628d" containerName="registry-server" Nov 26 08:45:00 crc kubenswrapper[4492]: I1126 08:45:00.401605 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="8379b2dd-5eb1-464d-9cea-b4df7d14f417" containerName="registry-server" Nov 26 08:45:00 crc kubenswrapper[4492]: I1126 08:45:00.403120 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402445-jcpn7" Nov 26 08:45:00 crc kubenswrapper[4492]: I1126 08:45:00.413968 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 26 08:45:00 crc kubenswrapper[4492]: I1126 08:45:00.414085 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 26 08:45:00 crc kubenswrapper[4492]: I1126 08:45:00.422372 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402445-jcpn7"] Nov 26 08:45:00 crc kubenswrapper[4492]: I1126 08:45:00.607654 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/a2bd3c63-c739-4dca-9933-3d00160d3f0c-secret-volume\") pod \"collect-profiles-29402445-jcpn7\" (UID: \"a2bd3c63-c739-4dca-9933-3d00160d3f0c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402445-jcpn7" Nov 26 08:45:00 crc kubenswrapper[4492]: I1126 08:45:00.608060 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-85q6c\" (UniqueName: \"kubernetes.io/projected/a2bd3c63-c739-4dca-9933-3d00160d3f0c-kube-api-access-85q6c\") pod \"collect-profiles-29402445-jcpn7\" (UID: \"a2bd3c63-c739-4dca-9933-3d00160d3f0c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402445-jcpn7" Nov 26 08:45:00 crc kubenswrapper[4492]: I1126 08:45:00.608332 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a2bd3c63-c739-4dca-9933-3d00160d3f0c-config-volume\") pod \"collect-profiles-29402445-jcpn7\" (UID: \"a2bd3c63-c739-4dca-9933-3d00160d3f0c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402445-jcpn7" Nov 26 08:45:00 crc kubenswrapper[4492]: I1126 08:45:00.710579 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-85q6c\" (UniqueName: \"kubernetes.io/projected/a2bd3c63-c739-4dca-9933-3d00160d3f0c-kube-api-access-85q6c\") pod \"collect-profiles-29402445-jcpn7\" (UID: \"a2bd3c63-c739-4dca-9933-3d00160d3f0c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402445-jcpn7" Nov 26 08:45:00 crc kubenswrapper[4492]: I1126 08:45:00.710987 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a2bd3c63-c739-4dca-9933-3d00160d3f0c-config-volume\") pod \"collect-profiles-29402445-jcpn7\" (UID: \"a2bd3c63-c739-4dca-9933-3d00160d3f0c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402445-jcpn7" Nov 26 08:45:00 crc kubenswrapper[4492]: I1126 08:45:00.711058 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/a2bd3c63-c739-4dca-9933-3d00160d3f0c-secret-volume\") pod \"collect-profiles-29402445-jcpn7\" (UID: \"a2bd3c63-c739-4dca-9933-3d00160d3f0c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402445-jcpn7" Nov 26 08:45:00 crc kubenswrapper[4492]: I1126 08:45:00.716538 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a2bd3c63-c739-4dca-9933-3d00160d3f0c-config-volume\") pod 
\"collect-profiles-29402445-jcpn7\" (UID: \"a2bd3c63-c739-4dca-9933-3d00160d3f0c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402445-jcpn7" Nov 26 08:45:00 crc kubenswrapper[4492]: I1126 08:45:00.728076 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/a2bd3c63-c739-4dca-9933-3d00160d3f0c-secret-volume\") pod \"collect-profiles-29402445-jcpn7\" (UID: \"a2bd3c63-c739-4dca-9933-3d00160d3f0c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402445-jcpn7" Nov 26 08:45:00 crc kubenswrapper[4492]: I1126 08:45:00.732604 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-85q6c\" (UniqueName: \"kubernetes.io/projected/a2bd3c63-c739-4dca-9933-3d00160d3f0c-kube-api-access-85q6c\") pod \"collect-profiles-29402445-jcpn7\" (UID: \"a2bd3c63-c739-4dca-9933-3d00160d3f0c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402445-jcpn7" Nov 26 08:45:01 crc kubenswrapper[4492]: I1126 08:45:01.027696 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402445-jcpn7" Nov 26 08:45:01 crc kubenswrapper[4492]: I1126 08:45:01.571883 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402445-jcpn7"] Nov 26 08:45:02 crc kubenswrapper[4492]: I1126 08:45:02.052516 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402445-jcpn7" event={"ID":"a2bd3c63-c739-4dca-9933-3d00160d3f0c","Type":"ContainerStarted","Data":"2b471158ef1795ee42634b2de439c10d304bc77420100b01c3e6bb4e1e866f8e"} Nov 26 08:45:02 crc kubenswrapper[4492]: I1126 08:45:02.052991 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402445-jcpn7" event={"ID":"a2bd3c63-c739-4dca-9933-3d00160d3f0c","Type":"ContainerStarted","Data":"5470621bb7d453504cfd2d035f2a445662a206b37f6d245d651c73fcb85f6c9c"} Nov 26 08:45:02 crc kubenswrapper[4492]: I1126 08:45:02.071411 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29402445-jcpn7" podStartSLOduration=2.07139131 podStartE2EDuration="2.07139131s" podCreationTimestamp="2025-11-26 08:45:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 08:45:02.0671579 +0000 UTC m=+6997.951046199" watchObservedRunningTime="2025-11-26 08:45:02.07139131 +0000 UTC m=+6997.955279608" Nov 26 08:45:03 crc kubenswrapper[4492]: I1126 08:45:03.066569 4492 generic.go:334] "Generic (PLEG): container finished" podID="a2bd3c63-c739-4dca-9933-3d00160d3f0c" containerID="2b471158ef1795ee42634b2de439c10d304bc77420100b01c3e6bb4e1e866f8e" exitCode=0 Nov 26 08:45:03 crc kubenswrapper[4492]: I1126 08:45:03.066711 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402445-jcpn7" event={"ID":"a2bd3c63-c739-4dca-9933-3d00160d3f0c","Type":"ContainerDied","Data":"2b471158ef1795ee42634b2de439c10d304bc77420100b01c3e6bb4e1e866f8e"} Nov 26 08:45:04 crc kubenswrapper[4492]: I1126 08:45:04.436780 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402445-jcpn7" Nov 26 08:45:04 crc kubenswrapper[4492]: I1126 08:45:04.599996 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a2bd3c63-c739-4dca-9933-3d00160d3f0c-config-volume\") pod \"a2bd3c63-c739-4dca-9933-3d00160d3f0c\" (UID: \"a2bd3c63-c739-4dca-9933-3d00160d3f0c\") " Nov 26 08:45:04 crc kubenswrapper[4492]: I1126 08:45:04.600536 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/a2bd3c63-c739-4dca-9933-3d00160d3f0c-secret-volume\") pod \"a2bd3c63-c739-4dca-9933-3d00160d3f0c\" (UID: \"a2bd3c63-c739-4dca-9933-3d00160d3f0c\") " Nov 26 08:45:04 crc kubenswrapper[4492]: I1126 08:45:04.600708 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-85q6c\" (UniqueName: \"kubernetes.io/projected/a2bd3c63-c739-4dca-9933-3d00160d3f0c-kube-api-access-85q6c\") pod \"a2bd3c63-c739-4dca-9933-3d00160d3f0c\" (UID: \"a2bd3c63-c739-4dca-9933-3d00160d3f0c\") " Nov 26 08:45:04 crc kubenswrapper[4492]: I1126 08:45:04.601484 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a2bd3c63-c739-4dca-9933-3d00160d3f0c-config-volume" (OuterVolumeSpecName: "config-volume") pod "a2bd3c63-c739-4dca-9933-3d00160d3f0c" (UID: "a2bd3c63-c739-4dca-9933-3d00160d3f0c"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 08:45:04 crc kubenswrapper[4492]: I1126 08:45:04.611860 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a2bd3c63-c739-4dca-9933-3d00160d3f0c-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "a2bd3c63-c739-4dca-9933-3d00160d3f0c" (UID: "a2bd3c63-c739-4dca-9933-3d00160d3f0c"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:45:04 crc kubenswrapper[4492]: I1126 08:45:04.612446 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a2bd3c63-c739-4dca-9933-3d00160d3f0c-kube-api-access-85q6c" (OuterVolumeSpecName: "kube-api-access-85q6c") pod "a2bd3c63-c739-4dca-9933-3d00160d3f0c" (UID: "a2bd3c63-c739-4dca-9933-3d00160d3f0c"). InnerVolumeSpecName "kube-api-access-85q6c". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:45:04 crc kubenswrapper[4492]: I1126 08:45:04.668902 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402400-sxl86"] Nov 26 08:45:04 crc kubenswrapper[4492]: I1126 08:45:04.679920 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402400-sxl86"] Nov 26 08:45:04 crc kubenswrapper[4492]: I1126 08:45:04.703613 4492 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a2bd3c63-c739-4dca-9933-3d00160d3f0c-config-volume\") on node \"crc\" DevicePath \"\"" Nov 26 08:45:04 crc kubenswrapper[4492]: I1126 08:45:04.703644 4492 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/a2bd3c63-c739-4dca-9933-3d00160d3f0c-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 26 08:45:04 crc kubenswrapper[4492]: I1126 08:45:04.703659 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-85q6c\" (UniqueName: \"kubernetes.io/projected/a2bd3c63-c739-4dca-9933-3d00160d3f0c-kube-api-access-85q6c\") on node \"crc\" DevicePath \"\"" Nov 26 08:45:05 crc kubenswrapper[4492]: I1126 08:45:05.086137 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402445-jcpn7" event={"ID":"a2bd3c63-c739-4dca-9933-3d00160d3f0c","Type":"ContainerDied","Data":"5470621bb7d453504cfd2d035f2a445662a206b37f6d245d651c73fcb85f6c9c"} Nov 26 08:45:05 crc kubenswrapper[4492]: I1126 08:45:05.086382 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402445-jcpn7" Nov 26 08:45:05 crc kubenswrapper[4492]: I1126 08:45:05.086230 4492 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5470621bb7d453504cfd2d035f2a445662a206b37f6d245d651c73fcb85f6c9c" Nov 26 08:45:06 crc kubenswrapper[4492]: I1126 08:45:06.449009 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="649543fe-36bb-4582-877f-2dc13813134b" path="/var/lib/kubelet/pods/649543fe-36bb-4582-877f-2dc13813134b/volumes" Nov 26 08:45:19 crc kubenswrapper[4492]: I1126 08:45:19.442188 4492 patch_prober.go:28] interesting pod/machine-config-daemon-6blv7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 08:45:19 crc kubenswrapper[4492]: I1126 08:45:19.443987 4492 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 08:45:49 crc kubenswrapper[4492]: I1126 08:45:49.441015 4492 patch_prober.go:28] interesting pod/machine-config-daemon-6blv7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 08:45:49 crc kubenswrapper[4492]: I1126 08:45:49.441633 4492 prober.go:107] "Probe failed" probeType="Liveness" 
pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 08:45:49 crc kubenswrapper[4492]: I1126 08:45:49.441684 4492 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" Nov 26 08:45:49 crc kubenswrapper[4492]: I1126 08:45:49.442729 4492 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"ef96c0d9cafae12ebbbc7e77ec2f7f4753b6a2e65b5ea75228b7ab77555ee739"} pod="openshift-machine-config-operator/machine-config-daemon-6blv7" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 08:45:49 crc kubenswrapper[4492]: I1126 08:45:49.443010 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" containerID="cri-o://ef96c0d9cafae12ebbbc7e77ec2f7f4753b6a2e65b5ea75228b7ab77555ee739" gracePeriod=600 Nov 26 08:45:49 crc kubenswrapper[4492]: E1126 08:45:49.571761 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:45:50 crc kubenswrapper[4492]: I1126 08:45:50.559741 4492 generic.go:334] "Generic (PLEG): container finished" podID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerID="ef96c0d9cafae12ebbbc7e77ec2f7f4753b6a2e65b5ea75228b7ab77555ee739" exitCode=0 Nov 26 08:45:50 crc kubenswrapper[4492]: I1126 08:45:50.559811 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" event={"ID":"04bf18ad-d2a1-4b30-a3fa-2b6247363c82","Type":"ContainerDied","Data":"ef96c0d9cafae12ebbbc7e77ec2f7f4753b6a2e65b5ea75228b7ab77555ee739"} Nov 26 08:45:50 crc kubenswrapper[4492]: I1126 08:45:50.560621 4492 scope.go:117] "RemoveContainer" containerID="0fc0037b699acf411d41be0850c2d0d066d14797772966cdf096215720cf6e18" Nov 26 08:45:50 crc kubenswrapper[4492]: I1126 08:45:50.562901 4492 scope.go:117] "RemoveContainer" containerID="ef96c0d9cafae12ebbbc7e77ec2f7f4753b6a2e65b5ea75228b7ab77555ee739" Nov 26 08:45:50 crc kubenswrapper[4492]: E1126 08:45:50.565301 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:46:01 crc kubenswrapper[4492]: I1126 08:46:01.941468 4492 scope.go:117] "RemoveContainer" containerID="72695bb6b59cf0a7b4f51323b6e0bb759dd02db52676d8a218e363c0f918a1ca" Nov 26 08:46:04 crc kubenswrapper[4492]: I1126 08:46:04.439281 4492 scope.go:117] "RemoveContainer" 
containerID="ef96c0d9cafae12ebbbc7e77ec2f7f4753b6a2e65b5ea75228b7ab77555ee739" Nov 26 08:46:04 crc kubenswrapper[4492]: E1126 08:46:04.439961 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:46:17 crc kubenswrapper[4492]: I1126 08:46:17.440443 4492 scope.go:117] "RemoveContainer" containerID="ef96c0d9cafae12ebbbc7e77ec2f7f4753b6a2e65b5ea75228b7ab77555ee739" Nov 26 08:46:17 crc kubenswrapper[4492]: E1126 08:46:17.441452 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:46:30 crc kubenswrapper[4492]: I1126 08:46:30.438564 4492 scope.go:117] "RemoveContainer" containerID="ef96c0d9cafae12ebbbc7e77ec2f7f4753b6a2e65b5ea75228b7ab77555ee739" Nov 26 08:46:30 crc kubenswrapper[4492]: E1126 08:46:30.439407 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:46:32 crc kubenswrapper[4492]: I1126 08:46:32.606893 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-549d7cff59-nzj5j"] Nov 26 08:46:32 crc kubenswrapper[4492]: E1126 08:46:32.607603 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a2bd3c63-c739-4dca-9933-3d00160d3f0c" containerName="collect-profiles" Nov 26 08:46:32 crc kubenswrapper[4492]: I1126 08:46:32.607618 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="a2bd3c63-c739-4dca-9933-3d00160d3f0c" containerName="collect-profiles" Nov 26 08:46:32 crc kubenswrapper[4492]: I1126 08:46:32.607808 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="a2bd3c63-c739-4dca-9933-3d00160d3f0c" containerName="collect-profiles" Nov 26 08:46:32 crc kubenswrapper[4492]: I1126 08:46:32.608822 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-549d7cff59-nzj5j" Nov 26 08:46:32 crc kubenswrapper[4492]: I1126 08:46:32.640506 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/cee2a14f-baa1-4dd7-b54c-29bdf673809e-httpd-config\") pod \"neutron-549d7cff59-nzj5j\" (UID: \"cee2a14f-baa1-4dd7-b54c-29bdf673809e\") " pod="openstack/neutron-549d7cff59-nzj5j" Nov 26 08:46:32 crc kubenswrapper[4492]: I1126 08:46:32.640634 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cknb6\" (UniqueName: \"kubernetes.io/projected/cee2a14f-baa1-4dd7-b54c-29bdf673809e-kube-api-access-cknb6\") pod \"neutron-549d7cff59-nzj5j\" (UID: \"cee2a14f-baa1-4dd7-b54c-29bdf673809e\") " pod="openstack/neutron-549d7cff59-nzj5j" Nov 26 08:46:32 crc kubenswrapper[4492]: I1126 08:46:32.640715 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cee2a14f-baa1-4dd7-b54c-29bdf673809e-public-tls-certs\") pod \"neutron-549d7cff59-nzj5j\" (UID: \"cee2a14f-baa1-4dd7-b54c-29bdf673809e\") " pod="openstack/neutron-549d7cff59-nzj5j" Nov 26 08:46:32 crc kubenswrapper[4492]: I1126 08:46:32.640874 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/cee2a14f-baa1-4dd7-b54c-29bdf673809e-internal-tls-certs\") pod \"neutron-549d7cff59-nzj5j\" (UID: \"cee2a14f-baa1-4dd7-b54c-29bdf673809e\") " pod="openstack/neutron-549d7cff59-nzj5j" Nov 26 08:46:32 crc kubenswrapper[4492]: I1126 08:46:32.640998 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cee2a14f-baa1-4dd7-b54c-29bdf673809e-combined-ca-bundle\") pod \"neutron-549d7cff59-nzj5j\" (UID: \"cee2a14f-baa1-4dd7-b54c-29bdf673809e\") " pod="openstack/neutron-549d7cff59-nzj5j" Nov 26 08:46:32 crc kubenswrapper[4492]: I1126 08:46:32.641070 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/cee2a14f-baa1-4dd7-b54c-29bdf673809e-ovndb-tls-certs\") pod \"neutron-549d7cff59-nzj5j\" (UID: \"cee2a14f-baa1-4dd7-b54c-29bdf673809e\") " pod="openstack/neutron-549d7cff59-nzj5j" Nov 26 08:46:32 crc kubenswrapper[4492]: I1126 08:46:32.641101 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/cee2a14f-baa1-4dd7-b54c-29bdf673809e-config\") pod \"neutron-549d7cff59-nzj5j\" (UID: \"cee2a14f-baa1-4dd7-b54c-29bdf673809e\") " pod="openstack/neutron-549d7cff59-nzj5j" Nov 26 08:46:32 crc kubenswrapper[4492]: I1126 08:46:32.672194 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-549d7cff59-nzj5j"] Nov 26 08:46:32 crc kubenswrapper[4492]: I1126 08:46:32.743924 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cknb6\" (UniqueName: \"kubernetes.io/projected/cee2a14f-baa1-4dd7-b54c-29bdf673809e-kube-api-access-cknb6\") pod \"neutron-549d7cff59-nzj5j\" (UID: \"cee2a14f-baa1-4dd7-b54c-29bdf673809e\") " pod="openstack/neutron-549d7cff59-nzj5j" Nov 26 08:46:32 crc kubenswrapper[4492]: I1126 08:46:32.744000 4492 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cee2a14f-baa1-4dd7-b54c-29bdf673809e-public-tls-certs\") pod \"neutron-549d7cff59-nzj5j\" (UID: \"cee2a14f-baa1-4dd7-b54c-29bdf673809e\") " pod="openstack/neutron-549d7cff59-nzj5j" Nov 26 08:46:32 crc kubenswrapper[4492]: I1126 08:46:32.744077 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/cee2a14f-baa1-4dd7-b54c-29bdf673809e-internal-tls-certs\") pod \"neutron-549d7cff59-nzj5j\" (UID: \"cee2a14f-baa1-4dd7-b54c-29bdf673809e\") " pod="openstack/neutron-549d7cff59-nzj5j" Nov 26 08:46:32 crc kubenswrapper[4492]: I1126 08:46:32.744158 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cee2a14f-baa1-4dd7-b54c-29bdf673809e-combined-ca-bundle\") pod \"neutron-549d7cff59-nzj5j\" (UID: \"cee2a14f-baa1-4dd7-b54c-29bdf673809e\") " pod="openstack/neutron-549d7cff59-nzj5j" Nov 26 08:46:32 crc kubenswrapper[4492]: I1126 08:46:32.744238 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/cee2a14f-baa1-4dd7-b54c-29bdf673809e-ovndb-tls-certs\") pod \"neutron-549d7cff59-nzj5j\" (UID: \"cee2a14f-baa1-4dd7-b54c-29bdf673809e\") " pod="openstack/neutron-549d7cff59-nzj5j" Nov 26 08:46:32 crc kubenswrapper[4492]: I1126 08:46:32.744275 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/cee2a14f-baa1-4dd7-b54c-29bdf673809e-config\") pod \"neutron-549d7cff59-nzj5j\" (UID: \"cee2a14f-baa1-4dd7-b54c-29bdf673809e\") " pod="openstack/neutron-549d7cff59-nzj5j" Nov 26 08:46:32 crc kubenswrapper[4492]: I1126 08:46:32.744318 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/cee2a14f-baa1-4dd7-b54c-29bdf673809e-httpd-config\") pod \"neutron-549d7cff59-nzj5j\" (UID: \"cee2a14f-baa1-4dd7-b54c-29bdf673809e\") " pod="openstack/neutron-549d7cff59-nzj5j" Nov 26 08:46:32 crc kubenswrapper[4492]: I1126 08:46:32.758363 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/cee2a14f-baa1-4dd7-b54c-29bdf673809e-httpd-config\") pod \"neutron-549d7cff59-nzj5j\" (UID: \"cee2a14f-baa1-4dd7-b54c-29bdf673809e\") " pod="openstack/neutron-549d7cff59-nzj5j" Nov 26 08:46:32 crc kubenswrapper[4492]: I1126 08:46:32.758369 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cee2a14f-baa1-4dd7-b54c-29bdf673809e-combined-ca-bundle\") pod \"neutron-549d7cff59-nzj5j\" (UID: \"cee2a14f-baa1-4dd7-b54c-29bdf673809e\") " pod="openstack/neutron-549d7cff59-nzj5j" Nov 26 08:46:32 crc kubenswrapper[4492]: I1126 08:46:32.758622 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/cee2a14f-baa1-4dd7-b54c-29bdf673809e-ovndb-tls-certs\") pod \"neutron-549d7cff59-nzj5j\" (UID: \"cee2a14f-baa1-4dd7-b54c-29bdf673809e\") " pod="openstack/neutron-549d7cff59-nzj5j" Nov 26 08:46:32 crc kubenswrapper[4492]: I1126 08:46:32.763220 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cknb6\" (UniqueName: \"kubernetes.io/projected/cee2a14f-baa1-4dd7-b54c-29bdf673809e-kube-api-access-cknb6\") pod \"neutron-549d7cff59-nzj5j\" (UID: 
\"cee2a14f-baa1-4dd7-b54c-29bdf673809e\") " pod="openstack/neutron-549d7cff59-nzj5j" Nov 26 08:46:32 crc kubenswrapper[4492]: I1126 08:46:32.764085 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cee2a14f-baa1-4dd7-b54c-29bdf673809e-public-tls-certs\") pod \"neutron-549d7cff59-nzj5j\" (UID: \"cee2a14f-baa1-4dd7-b54c-29bdf673809e\") " pod="openstack/neutron-549d7cff59-nzj5j" Nov 26 08:46:32 crc kubenswrapper[4492]: I1126 08:46:32.764922 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/cee2a14f-baa1-4dd7-b54c-29bdf673809e-config\") pod \"neutron-549d7cff59-nzj5j\" (UID: \"cee2a14f-baa1-4dd7-b54c-29bdf673809e\") " pod="openstack/neutron-549d7cff59-nzj5j" Nov 26 08:46:32 crc kubenswrapper[4492]: I1126 08:46:32.765284 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/cee2a14f-baa1-4dd7-b54c-29bdf673809e-internal-tls-certs\") pod \"neutron-549d7cff59-nzj5j\" (UID: \"cee2a14f-baa1-4dd7-b54c-29bdf673809e\") " pod="openstack/neutron-549d7cff59-nzj5j" Nov 26 08:46:32 crc kubenswrapper[4492]: I1126 08:46:32.925582 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-549d7cff59-nzj5j" Nov 26 08:46:33 crc kubenswrapper[4492]: I1126 08:46:33.794202 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-549d7cff59-nzj5j"] Nov 26 08:46:33 crc kubenswrapper[4492]: I1126 08:46:33.993537 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-549d7cff59-nzj5j" event={"ID":"cee2a14f-baa1-4dd7-b54c-29bdf673809e","Type":"ContainerStarted","Data":"15678fb91392645a9078c3057bcbb3ab827e51309fe92db9c4167e5dae0ddcd0"} Nov 26 08:46:35 crc kubenswrapper[4492]: I1126 08:46:35.002949 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-549d7cff59-nzj5j" event={"ID":"cee2a14f-baa1-4dd7-b54c-29bdf673809e","Type":"ContainerStarted","Data":"88652b123dc867cfe22359cf5da405d2fa0dc1be7c9286be018533147f24026f"} Nov 26 08:46:35 crc kubenswrapper[4492]: I1126 08:46:35.003414 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-549d7cff59-nzj5j" Nov 26 08:46:35 crc kubenswrapper[4492]: I1126 08:46:35.003431 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-549d7cff59-nzj5j" event={"ID":"cee2a14f-baa1-4dd7-b54c-29bdf673809e","Type":"ContainerStarted","Data":"c10c6ad0974b5ce4b53a0201d2a7a42efa0cb5f72c8190a04d548420d531a11b"} Nov 26 08:46:35 crc kubenswrapper[4492]: I1126 08:46:35.024942 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-549d7cff59-nzj5j" podStartSLOduration=3.024923259 podStartE2EDuration="3.024923259s" podCreationTimestamp="2025-11-26 08:46:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 08:46:35.019635068 +0000 UTC m=+7090.903523365" watchObservedRunningTime="2025-11-26 08:46:35.024923259 +0000 UTC m=+7090.908811558" Nov 26 08:46:42 crc kubenswrapper[4492]: I1126 08:46:42.441508 4492 scope.go:117] "RemoveContainer" containerID="ef96c0d9cafae12ebbbc7e77ec2f7f4753b6a2e65b5ea75228b7ab77555ee739" Nov 26 08:46:42 crc kubenswrapper[4492]: E1126 08:46:42.442925 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:46:55 crc kubenswrapper[4492]: I1126 08:46:55.439627 4492 scope.go:117] "RemoveContainer" containerID="ef96c0d9cafae12ebbbc7e77ec2f7f4753b6a2e65b5ea75228b7ab77555ee739" Nov 26 08:46:55 crc kubenswrapper[4492]: E1126 08:46:55.440578 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:47:02 crc kubenswrapper[4492]: I1126 08:47:02.942652 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-549d7cff59-nzj5j" Nov 26 08:47:03 crc kubenswrapper[4492]: I1126 08:47:03.026497 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-5fbdc58545-c2wg9"] Nov 26 08:47:03 crc kubenswrapper[4492]: I1126 08:47:03.027149 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-5fbdc58545-c2wg9" podUID="212dbc36-1b4c-4add-953b-70392589a53e" containerName="neutron-api" containerID="cri-o://d7744fa3c710939ec5016891f5f34c605135bc6a67488c47ad9a618a29aa496e" gracePeriod=30 Nov 26 08:47:03 crc kubenswrapper[4492]: I1126 08:47:03.027414 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-5fbdc58545-c2wg9" podUID="212dbc36-1b4c-4add-953b-70392589a53e" containerName="neutron-httpd" containerID="cri-o://619eb3041b12c862d0b441935fc9fc1a1c86130707ac4f001f31f338711f9b6b" gracePeriod=30 Nov 26 08:47:04 crc kubenswrapper[4492]: I1126 08:47:04.271148 4492 generic.go:334] "Generic (PLEG): container finished" podID="212dbc36-1b4c-4add-953b-70392589a53e" containerID="619eb3041b12c862d0b441935fc9fc1a1c86130707ac4f001f31f338711f9b6b" exitCode=0 Nov 26 08:47:04 crc kubenswrapper[4492]: I1126 08:47:04.271233 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5fbdc58545-c2wg9" event={"ID":"212dbc36-1b4c-4add-953b-70392589a53e","Type":"ContainerDied","Data":"619eb3041b12c862d0b441935fc9fc1a1c86130707ac4f001f31f338711f9b6b"} Nov 26 08:47:06 crc kubenswrapper[4492]: I1126 08:47:06.439091 4492 scope.go:117] "RemoveContainer" containerID="ef96c0d9cafae12ebbbc7e77ec2f7f4753b6a2e65b5ea75228b7ab77555ee739" Nov 26 08:47:06 crc kubenswrapper[4492]: E1126 08:47:06.439696 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:47:08 crc kubenswrapper[4492]: I1126 08:47:08.325303 4492 generic.go:334] "Generic (PLEG): container finished" podID="212dbc36-1b4c-4add-953b-70392589a53e" containerID="d7744fa3c710939ec5016891f5f34c605135bc6a67488c47ad9a618a29aa496e" exitCode=0 Nov 
26 08:47:08 crc kubenswrapper[4492]: I1126 08:47:08.325366 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5fbdc58545-c2wg9" event={"ID":"212dbc36-1b4c-4add-953b-70392589a53e","Type":"ContainerDied","Data":"d7744fa3c710939ec5016891f5f34c605135bc6a67488c47ad9a618a29aa496e"} Nov 26 08:47:08 crc kubenswrapper[4492]: I1126 08:47:08.619269 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-5fbdc58545-c2wg9" Nov 26 08:47:08 crc kubenswrapper[4492]: I1126 08:47:08.795301 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/212dbc36-1b4c-4add-953b-70392589a53e-internal-tls-certs\") pod \"212dbc36-1b4c-4add-953b-70392589a53e\" (UID: \"212dbc36-1b4c-4add-953b-70392589a53e\") " Nov 26 08:47:08 crc kubenswrapper[4492]: I1126 08:47:08.795400 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g2nv2\" (UniqueName: \"kubernetes.io/projected/212dbc36-1b4c-4add-953b-70392589a53e-kube-api-access-g2nv2\") pod \"212dbc36-1b4c-4add-953b-70392589a53e\" (UID: \"212dbc36-1b4c-4add-953b-70392589a53e\") " Nov 26 08:47:08 crc kubenswrapper[4492]: I1126 08:47:08.795462 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/212dbc36-1b4c-4add-953b-70392589a53e-config\") pod \"212dbc36-1b4c-4add-953b-70392589a53e\" (UID: \"212dbc36-1b4c-4add-953b-70392589a53e\") " Nov 26 08:47:08 crc kubenswrapper[4492]: I1126 08:47:08.795585 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/212dbc36-1b4c-4add-953b-70392589a53e-combined-ca-bundle\") pod \"212dbc36-1b4c-4add-953b-70392589a53e\" (UID: \"212dbc36-1b4c-4add-953b-70392589a53e\") " Nov 26 08:47:08 crc kubenswrapper[4492]: I1126 08:47:08.795613 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/212dbc36-1b4c-4add-953b-70392589a53e-httpd-config\") pod \"212dbc36-1b4c-4add-953b-70392589a53e\" (UID: \"212dbc36-1b4c-4add-953b-70392589a53e\") " Nov 26 08:47:08 crc kubenswrapper[4492]: I1126 08:47:08.795656 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/212dbc36-1b4c-4add-953b-70392589a53e-public-tls-certs\") pod \"212dbc36-1b4c-4add-953b-70392589a53e\" (UID: \"212dbc36-1b4c-4add-953b-70392589a53e\") " Nov 26 08:47:08 crc kubenswrapper[4492]: I1126 08:47:08.795711 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/212dbc36-1b4c-4add-953b-70392589a53e-ovndb-tls-certs\") pod \"212dbc36-1b4c-4add-953b-70392589a53e\" (UID: \"212dbc36-1b4c-4add-953b-70392589a53e\") " Nov 26 08:47:08 crc kubenswrapper[4492]: I1126 08:47:08.806536 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/212dbc36-1b4c-4add-953b-70392589a53e-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "212dbc36-1b4c-4add-953b-70392589a53e" (UID: "212dbc36-1b4c-4add-953b-70392589a53e"). InnerVolumeSpecName "httpd-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:47:08 crc kubenswrapper[4492]: I1126 08:47:08.807167 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/212dbc36-1b4c-4add-953b-70392589a53e-kube-api-access-g2nv2" (OuterVolumeSpecName: "kube-api-access-g2nv2") pod "212dbc36-1b4c-4add-953b-70392589a53e" (UID: "212dbc36-1b4c-4add-953b-70392589a53e"). InnerVolumeSpecName "kube-api-access-g2nv2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:47:08 crc kubenswrapper[4492]: I1126 08:47:08.858961 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/212dbc36-1b4c-4add-953b-70392589a53e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "212dbc36-1b4c-4add-953b-70392589a53e" (UID: "212dbc36-1b4c-4add-953b-70392589a53e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:47:08 crc kubenswrapper[4492]: I1126 08:47:08.865521 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/212dbc36-1b4c-4add-953b-70392589a53e-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "212dbc36-1b4c-4add-953b-70392589a53e" (UID: "212dbc36-1b4c-4add-953b-70392589a53e"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:47:08 crc kubenswrapper[4492]: I1126 08:47:08.866070 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/212dbc36-1b4c-4add-953b-70392589a53e-config" (OuterVolumeSpecName: "config") pod "212dbc36-1b4c-4add-953b-70392589a53e" (UID: "212dbc36-1b4c-4add-953b-70392589a53e"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:47:08 crc kubenswrapper[4492]: I1126 08:47:08.872650 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/212dbc36-1b4c-4add-953b-70392589a53e-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "212dbc36-1b4c-4add-953b-70392589a53e" (UID: "212dbc36-1b4c-4add-953b-70392589a53e"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:47:08 crc kubenswrapper[4492]: I1126 08:47:08.879959 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/212dbc36-1b4c-4add-953b-70392589a53e-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "212dbc36-1b4c-4add-953b-70392589a53e" (UID: "212dbc36-1b4c-4add-953b-70392589a53e"). InnerVolumeSpecName "ovndb-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 08:47:08 crc kubenswrapper[4492]: I1126 08:47:08.898655 4492 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/212dbc36-1b4c-4add-953b-70392589a53e-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 08:47:08 crc kubenswrapper[4492]: I1126 08:47:08.898683 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g2nv2\" (UniqueName: \"kubernetes.io/projected/212dbc36-1b4c-4add-953b-70392589a53e-kube-api-access-g2nv2\") on node \"crc\" DevicePath \"\"" Nov 26 08:47:08 crc kubenswrapper[4492]: I1126 08:47:08.898695 4492 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/212dbc36-1b4c-4add-953b-70392589a53e-config\") on node \"crc\" DevicePath \"\"" Nov 26 08:47:08 crc kubenswrapper[4492]: I1126 08:47:08.898708 4492 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/212dbc36-1b4c-4add-953b-70392589a53e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 08:47:08 crc kubenswrapper[4492]: I1126 08:47:08.898716 4492 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/212dbc36-1b4c-4add-953b-70392589a53e-httpd-config\") on node \"crc\" DevicePath \"\"" Nov 26 08:47:08 crc kubenswrapper[4492]: I1126 08:47:08.898723 4492 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/212dbc36-1b4c-4add-953b-70392589a53e-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 08:47:08 crc kubenswrapper[4492]: I1126 08:47:08.898731 4492 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/212dbc36-1b4c-4add-953b-70392589a53e-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 08:47:09 crc kubenswrapper[4492]: I1126 08:47:09.336983 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5fbdc58545-c2wg9" event={"ID":"212dbc36-1b4c-4add-953b-70392589a53e","Type":"ContainerDied","Data":"ff55702af4ca74dda865b393806d543b28810a2d8fb2b0f2c4e6c9bbbe56a572"} Nov 26 08:47:09 crc kubenswrapper[4492]: I1126 08:47:09.337042 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-5fbdc58545-c2wg9" Nov 26 08:47:09 crc kubenswrapper[4492]: I1126 08:47:09.338044 4492 scope.go:117] "RemoveContainer" containerID="619eb3041b12c862d0b441935fc9fc1a1c86130707ac4f001f31f338711f9b6b" Nov 26 08:47:09 crc kubenswrapper[4492]: I1126 08:47:09.377040 4492 scope.go:117] "RemoveContainer" containerID="d7744fa3c710939ec5016891f5f34c605135bc6a67488c47ad9a618a29aa496e" Nov 26 08:47:09 crc kubenswrapper[4492]: I1126 08:47:09.382623 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-5fbdc58545-c2wg9"] Nov 26 08:47:09 crc kubenswrapper[4492]: I1126 08:47:09.390266 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-5fbdc58545-c2wg9"] Nov 26 08:47:10 crc kubenswrapper[4492]: I1126 08:47:10.450693 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="212dbc36-1b4c-4add-953b-70392589a53e" path="/var/lib/kubelet/pods/212dbc36-1b4c-4add-953b-70392589a53e/volumes" Nov 26 08:47:17 crc kubenswrapper[4492]: I1126 08:47:17.439233 4492 scope.go:117] "RemoveContainer" containerID="ef96c0d9cafae12ebbbc7e77ec2f7f4753b6a2e65b5ea75228b7ab77555ee739" Nov 26 08:47:17 crc kubenswrapper[4492]: E1126 08:47:17.440355 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:47:29 crc kubenswrapper[4492]: I1126 08:47:29.438559 4492 scope.go:117] "RemoveContainer" containerID="ef96c0d9cafae12ebbbc7e77ec2f7f4753b6a2e65b5ea75228b7ab77555ee739" Nov 26 08:47:29 crc kubenswrapper[4492]: E1126 08:47:29.439367 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:47:40 crc kubenswrapper[4492]: I1126 08:47:40.439207 4492 scope.go:117] "RemoveContainer" containerID="ef96c0d9cafae12ebbbc7e77ec2f7f4753b6a2e65b5ea75228b7ab77555ee739" Nov 26 08:47:40 crc kubenswrapper[4492]: E1126 08:47:40.440028 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:47:54 crc kubenswrapper[4492]: I1126 08:47:54.445791 4492 scope.go:117] "RemoveContainer" containerID="ef96c0d9cafae12ebbbc7e77ec2f7f4753b6a2e65b5ea75228b7ab77555ee739" Nov 26 08:47:54 crc kubenswrapper[4492]: E1126 08:47:54.446616 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:48:09 crc kubenswrapper[4492]: I1126 08:48:09.438601 4492 scope.go:117] "RemoveContainer" containerID="ef96c0d9cafae12ebbbc7e77ec2f7f4753b6a2e65b5ea75228b7ab77555ee739" Nov 26 08:48:09 crc kubenswrapper[4492]: E1126 08:48:09.440450 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:48:21 crc kubenswrapper[4492]: I1126 08:48:21.439566 4492 scope.go:117] "RemoveContainer" containerID="ef96c0d9cafae12ebbbc7e77ec2f7f4753b6a2e65b5ea75228b7ab77555ee739" Nov 26 08:48:21 crc kubenswrapper[4492]: E1126 08:48:21.440581 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:48:33 crc kubenswrapper[4492]: I1126 08:48:33.438850 4492 scope.go:117] "RemoveContainer" containerID="ef96c0d9cafae12ebbbc7e77ec2f7f4753b6a2e65b5ea75228b7ab77555ee739" Nov 26 08:48:33 crc kubenswrapper[4492]: E1126 08:48:33.439583 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:48:47 crc kubenswrapper[4492]: I1126 08:48:47.440383 4492 scope.go:117] "RemoveContainer" containerID="ef96c0d9cafae12ebbbc7e77ec2f7f4753b6a2e65b5ea75228b7ab77555ee739" Nov 26 08:48:47 crc kubenswrapper[4492]: E1126 08:48:47.441540 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:48:58 crc kubenswrapper[4492]: I1126 08:48:58.438992 4492 scope.go:117] "RemoveContainer" containerID="ef96c0d9cafae12ebbbc7e77ec2f7f4753b6a2e65b5ea75228b7ab77555ee739" Nov 26 08:48:58 crc kubenswrapper[4492]: E1126 08:48:58.439867 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" 
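The machine-config-daemon entries repeating above are the CrashLoopBackOff loop: each periodic sync attempt is refused because the container's restart backoff has not yet expired, and the delay has already hit its ceiling ("back-off 5m0s"). A sketch of that doubling-with-cap schedule; the 10s initial delay and 2x factor are assumed defaults (only the 5m cap appears in the log), and the code is illustrative rather than kubelet source.

package main

import (
	"fmt"
	"time"
)

func main() {
	const (
		initial  = 10 * time.Second // assumed initial backoff
		maxDelay = 5 * time.Minute  // the "back-off 5m0s" ceiling in the log
	)
	backoff := initial
	for i := 0; i < 7; i++ {
		fmt.Printf("restart %d: wait %v before StartContainer\n", i+1, backoff)
		backoff *= 2
		if backoff > maxDelay {
			backoff = maxDelay // further failures keep the 5m0s delay
		}
	}
}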
podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:49:10 crc kubenswrapper[4492]: I1126 08:49:10.439635 4492 scope.go:117] "RemoveContainer" containerID="ef96c0d9cafae12ebbbc7e77ec2f7f4753b6a2e65b5ea75228b7ab77555ee739" Nov 26 08:49:10 crc kubenswrapper[4492]: E1126 08:49:10.440719 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:49:21 crc kubenswrapper[4492]: I1126 08:49:21.438345 4492 scope.go:117] "RemoveContainer" containerID="ef96c0d9cafae12ebbbc7e77ec2f7f4753b6a2e65b5ea75228b7ab77555ee739" Nov 26 08:49:21 crc kubenswrapper[4492]: E1126 08:49:21.440147 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:49:33 crc kubenswrapper[4492]: I1126 08:49:33.439785 4492 scope.go:117] "RemoveContainer" containerID="ef96c0d9cafae12ebbbc7e77ec2f7f4753b6a2e65b5ea75228b7ab77555ee739" Nov 26 08:49:33 crc kubenswrapper[4492]: E1126 08:49:33.440851 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:49:45 crc kubenswrapper[4492]: I1126 08:49:45.439417 4492 scope.go:117] "RemoveContainer" containerID="ef96c0d9cafae12ebbbc7e77ec2f7f4753b6a2e65b5ea75228b7ab77555ee739" Nov 26 08:49:45 crc kubenswrapper[4492]: E1126 08:49:45.440454 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:49:53 crc kubenswrapper[4492]: I1126 08:49:53.879051 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-qsxxl"] Nov 26 08:49:53 crc kubenswrapper[4492]: E1126 08:49:53.880831 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="212dbc36-1b4c-4add-953b-70392589a53e" containerName="neutron-httpd" Nov 26 08:49:53 crc kubenswrapper[4492]: I1126 08:49:53.880918 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="212dbc36-1b4c-4add-953b-70392589a53e" containerName="neutron-httpd" Nov 26 08:49:53 crc kubenswrapper[4492]: E1126 08:49:53.880985 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="212dbc36-1b4c-4add-953b-70392589a53e" containerName="neutron-api" Nov 26 08:49:53 crc kubenswrapper[4492]: I1126 
Nov 26 08:49:53 crc kubenswrapper[4492]: I1126 08:49:53.881038 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="212dbc36-1b4c-4add-953b-70392589a53e" containerName="neutron-api"
Nov 26 08:49:53 crc kubenswrapper[4492]: I1126 08:49:53.881375 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="212dbc36-1b4c-4add-953b-70392589a53e" containerName="neutron-api"
Nov 26 08:49:53 crc kubenswrapper[4492]: I1126 08:49:53.881462 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="212dbc36-1b4c-4add-953b-70392589a53e" containerName="neutron-httpd"
Nov 26 08:49:53 crc kubenswrapper[4492]: I1126 08:49:53.882782 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-qsxxl"
Nov 26 08:49:53 crc kubenswrapper[4492]: I1126 08:49:53.889583 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-qsxxl"]
Nov 26 08:49:53 crc kubenswrapper[4492]: I1126 08:49:53.897575 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4a376a15-0af7-4ebe-9a22-d22c17f28c8a-utilities\") pod \"community-operators-qsxxl\" (UID: \"4a376a15-0af7-4ebe-9a22-d22c17f28c8a\") " pod="openshift-marketplace/community-operators-qsxxl"
Nov 26 08:49:53 crc kubenswrapper[4492]: I1126 08:49:53.897744 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4a376a15-0af7-4ebe-9a22-d22c17f28c8a-catalog-content\") pod \"community-operators-qsxxl\" (UID: \"4a376a15-0af7-4ebe-9a22-d22c17f28c8a\") " pod="openshift-marketplace/community-operators-qsxxl"
Nov 26 08:49:53 crc kubenswrapper[4492]: I1126 08:49:53.897833 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-82gvh\" (UniqueName: \"kubernetes.io/projected/4a376a15-0af7-4ebe-9a22-d22c17f28c8a-kube-api-access-82gvh\") pod \"community-operators-qsxxl\" (UID: \"4a376a15-0af7-4ebe-9a22-d22c17f28c8a\") " pod="openshift-marketplace/community-operators-qsxxl"
Nov 26 08:49:54 crc kubenswrapper[4492]: I1126 08:49:54.000287 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4a376a15-0af7-4ebe-9a22-d22c17f28c8a-utilities\") pod \"community-operators-qsxxl\" (UID: \"4a376a15-0af7-4ebe-9a22-d22c17f28c8a\") " pod="openshift-marketplace/community-operators-qsxxl"
Nov 26 08:49:54 crc kubenswrapper[4492]: I1126 08:49:54.000341 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4a376a15-0af7-4ebe-9a22-d22c17f28c8a-catalog-content\") pod \"community-operators-qsxxl\" (UID: \"4a376a15-0af7-4ebe-9a22-d22c17f28c8a\") " pod="openshift-marketplace/community-operators-qsxxl"
Nov 26 08:49:54 crc kubenswrapper[4492]: I1126 08:49:54.000400 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-82gvh\" (UniqueName: \"kubernetes.io/projected/4a376a15-0af7-4ebe-9a22-d22c17f28c8a-kube-api-access-82gvh\") pod \"community-operators-qsxxl\" (UID: \"4a376a15-0af7-4ebe-9a22-d22c17f28c8a\") " pod="openshift-marketplace/community-operators-qsxxl"
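The mount side for the newly added catalog pod mirrors the unmount flow earlier: VerifyControllerAttachedVolume first, then MountVolume/SetUp per volume. For empty-dir and projected volumes there is no controller attach step to wait for, which is why the verify stage clears immediately here. An illustrative sketch only (invented types, not kubelet source):

package main

import "fmt"

type volumeSpec struct{ name, plugin, pod string }

// mountAll walks the two phases visible in the log: verify attachment,
// then set up each volume for the pod.
func mountAll(vols []volumeSpec) {
	for _, v := range vols {
		// empty-dir and projected volumes are node-local, so the
		// "attached" check succeeds without any controller involvement.
		fmt.Printf("VerifyControllerAttachedVolume started for %q pod %q\n", v.name, v.pod)
	}
	for _, v := range vols {
		fmt.Printf("MountVolume.SetUp succeeded for volume %q (plugin %s)\n", v.name, v.plugin)
	}
}

func main() {
	mountAll([]volumeSpec{
		{"utilities", "kubernetes.io/empty-dir", "community-operators-qsxxl"},
		{"catalog-content", "kubernetes.io/empty-dir", "community-operators-qsxxl"},
		{"kube-api-access-82gvh", "kubernetes.io/projected", "community-operators-qsxxl"},
	})
}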
\"kubernetes.io/empty-dir/4a376a15-0af7-4ebe-9a22-d22c17f28c8a-utilities\") pod \"community-operators-qsxxl\" (UID: \"4a376a15-0af7-4ebe-9a22-d22c17f28c8a\") " pod="openshift-marketplace/community-operators-qsxxl" Nov 26 08:49:54 crc kubenswrapper[4492]: I1126 08:49:54.001383 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4a376a15-0af7-4ebe-9a22-d22c17f28c8a-catalog-content\") pod \"community-operators-qsxxl\" (UID: \"4a376a15-0af7-4ebe-9a22-d22c17f28c8a\") " pod="openshift-marketplace/community-operators-qsxxl" Nov 26 08:49:54 crc kubenswrapper[4492]: I1126 08:49:54.020354 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-82gvh\" (UniqueName: \"kubernetes.io/projected/4a376a15-0af7-4ebe-9a22-d22c17f28c8a-kube-api-access-82gvh\") pod \"community-operators-qsxxl\" (UID: \"4a376a15-0af7-4ebe-9a22-d22c17f28c8a\") " pod="openshift-marketplace/community-operators-qsxxl" Nov 26 08:49:54 crc kubenswrapper[4492]: I1126 08:49:54.201306 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-qsxxl" Nov 26 08:49:54 crc kubenswrapper[4492]: I1126 08:49:54.682705 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-qsxxl"] Nov 26 08:49:54 crc kubenswrapper[4492]: I1126 08:49:54.991414 4492 generic.go:334] "Generic (PLEG): container finished" podID="4a376a15-0af7-4ebe-9a22-d22c17f28c8a" containerID="0bde68a32907dc6362b297b4973a43c0d0e840cebae9f682098c85c02cbc38f5" exitCode=0 Nov 26 08:49:54 crc kubenswrapper[4492]: I1126 08:49:54.991499 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qsxxl" event={"ID":"4a376a15-0af7-4ebe-9a22-d22c17f28c8a","Type":"ContainerDied","Data":"0bde68a32907dc6362b297b4973a43c0d0e840cebae9f682098c85c02cbc38f5"} Nov 26 08:49:54 crc kubenswrapper[4492]: I1126 08:49:54.991836 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qsxxl" event={"ID":"4a376a15-0af7-4ebe-9a22-d22c17f28c8a","Type":"ContainerStarted","Data":"13435057f3bd4a35277f3bbff7ea27abf25d00a55d6ad6c9c179e945b2c5c07f"} Nov 26 08:49:54 crc kubenswrapper[4492]: I1126 08:49:54.996871 4492 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 26 08:49:56 crc kubenswrapper[4492]: I1126 08:49:56.015048 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qsxxl" event={"ID":"4a376a15-0af7-4ebe-9a22-d22c17f28c8a","Type":"ContainerStarted","Data":"992397b301d862a80e7ba6b5d1d9c156125293e6002d0e49fc390bd52d219e89"} Nov 26 08:49:57 crc kubenswrapper[4492]: I1126 08:49:57.024633 4492 generic.go:334] "Generic (PLEG): container finished" podID="4a376a15-0af7-4ebe-9a22-d22c17f28c8a" containerID="992397b301d862a80e7ba6b5d1d9c156125293e6002d0e49fc390bd52d219e89" exitCode=0 Nov 26 08:49:57 crc kubenswrapper[4492]: I1126 08:49:57.024713 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qsxxl" event={"ID":"4a376a15-0af7-4ebe-9a22-d22c17f28c8a","Type":"ContainerDied","Data":"992397b301d862a80e7ba6b5d1d9c156125293e6002d0e49fc390bd52d219e89"} Nov 26 08:49:57 crc kubenswrapper[4492]: I1126 08:49:57.438901 4492 scope.go:117] "RemoveContainer" containerID="ef96c0d9cafae12ebbbc7e77ec2f7f4753b6a2e65b5ea75228b7ab77555ee739" Nov 26 08:49:57 crc 
kubenswrapper[4492]: E1126 08:49:57.439438 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:49:58 crc kubenswrapper[4492]: I1126 08:49:58.043073 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qsxxl" event={"ID":"4a376a15-0af7-4ebe-9a22-d22c17f28c8a","Type":"ContainerStarted","Data":"b894ab99a2b78e9f757a3751b9837285cf5e5ca0c80d6884f94b17abcaa88e42"} Nov 26 08:49:58 crc kubenswrapper[4492]: I1126 08:49:58.088022 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-qsxxl" podStartSLOduration=2.572537425 podStartE2EDuration="5.087984933s" podCreationTimestamp="2025-11-26 08:49:53 +0000 UTC" firstStartedPulling="2025-11-26 08:49:54.993759634 +0000 UTC m=+7290.877647933" lastFinishedPulling="2025-11-26 08:49:57.509207143 +0000 UTC m=+7293.393095441" observedRunningTime="2025-11-26 08:49:58.070603324 +0000 UTC m=+7293.954491621" watchObservedRunningTime="2025-11-26 08:49:58.087984933 +0000 UTC m=+7293.971873231" Nov 26 08:50:04 crc kubenswrapper[4492]: I1126 08:50:04.201984 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-qsxxl" Nov 26 08:50:04 crc kubenswrapper[4492]: I1126 08:50:04.202439 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-qsxxl" Nov 26 08:50:04 crc kubenswrapper[4492]: I1126 08:50:04.244790 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-qsxxl" Nov 26 08:50:05 crc kubenswrapper[4492]: I1126 08:50:05.169213 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-qsxxl" Nov 26 08:50:05 crc kubenswrapper[4492]: I1126 08:50:05.221158 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-qsxxl"] Nov 26 08:50:07 crc kubenswrapper[4492]: I1126 08:50:07.142259 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-qsxxl" podUID="4a376a15-0af7-4ebe-9a22-d22c17f28c8a" containerName="registry-server" containerID="cri-o://b894ab99a2b78e9f757a3751b9837285cf5e5ca0c80d6884f94b17abcaa88e42" gracePeriod=2 Nov 26 08:50:07 crc kubenswrapper[4492]: I1126 08:50:07.718347 4492 util.go:48] "No ready sandbox for pod can be found. 
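The pod_startup_latency_tracker entry a few lines up packs the pod's whole startup timeline into one record. Reading it, podStartE2EDuration is watchObservedRunningTime minus podCreationTimestamp, and podStartSLOduration is that figure minus the image-pull window (lastFinishedPulling minus firstStartedPulling). Redoing the arithmetic with the logged timestamps reproduces both numbers; this is a reading of the logged values, not a claim about the tracker's exact source.

package main

import (
	"fmt"
	"time"
)

func mustParse(s string) time.Time {
	t, err := time.Parse(time.RFC3339Nano, s)
	if err != nil {
		panic(err)
	}
	return t
}

func main() {
	created := mustParse("2025-11-26T08:49:53Z")                // podCreationTimestamp
	firstPull := mustParse("2025-11-26T08:49:54.993759634Z")   // firstStartedPulling
	lastPull := mustParse("2025-11-26T08:49:57.509207143Z")    // lastFinishedPulling
	running := mustParse("2025-11-26T08:49:58.087984933Z")     // watchObservedRunningTime

	e2e := running.Sub(created)          // 5.087984933s, matching podStartE2EDuration
	slo := e2e - lastPull.Sub(firstPull) // pull time excluded: 2.572537424s, the logged
	fmt.Println(e2e, slo)                // 2.572537425 to within a nanosecond of rounding
}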
Need to start a new one" pod="openshift-marketplace/community-operators-qsxxl" Nov 26 08:50:07 crc kubenswrapper[4492]: I1126 08:50:07.730091 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-82gvh\" (UniqueName: \"kubernetes.io/projected/4a376a15-0af7-4ebe-9a22-d22c17f28c8a-kube-api-access-82gvh\") pod \"4a376a15-0af7-4ebe-9a22-d22c17f28c8a\" (UID: \"4a376a15-0af7-4ebe-9a22-d22c17f28c8a\") " Nov 26 08:50:07 crc kubenswrapper[4492]: I1126 08:50:07.730233 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4a376a15-0af7-4ebe-9a22-d22c17f28c8a-catalog-content\") pod \"4a376a15-0af7-4ebe-9a22-d22c17f28c8a\" (UID: \"4a376a15-0af7-4ebe-9a22-d22c17f28c8a\") " Nov 26 08:50:07 crc kubenswrapper[4492]: I1126 08:50:07.730331 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4a376a15-0af7-4ebe-9a22-d22c17f28c8a-utilities\") pod \"4a376a15-0af7-4ebe-9a22-d22c17f28c8a\" (UID: \"4a376a15-0af7-4ebe-9a22-d22c17f28c8a\") " Nov 26 08:50:07 crc kubenswrapper[4492]: I1126 08:50:07.731455 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4a376a15-0af7-4ebe-9a22-d22c17f28c8a-utilities" (OuterVolumeSpecName: "utilities") pod "4a376a15-0af7-4ebe-9a22-d22c17f28c8a" (UID: "4a376a15-0af7-4ebe-9a22-d22c17f28c8a"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 08:50:07 crc kubenswrapper[4492]: I1126 08:50:07.737551 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4a376a15-0af7-4ebe-9a22-d22c17f28c8a-kube-api-access-82gvh" (OuterVolumeSpecName: "kube-api-access-82gvh") pod "4a376a15-0af7-4ebe-9a22-d22c17f28c8a" (UID: "4a376a15-0af7-4ebe-9a22-d22c17f28c8a"). InnerVolumeSpecName "kube-api-access-82gvh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:50:07 crc kubenswrapper[4492]: I1126 08:50:07.792983 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4a376a15-0af7-4ebe-9a22-d22c17f28c8a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4a376a15-0af7-4ebe-9a22-d22c17f28c8a" (UID: "4a376a15-0af7-4ebe-9a22-d22c17f28c8a"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 08:50:07 crc kubenswrapper[4492]: I1126 08:50:07.833785 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-82gvh\" (UniqueName: \"kubernetes.io/projected/4a376a15-0af7-4ebe-9a22-d22c17f28c8a-kube-api-access-82gvh\") on node \"crc\" DevicePath \"\"" Nov 26 08:50:07 crc kubenswrapper[4492]: I1126 08:50:07.833825 4492 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4a376a15-0af7-4ebe-9a22-d22c17f28c8a-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 08:50:07 crc kubenswrapper[4492]: I1126 08:50:07.833838 4492 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4a376a15-0af7-4ebe-9a22-d22c17f28c8a-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 08:50:08 crc kubenswrapper[4492]: I1126 08:50:08.153266 4492 generic.go:334] "Generic (PLEG): container finished" podID="4a376a15-0af7-4ebe-9a22-d22c17f28c8a" containerID="b894ab99a2b78e9f757a3751b9837285cf5e5ca0c80d6884f94b17abcaa88e42" exitCode=0 Nov 26 08:50:08 crc kubenswrapper[4492]: I1126 08:50:08.153365 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qsxxl" event={"ID":"4a376a15-0af7-4ebe-9a22-d22c17f28c8a","Type":"ContainerDied","Data":"b894ab99a2b78e9f757a3751b9837285cf5e5ca0c80d6884f94b17abcaa88e42"} Nov 26 08:50:08 crc kubenswrapper[4492]: I1126 08:50:08.153381 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-qsxxl" Nov 26 08:50:08 crc kubenswrapper[4492]: I1126 08:50:08.153425 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qsxxl" event={"ID":"4a376a15-0af7-4ebe-9a22-d22c17f28c8a","Type":"ContainerDied","Data":"13435057f3bd4a35277f3bbff7ea27abf25d00a55d6ad6c9c179e945b2c5c07f"} Nov 26 08:50:08 crc kubenswrapper[4492]: I1126 08:50:08.153454 4492 scope.go:117] "RemoveContainer" containerID="b894ab99a2b78e9f757a3751b9837285cf5e5ca0c80d6884f94b17abcaa88e42" Nov 26 08:50:08 crc kubenswrapper[4492]: I1126 08:50:08.188080 4492 scope.go:117] "RemoveContainer" containerID="992397b301d862a80e7ba6b5d1d9c156125293e6002d0e49fc390bd52d219e89" Nov 26 08:50:08 crc kubenswrapper[4492]: I1126 08:50:08.201340 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-qsxxl"] Nov 26 08:50:08 crc kubenswrapper[4492]: I1126 08:50:08.216568 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-qsxxl"] Nov 26 08:50:08 crc kubenswrapper[4492]: I1126 08:50:08.218439 4492 scope.go:117] "RemoveContainer" containerID="0bde68a32907dc6362b297b4973a43c0d0e840cebae9f682098c85c02cbc38f5" Nov 26 08:50:08 crc kubenswrapper[4492]: I1126 08:50:08.261820 4492 scope.go:117] "RemoveContainer" containerID="b894ab99a2b78e9f757a3751b9837285cf5e5ca0c80d6884f94b17abcaa88e42" Nov 26 08:50:08 crc kubenswrapper[4492]: E1126 08:50:08.267250 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b894ab99a2b78e9f757a3751b9837285cf5e5ca0c80d6884f94b17abcaa88e42\": container with ID starting with b894ab99a2b78e9f757a3751b9837285cf5e5ca0c80d6884f94b17abcaa88e42 not found: ID does not exist" containerID="b894ab99a2b78e9f757a3751b9837285cf5e5ca0c80d6884f94b17abcaa88e42" Nov 26 08:50:08 crc kubenswrapper[4492]: I1126 08:50:08.267310 
4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b894ab99a2b78e9f757a3751b9837285cf5e5ca0c80d6884f94b17abcaa88e42"} err="failed to get container status \"b894ab99a2b78e9f757a3751b9837285cf5e5ca0c80d6884f94b17abcaa88e42\": rpc error: code = NotFound desc = could not find container \"b894ab99a2b78e9f757a3751b9837285cf5e5ca0c80d6884f94b17abcaa88e42\": container with ID starting with b894ab99a2b78e9f757a3751b9837285cf5e5ca0c80d6884f94b17abcaa88e42 not found: ID does not exist" Nov 26 08:50:08 crc kubenswrapper[4492]: I1126 08:50:08.267342 4492 scope.go:117] "RemoveContainer" containerID="992397b301d862a80e7ba6b5d1d9c156125293e6002d0e49fc390bd52d219e89" Nov 26 08:50:08 crc kubenswrapper[4492]: E1126 08:50:08.268861 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"992397b301d862a80e7ba6b5d1d9c156125293e6002d0e49fc390bd52d219e89\": container with ID starting with 992397b301d862a80e7ba6b5d1d9c156125293e6002d0e49fc390bd52d219e89 not found: ID does not exist" containerID="992397b301d862a80e7ba6b5d1d9c156125293e6002d0e49fc390bd52d219e89" Nov 26 08:50:08 crc kubenswrapper[4492]: I1126 08:50:08.268900 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"992397b301d862a80e7ba6b5d1d9c156125293e6002d0e49fc390bd52d219e89"} err="failed to get container status \"992397b301d862a80e7ba6b5d1d9c156125293e6002d0e49fc390bd52d219e89\": rpc error: code = NotFound desc = could not find container \"992397b301d862a80e7ba6b5d1d9c156125293e6002d0e49fc390bd52d219e89\": container with ID starting with 992397b301d862a80e7ba6b5d1d9c156125293e6002d0e49fc390bd52d219e89 not found: ID does not exist" Nov 26 08:50:08 crc kubenswrapper[4492]: I1126 08:50:08.268928 4492 scope.go:117] "RemoveContainer" containerID="0bde68a32907dc6362b297b4973a43c0d0e840cebae9f682098c85c02cbc38f5" Nov 26 08:50:08 crc kubenswrapper[4492]: E1126 08:50:08.269850 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0bde68a32907dc6362b297b4973a43c0d0e840cebae9f682098c85c02cbc38f5\": container with ID starting with 0bde68a32907dc6362b297b4973a43c0d0e840cebae9f682098c85c02cbc38f5 not found: ID does not exist" containerID="0bde68a32907dc6362b297b4973a43c0d0e840cebae9f682098c85c02cbc38f5" Nov 26 08:50:08 crc kubenswrapper[4492]: I1126 08:50:08.269925 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0bde68a32907dc6362b297b4973a43c0d0e840cebae9f682098c85c02cbc38f5"} err="failed to get container status \"0bde68a32907dc6362b297b4973a43c0d0e840cebae9f682098c85c02cbc38f5\": rpc error: code = NotFound desc = could not find container \"0bde68a32907dc6362b297b4973a43c0d0e840cebae9f682098c85c02cbc38f5\": container with ID starting with 0bde68a32907dc6362b297b4973a43c0d0e840cebae9f682098c85c02cbc38f5 not found: ID does not exist" Nov 26 08:50:08 crc kubenswrapper[4492]: I1126 08:50:08.452070 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4a376a15-0af7-4ebe-9a22-d22c17f28c8a" path="/var/lib/kubelet/pods/4a376a15-0af7-4ebe-9a22-d22c17f28c8a/volumes" Nov 26 08:50:09 crc kubenswrapper[4492]: I1126 08:50:09.439222 4492 scope.go:117] "RemoveContainer" containerID="ef96c0d9cafae12ebbbc7e77ec2f7f4753b6a2e65b5ea75228b7ab77555ee739" Nov 26 08:50:09 crc kubenswrapper[4492]: E1126 08:50:09.439890 4492 pod_workers.go:1301] "Error syncing pod, skipping" 
err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:50:20 crc kubenswrapper[4492]: I1126 08:50:20.439816 4492 scope.go:117] "RemoveContainer" containerID="ef96c0d9cafae12ebbbc7e77ec2f7f4753b6a2e65b5ea75228b7ab77555ee739" Nov 26 08:50:20 crc kubenswrapper[4492]: E1126 08:50:20.440977 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:50:32 crc kubenswrapper[4492]: I1126 08:50:32.438510 4492 scope.go:117] "RemoveContainer" containerID="ef96c0d9cafae12ebbbc7e77ec2f7f4753b6a2e65b5ea75228b7ab77555ee739" Nov 26 08:50:32 crc kubenswrapper[4492]: E1126 08:50:32.439398 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:50:46 crc kubenswrapper[4492]: I1126 08:50:46.439266 4492 scope.go:117] "RemoveContainer" containerID="ef96c0d9cafae12ebbbc7e77ec2f7f4753b6a2e65b5ea75228b7ab77555ee739" Nov 26 08:50:46 crc kubenswrapper[4492]: E1126 08:50:46.440439 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:50:57 crc kubenswrapper[4492]: I1126 08:50:57.464423 4492 scope.go:117] "RemoveContainer" containerID="ef96c0d9cafae12ebbbc7e77ec2f7f4753b6a2e65b5ea75228b7ab77555ee739" Nov 26 08:50:58 crc kubenswrapper[4492]: I1126 08:50:58.663413 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" event={"ID":"04bf18ad-d2a1-4b30-a3fa-2b6247363c82","Type":"ContainerStarted","Data":"667202a17aa587122fc36bffa4a3b667ed3cc7a7aac3f59d8e5bc6b6531921b3"} Nov 26 08:52:00 crc kubenswrapper[4492]: I1126 08:52:00.715765 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-dcfvl"] Nov 26 08:52:00 crc kubenswrapper[4492]: E1126 08:52:00.721023 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4a376a15-0af7-4ebe-9a22-d22c17f28c8a" containerName="registry-server" Nov 26 08:52:00 crc kubenswrapper[4492]: I1126 08:52:00.721059 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="4a376a15-0af7-4ebe-9a22-d22c17f28c8a" containerName="registry-server" Nov 26 08:52:00 crc kubenswrapper[4492]: E1126 08:52:00.721090 4492 
cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4a376a15-0af7-4ebe-9a22-d22c17f28c8a" containerName="extract-utilities" Nov 26 08:52:00 crc kubenswrapper[4492]: I1126 08:52:00.721097 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="4a376a15-0af7-4ebe-9a22-d22c17f28c8a" containerName="extract-utilities" Nov 26 08:52:00 crc kubenswrapper[4492]: E1126 08:52:00.721109 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4a376a15-0af7-4ebe-9a22-d22c17f28c8a" containerName="extract-content" Nov 26 08:52:00 crc kubenswrapper[4492]: I1126 08:52:00.721115 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="4a376a15-0af7-4ebe-9a22-d22c17f28c8a" containerName="extract-content" Nov 26 08:52:00 crc kubenswrapper[4492]: I1126 08:52:00.721934 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="4a376a15-0af7-4ebe-9a22-d22c17f28c8a" containerName="registry-server" Nov 26 08:52:00 crc kubenswrapper[4492]: I1126 08:52:00.726993 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-dcfvl" Nov 26 08:52:00 crc kubenswrapper[4492]: I1126 08:52:00.742922 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-dcfvl"] Nov 26 08:52:00 crc kubenswrapper[4492]: I1126 08:52:00.771030 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3eb492a4-e5a8-49c1-b180-590b4885749b-catalog-content\") pod \"certified-operators-dcfvl\" (UID: \"3eb492a4-e5a8-49c1-b180-590b4885749b\") " pod="openshift-marketplace/certified-operators-dcfvl" Nov 26 08:52:00 crc kubenswrapper[4492]: I1126 08:52:00.771129 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8gz2w\" (UniqueName: \"kubernetes.io/projected/3eb492a4-e5a8-49c1-b180-590b4885749b-kube-api-access-8gz2w\") pod \"certified-operators-dcfvl\" (UID: \"3eb492a4-e5a8-49c1-b180-590b4885749b\") " pod="openshift-marketplace/certified-operators-dcfvl" Nov 26 08:52:00 crc kubenswrapper[4492]: I1126 08:52:00.771464 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3eb492a4-e5a8-49c1-b180-590b4885749b-utilities\") pod \"certified-operators-dcfvl\" (UID: \"3eb492a4-e5a8-49c1-b180-590b4885749b\") " pod="openshift-marketplace/certified-operators-dcfvl" Nov 26 08:52:00 crc kubenswrapper[4492]: I1126 08:52:00.872962 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3eb492a4-e5a8-49c1-b180-590b4885749b-utilities\") pod \"certified-operators-dcfvl\" (UID: \"3eb492a4-e5a8-49c1-b180-590b4885749b\") " pod="openshift-marketplace/certified-operators-dcfvl" Nov 26 08:52:00 crc kubenswrapper[4492]: I1126 08:52:00.873237 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3eb492a4-e5a8-49c1-b180-590b4885749b-catalog-content\") pod \"certified-operators-dcfvl\" (UID: \"3eb492a4-e5a8-49c1-b180-590b4885749b\") " pod="openshift-marketplace/certified-operators-dcfvl" Nov 26 08:52:00 crc kubenswrapper[4492]: I1126 08:52:00.873285 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8gz2w\" (UniqueName: 
\"kubernetes.io/projected/3eb492a4-e5a8-49c1-b180-590b4885749b-kube-api-access-8gz2w\") pod \"certified-operators-dcfvl\" (UID: \"3eb492a4-e5a8-49c1-b180-590b4885749b\") " pod="openshift-marketplace/certified-operators-dcfvl" Nov 26 08:52:00 crc kubenswrapper[4492]: I1126 08:52:00.876026 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3eb492a4-e5a8-49c1-b180-590b4885749b-utilities\") pod \"certified-operators-dcfvl\" (UID: \"3eb492a4-e5a8-49c1-b180-590b4885749b\") " pod="openshift-marketplace/certified-operators-dcfvl" Nov 26 08:52:00 crc kubenswrapper[4492]: I1126 08:52:00.876656 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3eb492a4-e5a8-49c1-b180-590b4885749b-catalog-content\") pod \"certified-operators-dcfvl\" (UID: \"3eb492a4-e5a8-49c1-b180-590b4885749b\") " pod="openshift-marketplace/certified-operators-dcfvl" Nov 26 08:52:00 crc kubenswrapper[4492]: I1126 08:52:00.907108 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8gz2w\" (UniqueName: \"kubernetes.io/projected/3eb492a4-e5a8-49c1-b180-590b4885749b-kube-api-access-8gz2w\") pod \"certified-operators-dcfvl\" (UID: \"3eb492a4-e5a8-49c1-b180-590b4885749b\") " pod="openshift-marketplace/certified-operators-dcfvl" Nov 26 08:52:01 crc kubenswrapper[4492]: I1126 08:52:01.045507 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-dcfvl" Nov 26 08:52:01 crc kubenswrapper[4492]: I1126 08:52:01.797105 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-dcfvl"] Nov 26 08:52:01 crc kubenswrapper[4492]: W1126 08:52:01.813754 4492 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3eb492a4_e5a8_49c1_b180_590b4885749b.slice/crio-1d0e92647a9e582d4a2cecb14f808daf5b2cd1c6ac5382ad02bee6ec7560e961 WatchSource:0}: Error finding container 1d0e92647a9e582d4a2cecb14f808daf5b2cd1c6ac5382ad02bee6ec7560e961: Status 404 returned error can't find the container with id 1d0e92647a9e582d4a2cecb14f808daf5b2cd1c6ac5382ad02bee6ec7560e961 Nov 26 08:52:02 crc kubenswrapper[4492]: I1126 08:52:02.281119 4492 generic.go:334] "Generic (PLEG): container finished" podID="3eb492a4-e5a8-49c1-b180-590b4885749b" containerID="666c661ec3e19209249b35ebbc857d656c06addae15713a2fb42c94933437fa3" exitCode=0 Nov 26 08:52:02 crc kubenswrapper[4492]: I1126 08:52:02.281234 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dcfvl" event={"ID":"3eb492a4-e5a8-49c1-b180-590b4885749b","Type":"ContainerDied","Data":"666c661ec3e19209249b35ebbc857d656c06addae15713a2fb42c94933437fa3"} Nov 26 08:52:02 crc kubenswrapper[4492]: I1126 08:52:02.281281 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dcfvl" event={"ID":"3eb492a4-e5a8-49c1-b180-590b4885749b","Type":"ContainerStarted","Data":"1d0e92647a9e582d4a2cecb14f808daf5b2cd1c6ac5382ad02bee6ec7560e961"} Nov 26 08:52:03 crc kubenswrapper[4492]: I1126 08:52:03.290425 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dcfvl" event={"ID":"3eb492a4-e5a8-49c1-b180-590b4885749b","Type":"ContainerStarted","Data":"2b2204ef1c937905f3e997c62beb2141b897efa67277f57f29aa88d6d0dd19be"} Nov 26 08:52:04 crc 
kubenswrapper[4492]: I1126 08:52:04.305076 4492 generic.go:334] "Generic (PLEG): container finished" podID="3eb492a4-e5a8-49c1-b180-590b4885749b" containerID="2b2204ef1c937905f3e997c62beb2141b897efa67277f57f29aa88d6d0dd19be" exitCode=0 Nov 26 08:52:04 crc kubenswrapper[4492]: I1126 08:52:04.305214 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dcfvl" event={"ID":"3eb492a4-e5a8-49c1-b180-590b4885749b","Type":"ContainerDied","Data":"2b2204ef1c937905f3e997c62beb2141b897efa67277f57f29aa88d6d0dd19be"} Nov 26 08:52:05 crc kubenswrapper[4492]: I1126 08:52:05.325771 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dcfvl" event={"ID":"3eb492a4-e5a8-49c1-b180-590b4885749b","Type":"ContainerStarted","Data":"fe043b4e591920cd82d9c3afe1aa260057ab9ec9b3916c4472b90898970b3fdf"} Nov 26 08:52:05 crc kubenswrapper[4492]: I1126 08:52:05.359398 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-dcfvl" podStartSLOduration=2.851518304 podStartE2EDuration="5.358123858s" podCreationTimestamp="2025-11-26 08:52:00 +0000 UTC" firstStartedPulling="2025-11-26 08:52:02.294618375 +0000 UTC m=+7418.178506683" lastFinishedPulling="2025-11-26 08:52:04.80122394 +0000 UTC m=+7420.685112237" observedRunningTime="2025-11-26 08:52:05.34942905 +0000 UTC m=+7421.233317348" watchObservedRunningTime="2025-11-26 08:52:05.358123858 +0000 UTC m=+7421.242012155" Nov 26 08:52:11 crc kubenswrapper[4492]: I1126 08:52:11.046334 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-dcfvl" Nov 26 08:52:11 crc kubenswrapper[4492]: I1126 08:52:11.047017 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-dcfvl" Nov 26 08:52:11 crc kubenswrapper[4492]: I1126 08:52:11.093715 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-dcfvl" Nov 26 08:52:11 crc kubenswrapper[4492]: I1126 08:52:11.440927 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-dcfvl" Nov 26 08:52:11 crc kubenswrapper[4492]: I1126 08:52:11.497968 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-dcfvl"] Nov 26 08:52:13 crc kubenswrapper[4492]: I1126 08:52:13.417121 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-dcfvl" podUID="3eb492a4-e5a8-49c1-b180-590b4885749b" containerName="registry-server" containerID="cri-o://fe043b4e591920cd82d9c3afe1aa260057ab9ec9b3916c4472b90898970b3fdf" gracePeriod=2 Nov 26 08:52:13 crc kubenswrapper[4492]: I1126 08:52:13.909918 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-dcfvl" Nov 26 08:52:14 crc kubenswrapper[4492]: I1126 08:52:14.092945 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3eb492a4-e5a8-49c1-b180-590b4885749b-catalog-content\") pod \"3eb492a4-e5a8-49c1-b180-590b4885749b\" (UID: \"3eb492a4-e5a8-49c1-b180-590b4885749b\") " Nov 26 08:52:14 crc kubenswrapper[4492]: I1126 08:52:14.093433 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3eb492a4-e5a8-49c1-b180-590b4885749b-utilities\") pod \"3eb492a4-e5a8-49c1-b180-590b4885749b\" (UID: \"3eb492a4-e5a8-49c1-b180-590b4885749b\") " Nov 26 08:52:14 crc kubenswrapper[4492]: I1126 08:52:14.093590 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8gz2w\" (UniqueName: \"kubernetes.io/projected/3eb492a4-e5a8-49c1-b180-590b4885749b-kube-api-access-8gz2w\") pod \"3eb492a4-e5a8-49c1-b180-590b4885749b\" (UID: \"3eb492a4-e5a8-49c1-b180-590b4885749b\") " Nov 26 08:52:14 crc kubenswrapper[4492]: I1126 08:52:14.094753 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3eb492a4-e5a8-49c1-b180-590b4885749b-utilities" (OuterVolumeSpecName: "utilities") pod "3eb492a4-e5a8-49c1-b180-590b4885749b" (UID: "3eb492a4-e5a8-49c1-b180-590b4885749b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 08:52:14 crc kubenswrapper[4492]: I1126 08:52:14.102839 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3eb492a4-e5a8-49c1-b180-590b4885749b-kube-api-access-8gz2w" (OuterVolumeSpecName: "kube-api-access-8gz2w") pod "3eb492a4-e5a8-49c1-b180-590b4885749b" (UID: "3eb492a4-e5a8-49c1-b180-590b4885749b"). InnerVolumeSpecName "kube-api-access-8gz2w". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:52:14 crc kubenswrapper[4492]: I1126 08:52:14.143312 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3eb492a4-e5a8-49c1-b180-590b4885749b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "3eb492a4-e5a8-49c1-b180-590b4885749b" (UID: "3eb492a4-e5a8-49c1-b180-590b4885749b"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 08:52:14 crc kubenswrapper[4492]: I1126 08:52:14.197294 4492 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3eb492a4-e5a8-49c1-b180-590b4885749b-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 08:52:14 crc kubenswrapper[4492]: I1126 08:52:14.199330 4492 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3eb492a4-e5a8-49c1-b180-590b4885749b-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 08:52:14 crc kubenswrapper[4492]: I1126 08:52:14.199420 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8gz2w\" (UniqueName: \"kubernetes.io/projected/3eb492a4-e5a8-49c1-b180-590b4885749b-kube-api-access-8gz2w\") on node \"crc\" DevicePath \"\"" Nov 26 08:52:14 crc kubenswrapper[4492]: I1126 08:52:14.432303 4492 generic.go:334] "Generic (PLEG): container finished" podID="3eb492a4-e5a8-49c1-b180-590b4885749b" containerID="fe043b4e591920cd82d9c3afe1aa260057ab9ec9b3916c4472b90898970b3fdf" exitCode=0 Nov 26 08:52:14 crc kubenswrapper[4492]: I1126 08:52:14.432384 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dcfvl" event={"ID":"3eb492a4-e5a8-49c1-b180-590b4885749b","Type":"ContainerDied","Data":"fe043b4e591920cd82d9c3afe1aa260057ab9ec9b3916c4472b90898970b3fdf"} Nov 26 08:52:14 crc kubenswrapper[4492]: I1126 08:52:14.432396 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-dcfvl" Nov 26 08:52:14 crc kubenswrapper[4492]: I1126 08:52:14.432436 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dcfvl" event={"ID":"3eb492a4-e5a8-49c1-b180-590b4885749b","Type":"ContainerDied","Data":"1d0e92647a9e582d4a2cecb14f808daf5b2cd1c6ac5382ad02bee6ec7560e961"} Nov 26 08:52:14 crc kubenswrapper[4492]: I1126 08:52:14.432904 4492 scope.go:117] "RemoveContainer" containerID="fe043b4e591920cd82d9c3afe1aa260057ab9ec9b3916c4472b90898970b3fdf" Nov 26 08:52:14 crc kubenswrapper[4492]: I1126 08:52:14.481646 4492 scope.go:117] "RemoveContainer" containerID="2b2204ef1c937905f3e997c62beb2141b897efa67277f57f29aa88d6d0dd19be" Nov 26 08:52:14 crc kubenswrapper[4492]: I1126 08:52:14.481823 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-dcfvl"] Nov 26 08:52:14 crc kubenswrapper[4492]: I1126 08:52:14.491314 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-dcfvl"] Nov 26 08:52:14 crc kubenswrapper[4492]: I1126 08:52:14.501027 4492 scope.go:117] "RemoveContainer" containerID="666c661ec3e19209249b35ebbc857d656c06addae15713a2fb42c94933437fa3" Nov 26 08:52:14 crc kubenswrapper[4492]: I1126 08:52:14.540865 4492 scope.go:117] "RemoveContainer" containerID="fe043b4e591920cd82d9c3afe1aa260057ab9ec9b3916c4472b90898970b3fdf" Nov 26 08:52:14 crc kubenswrapper[4492]: E1126 08:52:14.542832 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fe043b4e591920cd82d9c3afe1aa260057ab9ec9b3916c4472b90898970b3fdf\": container with ID starting with fe043b4e591920cd82d9c3afe1aa260057ab9ec9b3916c4472b90898970b3fdf not found: ID does not exist" containerID="fe043b4e591920cd82d9c3afe1aa260057ab9ec9b3916c4472b90898970b3fdf" Nov 26 08:52:14 crc kubenswrapper[4492]: I1126 08:52:14.543300 
4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fe043b4e591920cd82d9c3afe1aa260057ab9ec9b3916c4472b90898970b3fdf"} err="failed to get container status \"fe043b4e591920cd82d9c3afe1aa260057ab9ec9b3916c4472b90898970b3fdf\": rpc error: code = NotFound desc = could not find container \"fe043b4e591920cd82d9c3afe1aa260057ab9ec9b3916c4472b90898970b3fdf\": container with ID starting with fe043b4e591920cd82d9c3afe1aa260057ab9ec9b3916c4472b90898970b3fdf not found: ID does not exist"
Nov 26 08:52:14 crc kubenswrapper[4492]: I1126 08:52:14.543345 4492 scope.go:117] "RemoveContainer" containerID="2b2204ef1c937905f3e997c62beb2141b897efa67277f57f29aa88d6d0dd19be"
Nov 26 08:52:14 crc kubenswrapper[4492]: E1126 08:52:14.543788 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2b2204ef1c937905f3e997c62beb2141b897efa67277f57f29aa88d6d0dd19be\": container with ID starting with 2b2204ef1c937905f3e997c62beb2141b897efa67277f57f29aa88d6d0dd19be not found: ID does not exist" containerID="2b2204ef1c937905f3e997c62beb2141b897efa67277f57f29aa88d6d0dd19be"
Nov 26 08:52:14 crc kubenswrapper[4492]: I1126 08:52:14.543833 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2b2204ef1c937905f3e997c62beb2141b897efa67277f57f29aa88d6d0dd19be"} err="failed to get container status \"2b2204ef1c937905f3e997c62beb2141b897efa67277f57f29aa88d6d0dd19be\": rpc error: code = NotFound desc = could not find container \"2b2204ef1c937905f3e997c62beb2141b897efa67277f57f29aa88d6d0dd19be\": container with ID starting with 2b2204ef1c937905f3e997c62beb2141b897efa67277f57f29aa88d6d0dd19be not found: ID does not exist"
Nov 26 08:52:14 crc kubenswrapper[4492]: I1126 08:52:14.543864 4492 scope.go:117] "RemoveContainer" containerID="666c661ec3e19209249b35ebbc857d656c06addae15713a2fb42c94933437fa3"
Nov 26 08:52:14 crc kubenswrapper[4492]: E1126 08:52:14.544265 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"666c661ec3e19209249b35ebbc857d656c06addae15713a2fb42c94933437fa3\": container with ID starting with 666c661ec3e19209249b35ebbc857d656c06addae15713a2fb42c94933437fa3 not found: ID does not exist" containerID="666c661ec3e19209249b35ebbc857d656c06addae15713a2fb42c94933437fa3"
Nov 26 08:52:14 crc kubenswrapper[4492]: I1126 08:52:14.544292 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"666c661ec3e19209249b35ebbc857d656c06addae15713a2fb42c94933437fa3"} err="failed to get container status \"666c661ec3e19209249b35ebbc857d656c06addae15713a2fb42c94933437fa3\": rpc error: code = NotFound desc = could not find container \"666c661ec3e19209249b35ebbc857d656c06addae15713a2fb42c94933437fa3\": container with ID starting with 666c661ec3e19209249b35ebbc857d656c06addae15713a2fb42c94933437fa3 not found: ID does not exist"
Nov 26 08:52:16 crc kubenswrapper[4492]: I1126 08:52:16.452079 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3eb492a4-e5a8-49c1-b180-590b4885749b" path="/var/lib/kubelet/pods/3eb492a4-e5a8-49c1-b180-590b4885749b/volumes"
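Note the shape of the cleanup above: RemoveContainer runs once and succeeds, then a second status lookup gets rpc code = NotFound and the kubelet records "DeleteContainer returned error" and moves on; deletion is idempotent, so "already gone" counts as success. A sketch of that tolerance, with a stdlib sentinel error standing in for the gRPC NotFound code seen in the log:

package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("container not found") // stands in for rpc code = NotFound

type containerRuntime struct{ containers map[string]bool }

func (r *containerRuntime) remove(id string) error {
	if !r.containers[id] {
		return fmt.Errorf("could not find container %q: %w", id, errNotFound)
	}
	delete(r.containers, id)
	return nil
}

// removeContainer tolerates NotFound so repeated cleanup passes are harmless:
// the goal state ("no container") already holds.
func removeContainer(r *containerRuntime, id string) {
	if err := r.remove(id); err != nil {
		if errors.Is(err, errNotFound) {
			fmt.Printf("DeleteContainer returned error (already gone): %v\n", err)
			return
		}
		fmt.Println("real failure:", err)
		return
	}
	fmt.Printf("removed %s\n", id)
}

func main() {
	r := &containerRuntime{containers: map[string]bool{"fe043b4e5919": true}}
	removeContainer(r, "fe043b4e5919") // first pass removes it
	removeContainer(r, "fe043b4e5919") // second pass: NotFound, tolerated
}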
Nov 26 08:53:19 crc kubenswrapper[4492]: I1126 08:53:19.441522 4492 patch_prober.go:28] interesting pod/machine-config-daemon-6blv7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 26 08:53:19 crc kubenswrapper[4492]: I1126 08:53:19.442141 4492 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 26 08:53:49 crc kubenswrapper[4492]: I1126 08:53:49.441390 4492 patch_prober.go:28] interesting pod/machine-config-daemon-6blv7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 26 08:53:49 crc kubenswrapper[4492]: I1126 08:53:49.442228 4492 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 26 08:54:14 crc kubenswrapper[4492]: I1126 08:54:14.589947 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-mrl9g"]
Nov 26 08:54:14 crc kubenswrapper[4492]: E1126 08:54:14.590975 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3eb492a4-e5a8-49c1-b180-590b4885749b" containerName="extract-utilities"
Nov 26 08:54:14 crc kubenswrapper[4492]: I1126 08:54:14.590994 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="3eb492a4-e5a8-49c1-b180-590b4885749b" containerName="extract-utilities"
Nov 26 08:54:14 crc kubenswrapper[4492]: E1126 08:54:14.591027 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3eb492a4-e5a8-49c1-b180-590b4885749b" containerName="registry-server"
Nov 26 08:54:14 crc kubenswrapper[4492]: I1126 08:54:14.591033 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="3eb492a4-e5a8-49c1-b180-590b4885749b" containerName="registry-server"
Nov 26 08:54:14 crc kubenswrapper[4492]: E1126 08:54:14.591063 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3eb492a4-e5a8-49c1-b180-590b4885749b" containerName="extract-content"
Nov 26 08:54:14 crc kubenswrapper[4492]: I1126 08:54:14.591070 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="3eb492a4-e5a8-49c1-b180-590b4885749b" containerName="extract-content"
Nov 26 08:54:14 crc kubenswrapper[4492]: I1126 08:54:14.591327 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="3eb492a4-e5a8-49c1-b180-590b4885749b" containerName="registry-server"
Nov 26 08:54:14 crc kubenswrapper[4492]: I1126 08:54:14.593121 4492 util.go:30] "No sandbox for pod can be found.
Nov 26 08:54:14 crc kubenswrapper[4492]: I1126 08:54:14.593121 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-mrl9g"
Nov 26 08:54:14 crc kubenswrapper[4492]: I1126 08:54:14.620960 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-whkk4\" (UniqueName: \"kubernetes.io/projected/b5678375-2033-406a-b318-c1e9136437da-kube-api-access-whkk4\") pod \"redhat-operators-mrl9g\" (UID: \"b5678375-2033-406a-b318-c1e9136437da\") " pod="openshift-marketplace/redhat-operators-mrl9g"
Nov 26 08:54:14 crc kubenswrapper[4492]: I1126 08:54:14.621327 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b5678375-2033-406a-b318-c1e9136437da-catalog-content\") pod \"redhat-operators-mrl9g\" (UID: \"b5678375-2033-406a-b318-c1e9136437da\") " pod="openshift-marketplace/redhat-operators-mrl9g"
Nov 26 08:54:14 crc kubenswrapper[4492]: I1126 08:54:14.621382 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b5678375-2033-406a-b318-c1e9136437da-utilities\") pod \"redhat-operators-mrl9g\" (UID: \"b5678375-2033-406a-b318-c1e9136437da\") " pod="openshift-marketplace/redhat-operators-mrl9g"
Nov 26 08:54:14 crc kubenswrapper[4492]: I1126 08:54:14.622927 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-mrl9g"]
Nov 26 08:54:14 crc kubenswrapper[4492]: I1126 08:54:14.724464 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-whkk4\" (UniqueName: \"kubernetes.io/projected/b5678375-2033-406a-b318-c1e9136437da-kube-api-access-whkk4\") pod \"redhat-operators-mrl9g\" (UID: \"b5678375-2033-406a-b318-c1e9136437da\") " pod="openshift-marketplace/redhat-operators-mrl9g"
Nov 26 08:54:14 crc kubenswrapper[4492]: I1126 08:54:14.724736 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b5678375-2033-406a-b318-c1e9136437da-utilities\") pod \"redhat-operators-mrl9g\" (UID: \"b5678375-2033-406a-b318-c1e9136437da\") " pod="openshift-marketplace/redhat-operators-mrl9g"
Nov 26 08:54:14 crc kubenswrapper[4492]: I1126 08:54:14.724768 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b5678375-2033-406a-b318-c1e9136437da-catalog-content\") pod \"redhat-operators-mrl9g\" (UID: \"b5678375-2033-406a-b318-c1e9136437da\") " pod="openshift-marketplace/redhat-operators-mrl9g"
Nov 26 08:54:14 crc kubenswrapper[4492]: I1126 08:54:14.725464 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b5678375-2033-406a-b318-c1e9136437da-catalog-content\") pod \"redhat-operators-mrl9g\" (UID: \"b5678375-2033-406a-b318-c1e9136437da\") " pod="openshift-marketplace/redhat-operators-mrl9g"
Nov 26 08:54:14 crc kubenswrapper[4492]: I1126 08:54:14.726079 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b5678375-2033-406a-b318-c1e9136437da-utilities\") pod \"redhat-operators-mrl9g\" (UID: \"b5678375-2033-406a-b318-c1e9136437da\") " pod="openshift-marketplace/redhat-operators-mrl9g"
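
The three volumes the reconciler attaches and mounts above are two emptyDirs plus a projected service-account token. A minimal client-go sketch of that volume set, with the names taken from the log and everything else assumed (the projected token sources are elided):

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	// Sketch of the catalog pod's volumes as logged: "utilities" and
	// "catalog-content" are emptyDirs; "kube-api-access-whkk4" is the
	// projected service-account token volume (sources elided here).
	volumes := []corev1.Volume{
		{Name: "utilities", VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}},
		{Name: "catalog-content", VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}},
		{Name: "kube-api-access-whkk4", VolumeSource: corev1.VolumeSource{Projected: &corev1.ProjectedVolumeSource{}}},
	}
	for _, v := range volumes {
		fmt.Println(v.Name)
	}
}
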
\"kube-api-access-whkk4\" (UniqueName: \"kubernetes.io/projected/b5678375-2033-406a-b318-c1e9136437da-kube-api-access-whkk4\") pod \"redhat-operators-mrl9g\" (UID: \"b5678375-2033-406a-b318-c1e9136437da\") " pod="openshift-marketplace/redhat-operators-mrl9g" Nov 26 08:54:14 crc kubenswrapper[4492]: I1126 08:54:14.911078 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-mrl9g" Nov 26 08:54:15 crc kubenswrapper[4492]: I1126 08:54:15.423741 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-mrl9g"] Nov 26 08:54:15 crc kubenswrapper[4492]: I1126 08:54:15.679987 4492 generic.go:334] "Generic (PLEG): container finished" podID="b5678375-2033-406a-b318-c1e9136437da" containerID="666f5a674fd309c8265ca3552cfd8fdba19edf6657a8466db45f326e37c19333" exitCode=0 Nov 26 08:54:15 crc kubenswrapper[4492]: I1126 08:54:15.680083 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mrl9g" event={"ID":"b5678375-2033-406a-b318-c1e9136437da","Type":"ContainerDied","Data":"666f5a674fd309c8265ca3552cfd8fdba19edf6657a8466db45f326e37c19333"} Nov 26 08:54:15 crc kubenswrapper[4492]: I1126 08:54:15.680426 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mrl9g" event={"ID":"b5678375-2033-406a-b318-c1e9136437da","Type":"ContainerStarted","Data":"f5f3913c45055c726f42b1f6e9da6c66c68b22856945a865462368a545b9209f"} Nov 26 08:54:17 crc kubenswrapper[4492]: I1126 08:54:17.704775 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mrl9g" event={"ID":"b5678375-2033-406a-b318-c1e9136437da","Type":"ContainerStarted","Data":"b3449ff028cd837e6cae205094cdfc5a5384740575b1650882add8be0e16fff7"} Nov 26 08:54:19 crc kubenswrapper[4492]: I1126 08:54:19.441656 4492 patch_prober.go:28] interesting pod/machine-config-daemon-6blv7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 08:54:19 crc kubenswrapper[4492]: I1126 08:54:19.442045 4492 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 08:54:19 crc kubenswrapper[4492]: I1126 08:54:19.442097 4492 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" Nov 26 08:54:19 crc kubenswrapper[4492]: I1126 08:54:19.442734 4492 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"667202a17aa587122fc36bffa4a3b667ed3cc7a7aac3f59d8e5bc6b6531921b3"} pod="openshift-machine-config-operator/machine-config-daemon-6blv7" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 08:54:19 crc kubenswrapper[4492]: I1126 08:54:19.442796 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" 
containerID="cri-o://667202a17aa587122fc36bffa4a3b667ed3cc7a7aac3f59d8e5bc6b6531921b3" gracePeriod=600 Nov 26 08:54:19 crc kubenswrapper[4492]: I1126 08:54:19.749337 4492 generic.go:334] "Generic (PLEG): container finished" podID="b5678375-2033-406a-b318-c1e9136437da" containerID="b3449ff028cd837e6cae205094cdfc5a5384740575b1650882add8be0e16fff7" exitCode=0 Nov 26 08:54:19 crc kubenswrapper[4492]: I1126 08:54:19.749618 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mrl9g" event={"ID":"b5678375-2033-406a-b318-c1e9136437da","Type":"ContainerDied","Data":"b3449ff028cd837e6cae205094cdfc5a5384740575b1650882add8be0e16fff7"} Nov 26 08:54:19 crc kubenswrapper[4492]: I1126 08:54:19.760852 4492 generic.go:334] "Generic (PLEG): container finished" podID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerID="667202a17aa587122fc36bffa4a3b667ed3cc7a7aac3f59d8e5bc6b6531921b3" exitCode=0 Nov 26 08:54:19 crc kubenswrapper[4492]: I1126 08:54:19.760903 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" event={"ID":"04bf18ad-d2a1-4b30-a3fa-2b6247363c82","Type":"ContainerDied","Data":"667202a17aa587122fc36bffa4a3b667ed3cc7a7aac3f59d8e5bc6b6531921b3"} Nov 26 08:54:19 crc kubenswrapper[4492]: I1126 08:54:19.760943 4492 scope.go:117] "RemoveContainer" containerID="ef96c0d9cafae12ebbbc7e77ec2f7f4753b6a2e65b5ea75228b7ab77555ee739" Nov 26 08:54:20 crc kubenswrapper[4492]: I1126 08:54:20.777947 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" event={"ID":"04bf18ad-d2a1-4b30-a3fa-2b6247363c82","Type":"ContainerStarted","Data":"743a43e250ae8ce23e6d073e4fe1db81f92bd076effc339817b028528d0faab4"} Nov 26 08:54:21 crc kubenswrapper[4492]: I1126 08:54:21.791110 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mrl9g" event={"ID":"b5678375-2033-406a-b318-c1e9136437da","Type":"ContainerStarted","Data":"37138ec71c0982075e8d38534d18c850f14cffa61c3fb16185b07c87f5e32720"} Nov 26 08:54:21 crc kubenswrapper[4492]: I1126 08:54:21.831437 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-mrl9g" podStartSLOduration=2.491058229 podStartE2EDuration="7.831411373s" podCreationTimestamp="2025-11-26 08:54:14 +0000 UTC" firstStartedPulling="2025-11-26 08:54:15.68211049 +0000 UTC m=+7551.565998789" lastFinishedPulling="2025-11-26 08:54:21.022463635 +0000 UTC m=+7556.906351933" observedRunningTime="2025-11-26 08:54:21.820276638 +0000 UTC m=+7557.704164936" watchObservedRunningTime="2025-11-26 08:54:21.831411373 +0000 UTC m=+7557.715299671" Nov 26 08:54:24 crc kubenswrapper[4492]: I1126 08:54:24.913462 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-mrl9g" Nov 26 08:54:24 crc kubenswrapper[4492]: I1126 08:54:24.914663 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-mrl9g" Nov 26 08:54:25 crc kubenswrapper[4492]: I1126 08:54:25.960192 4492 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-mrl9g" podUID="b5678375-2033-406a-b318-c1e9136437da" containerName="registry-server" probeResult="failure" output=< Nov 26 08:54:25 crc kubenswrapper[4492]: timeout: failed to connect service ":50051" within 1s Nov 26 08:54:25 crc kubenswrapper[4492]: > Nov 26 08:54:28 crc 
Nov 26 08:54:28 crc kubenswrapper[4492]: I1126 08:54:28.591517 4492 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/openstack-galera-0" podUID="11fb7794-a2db-4320-8946-91b18bb44afa" containerName="galera" probeResult="failure" output="command timed out"
Nov 26 08:54:35 crc kubenswrapper[4492]: I1126 08:54:35.988304 4492 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-mrl9g" podUID="b5678375-2033-406a-b318-c1e9136437da" containerName="registry-server" probeResult="failure" output=<
Nov 26 08:54:35 crc kubenswrapper[4492]: timeout: failed to connect service ":50051" within 1s
Nov 26 08:54:35 crc kubenswrapper[4492]: >
Nov 26 08:54:39 crc kubenswrapper[4492]: I1126 08:54:39.406840 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-lfdl4"]
Nov 26 08:54:39 crc kubenswrapper[4492]: I1126 08:54:39.421445 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-lfdl4"
Nov 26 08:54:39 crc kubenswrapper[4492]: I1126 08:54:39.472188 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tj5vm\" (UniqueName: \"kubernetes.io/projected/bf595f70-d80b-4c21-b2b6-0df534e728b0-kube-api-access-tj5vm\") pod \"redhat-marketplace-lfdl4\" (UID: \"bf595f70-d80b-4c21-b2b6-0df534e728b0\") " pod="openshift-marketplace/redhat-marketplace-lfdl4"
Nov 26 08:54:39 crc kubenswrapper[4492]: I1126 08:54:39.472617 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bf595f70-d80b-4c21-b2b6-0df534e728b0-utilities\") pod \"redhat-marketplace-lfdl4\" (UID: \"bf595f70-d80b-4c21-b2b6-0df534e728b0\") " pod="openshift-marketplace/redhat-marketplace-lfdl4"
Nov 26 08:54:39 crc kubenswrapper[4492]: I1126 08:54:39.472785 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bf595f70-d80b-4c21-b2b6-0df534e728b0-catalog-content\") pod \"redhat-marketplace-lfdl4\" (UID: \"bf595f70-d80b-4c21-b2b6-0df534e728b0\") " pod="openshift-marketplace/redhat-marketplace-lfdl4"
Nov 26 08:54:39 crc kubenswrapper[4492]: I1126 08:54:39.549587 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-lfdl4"]
Nov 26 08:54:39 crc kubenswrapper[4492]: I1126 08:54:39.575264 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tj5vm\" (UniqueName: \"kubernetes.io/projected/bf595f70-d80b-4c21-b2b6-0df534e728b0-kube-api-access-tj5vm\") pod \"redhat-marketplace-lfdl4\" (UID: \"bf595f70-d80b-4c21-b2b6-0df534e728b0\") " pod="openshift-marketplace/redhat-marketplace-lfdl4"
Nov 26 08:54:39 crc kubenswrapper[4492]: I1126 08:54:39.602514 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bf595f70-d80b-4c21-b2b6-0df534e728b0-utilities\") pod \"redhat-marketplace-lfdl4\" (UID: \"bf595f70-d80b-4c21-b2b6-0df534e728b0\") " pod="openshift-marketplace/redhat-marketplace-lfdl4"
Nov 26 08:54:39 crc kubenswrapper[4492]: I1126 08:54:39.602594 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bf595f70-d80b-4c21-b2b6-0df534e728b0-catalog-content\") pod \"redhat-marketplace-lfdl4\" (UID: 
\"bf595f70-d80b-4c21-b2b6-0df534e728b0\") " pod="openshift-marketplace/redhat-marketplace-lfdl4" Nov 26 08:54:39 crc kubenswrapper[4492]: I1126 08:54:39.609552 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bf595f70-d80b-4c21-b2b6-0df534e728b0-utilities\") pod \"redhat-marketplace-lfdl4\" (UID: \"bf595f70-d80b-4c21-b2b6-0df534e728b0\") " pod="openshift-marketplace/redhat-marketplace-lfdl4" Nov 26 08:54:39 crc kubenswrapper[4492]: I1126 08:54:39.610058 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bf595f70-d80b-4c21-b2b6-0df534e728b0-catalog-content\") pod \"redhat-marketplace-lfdl4\" (UID: \"bf595f70-d80b-4c21-b2b6-0df534e728b0\") " pod="openshift-marketplace/redhat-marketplace-lfdl4" Nov 26 08:54:39 crc kubenswrapper[4492]: I1126 08:54:39.672645 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tj5vm\" (UniqueName: \"kubernetes.io/projected/bf595f70-d80b-4c21-b2b6-0df534e728b0-kube-api-access-tj5vm\") pod \"redhat-marketplace-lfdl4\" (UID: \"bf595f70-d80b-4c21-b2b6-0df534e728b0\") " pod="openshift-marketplace/redhat-marketplace-lfdl4" Nov 26 08:54:39 crc kubenswrapper[4492]: I1126 08:54:39.755804 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-lfdl4" Nov 26 08:54:40 crc kubenswrapper[4492]: I1126 08:54:40.668768 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-lfdl4"] Nov 26 08:54:41 crc kubenswrapper[4492]: I1126 08:54:41.000993 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lfdl4" event={"ID":"bf595f70-d80b-4c21-b2b6-0df534e728b0","Type":"ContainerDied","Data":"220be4289656f685c7418bc132a6d160717b936868767c0b1ae071f8058dfeca"} Nov 26 08:54:41 crc kubenswrapper[4492]: I1126 08:54:41.001597 4492 generic.go:334] "Generic (PLEG): container finished" podID="bf595f70-d80b-4c21-b2b6-0df534e728b0" containerID="220be4289656f685c7418bc132a6d160717b936868767c0b1ae071f8058dfeca" exitCode=0 Nov 26 08:54:41 crc kubenswrapper[4492]: I1126 08:54:41.003358 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lfdl4" event={"ID":"bf595f70-d80b-4c21-b2b6-0df534e728b0","Type":"ContainerStarted","Data":"dcfcd96f7f5b9e9f162e76523cd4b5024896f72dbce0c85f77f675f81fe8b5b8"} Nov 26 08:54:43 crc kubenswrapper[4492]: I1126 08:54:43.029708 4492 generic.go:334] "Generic (PLEG): container finished" podID="bf595f70-d80b-4c21-b2b6-0df534e728b0" containerID="9bc7d681b381a246a22944c0950ebbce9f89618122fafb6e820fd36bbcfd53ef" exitCode=0 Nov 26 08:54:43 crc kubenswrapper[4492]: I1126 08:54:43.029823 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lfdl4" event={"ID":"bf595f70-d80b-4c21-b2b6-0df534e728b0","Type":"ContainerDied","Data":"9bc7d681b381a246a22944c0950ebbce9f89618122fafb6e820fd36bbcfd53ef"} Nov 26 08:54:45 crc kubenswrapper[4492]: I1126 08:54:45.012130 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-mrl9g" Nov 26 08:54:45 crc kubenswrapper[4492]: I1126 08:54:45.064404 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lfdl4" 
event={"ID":"bf595f70-d80b-4c21-b2b6-0df534e728b0","Type":"ContainerStarted","Data":"c3d16d367a0bc27d9a08097cbdbc08e8f8a1cddcaaf0051b6696ff69ece8faf9"} Nov 26 08:54:45 crc kubenswrapper[4492]: I1126 08:54:45.067540 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-mrl9g" Nov 26 08:54:45 crc kubenswrapper[4492]: I1126 08:54:45.087993 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-lfdl4" podStartSLOduration=3.131000978 podStartE2EDuration="6.087387154s" podCreationTimestamp="2025-11-26 08:54:39 +0000 UTC" firstStartedPulling="2025-11-26 08:54:41.003877575 +0000 UTC m=+7576.887765872" lastFinishedPulling="2025-11-26 08:54:43.96026375 +0000 UTC m=+7579.844152048" observedRunningTime="2025-11-26 08:54:45.081098472 +0000 UTC m=+7580.964986769" watchObservedRunningTime="2025-11-26 08:54:45.087387154 +0000 UTC m=+7580.971275453" Nov 26 08:54:46 crc kubenswrapper[4492]: I1126 08:54:46.038968 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-mrl9g"] Nov 26 08:54:46 crc kubenswrapper[4492]: I1126 08:54:46.078215 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-mrl9g" podUID="b5678375-2033-406a-b318-c1e9136437da" containerName="registry-server" containerID="cri-o://37138ec71c0982075e8d38534d18c850f14cffa61c3fb16185b07c87f5e32720" gracePeriod=2 Nov 26 08:54:47 crc kubenswrapper[4492]: I1126 08:54:47.048554 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-mrl9g" Nov 26 08:54:47 crc kubenswrapper[4492]: I1126 08:54:47.087701 4492 generic.go:334] "Generic (PLEG): container finished" podID="b5678375-2033-406a-b318-c1e9136437da" containerID="37138ec71c0982075e8d38534d18c850f14cffa61c3fb16185b07c87f5e32720" exitCode=0 Nov 26 08:54:47 crc kubenswrapper[4492]: I1126 08:54:47.087749 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mrl9g" event={"ID":"b5678375-2033-406a-b318-c1e9136437da","Type":"ContainerDied","Data":"37138ec71c0982075e8d38534d18c850f14cffa61c3fb16185b07c87f5e32720"} Nov 26 08:54:47 crc kubenswrapper[4492]: I1126 08:54:47.087795 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mrl9g" event={"ID":"b5678375-2033-406a-b318-c1e9136437da","Type":"ContainerDied","Data":"f5f3913c45055c726f42b1f6e9da6c66c68b22856945a865462368a545b9209f"} Nov 26 08:54:47 crc kubenswrapper[4492]: I1126 08:54:47.087795 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-mrl9g" Nov 26 08:54:47 crc kubenswrapper[4492]: I1126 08:54:47.088269 4492 scope.go:117] "RemoveContainer" containerID="37138ec71c0982075e8d38534d18c850f14cffa61c3fb16185b07c87f5e32720" Nov 26 08:54:47 crc kubenswrapper[4492]: I1126 08:54:47.119211 4492 scope.go:117] "RemoveContainer" containerID="b3449ff028cd837e6cae205094cdfc5a5384740575b1650882add8be0e16fff7" Nov 26 08:54:47 crc kubenswrapper[4492]: I1126 08:54:47.152868 4492 scope.go:117] "RemoveContainer" containerID="666f5a674fd309c8265ca3552cfd8fdba19edf6657a8466db45f326e37c19333" Nov 26 08:54:47 crc kubenswrapper[4492]: I1126 08:54:47.175346 4492 scope.go:117] "RemoveContainer" containerID="37138ec71c0982075e8d38534d18c850f14cffa61c3fb16185b07c87f5e32720" Nov 26 08:54:47 crc kubenswrapper[4492]: E1126 08:54:47.179460 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"37138ec71c0982075e8d38534d18c850f14cffa61c3fb16185b07c87f5e32720\": container with ID starting with 37138ec71c0982075e8d38534d18c850f14cffa61c3fb16185b07c87f5e32720 not found: ID does not exist" containerID="37138ec71c0982075e8d38534d18c850f14cffa61c3fb16185b07c87f5e32720" Nov 26 08:54:47 crc kubenswrapper[4492]: I1126 08:54:47.180297 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"37138ec71c0982075e8d38534d18c850f14cffa61c3fb16185b07c87f5e32720"} err="failed to get container status \"37138ec71c0982075e8d38534d18c850f14cffa61c3fb16185b07c87f5e32720\": rpc error: code = NotFound desc = could not find container \"37138ec71c0982075e8d38534d18c850f14cffa61c3fb16185b07c87f5e32720\": container with ID starting with 37138ec71c0982075e8d38534d18c850f14cffa61c3fb16185b07c87f5e32720 not found: ID does not exist" Nov 26 08:54:47 crc kubenswrapper[4492]: I1126 08:54:47.180341 4492 scope.go:117] "RemoveContainer" containerID="b3449ff028cd837e6cae205094cdfc5a5384740575b1650882add8be0e16fff7" Nov 26 08:54:47 crc kubenswrapper[4492]: E1126 08:54:47.180870 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b3449ff028cd837e6cae205094cdfc5a5384740575b1650882add8be0e16fff7\": container with ID starting with b3449ff028cd837e6cae205094cdfc5a5384740575b1650882add8be0e16fff7 not found: ID does not exist" containerID="b3449ff028cd837e6cae205094cdfc5a5384740575b1650882add8be0e16fff7" Nov 26 08:54:47 crc kubenswrapper[4492]: I1126 08:54:47.180929 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b3449ff028cd837e6cae205094cdfc5a5384740575b1650882add8be0e16fff7"} err="failed to get container status \"b3449ff028cd837e6cae205094cdfc5a5384740575b1650882add8be0e16fff7\": rpc error: code = NotFound desc = could not find container \"b3449ff028cd837e6cae205094cdfc5a5384740575b1650882add8be0e16fff7\": container with ID starting with b3449ff028cd837e6cae205094cdfc5a5384740575b1650882add8be0e16fff7 not found: ID does not exist" Nov 26 08:54:47 crc kubenswrapper[4492]: I1126 08:54:47.180971 4492 scope.go:117] "RemoveContainer" containerID="666f5a674fd309c8265ca3552cfd8fdba19edf6657a8466db45f326e37c19333" Nov 26 08:54:47 crc kubenswrapper[4492]: E1126 08:54:47.181707 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"666f5a674fd309c8265ca3552cfd8fdba19edf6657a8466db45f326e37c19333\": container with ID starting 
with 666f5a674fd309c8265ca3552cfd8fdba19edf6657a8466db45f326e37c19333 not found: ID does not exist" containerID="666f5a674fd309c8265ca3552cfd8fdba19edf6657a8466db45f326e37c19333" Nov 26 08:54:47 crc kubenswrapper[4492]: I1126 08:54:47.181755 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"666f5a674fd309c8265ca3552cfd8fdba19edf6657a8466db45f326e37c19333"} err="failed to get container status \"666f5a674fd309c8265ca3552cfd8fdba19edf6657a8466db45f326e37c19333\": rpc error: code = NotFound desc = could not find container \"666f5a674fd309c8265ca3552cfd8fdba19edf6657a8466db45f326e37c19333\": container with ID starting with 666f5a674fd309c8265ca3552cfd8fdba19edf6657a8466db45f326e37c19333 not found: ID does not exist" Nov 26 08:54:47 crc kubenswrapper[4492]: I1126 08:54:47.211026 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-whkk4\" (UniqueName: \"kubernetes.io/projected/b5678375-2033-406a-b318-c1e9136437da-kube-api-access-whkk4\") pod \"b5678375-2033-406a-b318-c1e9136437da\" (UID: \"b5678375-2033-406a-b318-c1e9136437da\") " Nov 26 08:54:47 crc kubenswrapper[4492]: I1126 08:54:47.211126 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b5678375-2033-406a-b318-c1e9136437da-catalog-content\") pod \"b5678375-2033-406a-b318-c1e9136437da\" (UID: \"b5678375-2033-406a-b318-c1e9136437da\") " Nov 26 08:54:47 crc kubenswrapper[4492]: I1126 08:54:47.211367 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b5678375-2033-406a-b318-c1e9136437da-utilities\") pod \"b5678375-2033-406a-b318-c1e9136437da\" (UID: \"b5678375-2033-406a-b318-c1e9136437da\") " Nov 26 08:54:47 crc kubenswrapper[4492]: I1126 08:54:47.213363 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b5678375-2033-406a-b318-c1e9136437da-utilities" (OuterVolumeSpecName: "utilities") pod "b5678375-2033-406a-b318-c1e9136437da" (UID: "b5678375-2033-406a-b318-c1e9136437da"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 08:54:47 crc kubenswrapper[4492]: I1126 08:54:47.227373 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b5678375-2033-406a-b318-c1e9136437da-kube-api-access-whkk4" (OuterVolumeSpecName: "kube-api-access-whkk4") pod "b5678375-2033-406a-b318-c1e9136437da" (UID: "b5678375-2033-406a-b318-c1e9136437da"). InnerVolumeSpecName "kube-api-access-whkk4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:54:47 crc kubenswrapper[4492]: I1126 08:54:47.260467 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b5678375-2033-406a-b318-c1e9136437da-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b5678375-2033-406a-b318-c1e9136437da" (UID: "b5678375-2033-406a-b318-c1e9136437da"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 08:54:47 crc kubenswrapper[4492]: I1126 08:54:47.314672 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-whkk4\" (UniqueName: \"kubernetes.io/projected/b5678375-2033-406a-b318-c1e9136437da-kube-api-access-whkk4\") on node \"crc\" DevicePath \"\"" Nov 26 08:54:47 crc kubenswrapper[4492]: I1126 08:54:47.314698 4492 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b5678375-2033-406a-b318-c1e9136437da-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 08:54:47 crc kubenswrapper[4492]: I1126 08:54:47.314708 4492 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b5678375-2033-406a-b318-c1e9136437da-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 08:54:47 crc kubenswrapper[4492]: I1126 08:54:47.423668 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-mrl9g"] Nov 26 08:54:47 crc kubenswrapper[4492]: I1126 08:54:47.437138 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-mrl9g"] Nov 26 08:54:48 crc kubenswrapper[4492]: I1126 08:54:48.455862 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b5678375-2033-406a-b318-c1e9136437da" path="/var/lib/kubelet/pods/b5678375-2033-406a-b318-c1e9136437da/volumes" Nov 26 08:54:49 crc kubenswrapper[4492]: I1126 08:54:49.757952 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-lfdl4" Nov 26 08:54:49 crc kubenswrapper[4492]: I1126 08:54:49.758201 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-lfdl4" Nov 26 08:54:49 crc kubenswrapper[4492]: I1126 08:54:49.800706 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-lfdl4" Nov 26 08:54:50 crc kubenswrapper[4492]: I1126 08:54:50.166805 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-lfdl4" Nov 26 08:54:50 crc kubenswrapper[4492]: I1126 08:54:50.836974 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-lfdl4"] Nov 26 08:54:52 crc kubenswrapper[4492]: I1126 08:54:52.137656 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-lfdl4" podUID="bf595f70-d80b-4c21-b2b6-0df534e728b0" containerName="registry-server" containerID="cri-o://c3d16d367a0bc27d9a08097cbdbc08e8f8a1cddcaaf0051b6696ff69ece8faf9" gracePeriod=2 Nov 26 08:54:52 crc kubenswrapper[4492]: I1126 08:54:52.654379 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-lfdl4" Nov 26 08:54:52 crc kubenswrapper[4492]: I1126 08:54:52.738668 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tj5vm\" (UniqueName: \"kubernetes.io/projected/bf595f70-d80b-4c21-b2b6-0df534e728b0-kube-api-access-tj5vm\") pod \"bf595f70-d80b-4c21-b2b6-0df534e728b0\" (UID: \"bf595f70-d80b-4c21-b2b6-0df534e728b0\") " Nov 26 08:54:52 crc kubenswrapper[4492]: I1126 08:54:52.738917 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bf595f70-d80b-4c21-b2b6-0df534e728b0-catalog-content\") pod \"bf595f70-d80b-4c21-b2b6-0df534e728b0\" (UID: \"bf595f70-d80b-4c21-b2b6-0df534e728b0\") " Nov 26 08:54:52 crc kubenswrapper[4492]: I1126 08:54:52.739067 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bf595f70-d80b-4c21-b2b6-0df534e728b0-utilities\") pod \"bf595f70-d80b-4c21-b2b6-0df534e728b0\" (UID: \"bf595f70-d80b-4c21-b2b6-0df534e728b0\") " Nov 26 08:54:52 crc kubenswrapper[4492]: I1126 08:54:52.740468 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bf595f70-d80b-4c21-b2b6-0df534e728b0-utilities" (OuterVolumeSpecName: "utilities") pod "bf595f70-d80b-4c21-b2b6-0df534e728b0" (UID: "bf595f70-d80b-4c21-b2b6-0df534e728b0"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 08:54:52 crc kubenswrapper[4492]: I1126 08:54:52.748359 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf595f70-d80b-4c21-b2b6-0df534e728b0-kube-api-access-tj5vm" (OuterVolumeSpecName: "kube-api-access-tj5vm") pod "bf595f70-d80b-4c21-b2b6-0df534e728b0" (UID: "bf595f70-d80b-4c21-b2b6-0df534e728b0"). InnerVolumeSpecName "kube-api-access-tj5vm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 08:54:52 crc kubenswrapper[4492]: I1126 08:54:52.760407 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bf595f70-d80b-4c21-b2b6-0df534e728b0-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "bf595f70-d80b-4c21-b2b6-0df534e728b0" (UID: "bf595f70-d80b-4c21-b2b6-0df534e728b0"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 08:54:52 crc kubenswrapper[4492]: I1126 08:54:52.843086 4492 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bf595f70-d80b-4c21-b2b6-0df534e728b0-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 08:54:52 crc kubenswrapper[4492]: I1126 08:54:52.843134 4492 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bf595f70-d80b-4c21-b2b6-0df534e728b0-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 08:54:52 crc kubenswrapper[4492]: I1126 08:54:52.843151 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tj5vm\" (UniqueName: \"kubernetes.io/projected/bf595f70-d80b-4c21-b2b6-0df534e728b0-kube-api-access-tj5vm\") on node \"crc\" DevicePath \"\"" Nov 26 08:54:53 crc kubenswrapper[4492]: I1126 08:54:53.150554 4492 generic.go:334] "Generic (PLEG): container finished" podID="bf595f70-d80b-4c21-b2b6-0df534e728b0" containerID="c3d16d367a0bc27d9a08097cbdbc08e8f8a1cddcaaf0051b6696ff69ece8faf9" exitCode=0 Nov 26 08:54:53 crc kubenswrapper[4492]: I1126 08:54:53.150607 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lfdl4" event={"ID":"bf595f70-d80b-4c21-b2b6-0df534e728b0","Type":"ContainerDied","Data":"c3d16d367a0bc27d9a08097cbdbc08e8f8a1cddcaaf0051b6696ff69ece8faf9"} Nov 26 08:54:53 crc kubenswrapper[4492]: I1126 08:54:53.150634 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-lfdl4" Nov 26 08:54:53 crc kubenswrapper[4492]: I1126 08:54:53.150659 4492 scope.go:117] "RemoveContainer" containerID="c3d16d367a0bc27d9a08097cbdbc08e8f8a1cddcaaf0051b6696ff69ece8faf9" Nov 26 08:54:53 crc kubenswrapper[4492]: I1126 08:54:53.150643 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lfdl4" event={"ID":"bf595f70-d80b-4c21-b2b6-0df534e728b0","Type":"ContainerDied","Data":"dcfcd96f7f5b9e9f162e76523cd4b5024896f72dbce0c85f77f675f81fe8b5b8"} Nov 26 08:54:53 crc kubenswrapper[4492]: I1126 08:54:53.187402 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-lfdl4"] Nov 26 08:54:53 crc kubenswrapper[4492]: I1126 08:54:53.187480 4492 scope.go:117] "RemoveContainer" containerID="9bc7d681b381a246a22944c0950ebbce9f89618122fafb6e820fd36bbcfd53ef" Nov 26 08:54:53 crc kubenswrapper[4492]: I1126 08:54:53.195670 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-lfdl4"] Nov 26 08:54:53 crc kubenswrapper[4492]: I1126 08:54:53.214004 4492 scope.go:117] "RemoveContainer" containerID="220be4289656f685c7418bc132a6d160717b936868767c0b1ae071f8058dfeca" Nov 26 08:54:53 crc kubenswrapper[4492]: I1126 08:54:53.246318 4492 scope.go:117] "RemoveContainer" containerID="c3d16d367a0bc27d9a08097cbdbc08e8f8a1cddcaaf0051b6696ff69ece8faf9" Nov 26 08:54:53 crc kubenswrapper[4492]: E1126 08:54:53.246755 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c3d16d367a0bc27d9a08097cbdbc08e8f8a1cddcaaf0051b6696ff69ece8faf9\": container with ID starting with c3d16d367a0bc27d9a08097cbdbc08e8f8a1cddcaaf0051b6696ff69ece8faf9 not found: ID does not exist" containerID="c3d16d367a0bc27d9a08097cbdbc08e8f8a1cddcaaf0051b6696ff69ece8faf9" Nov 26 08:54:53 crc kubenswrapper[4492]: I1126 08:54:53.246788 4492 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c3d16d367a0bc27d9a08097cbdbc08e8f8a1cddcaaf0051b6696ff69ece8faf9"} err="failed to get container status \"c3d16d367a0bc27d9a08097cbdbc08e8f8a1cddcaaf0051b6696ff69ece8faf9\": rpc error: code = NotFound desc = could not find container \"c3d16d367a0bc27d9a08097cbdbc08e8f8a1cddcaaf0051b6696ff69ece8faf9\": container with ID starting with c3d16d367a0bc27d9a08097cbdbc08e8f8a1cddcaaf0051b6696ff69ece8faf9 not found: ID does not exist" Nov 26 08:54:53 crc kubenswrapper[4492]: I1126 08:54:53.246811 4492 scope.go:117] "RemoveContainer" containerID="9bc7d681b381a246a22944c0950ebbce9f89618122fafb6e820fd36bbcfd53ef" Nov 26 08:54:53 crc kubenswrapper[4492]: E1126 08:54:53.247029 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9bc7d681b381a246a22944c0950ebbce9f89618122fafb6e820fd36bbcfd53ef\": container with ID starting with 9bc7d681b381a246a22944c0950ebbce9f89618122fafb6e820fd36bbcfd53ef not found: ID does not exist" containerID="9bc7d681b381a246a22944c0950ebbce9f89618122fafb6e820fd36bbcfd53ef" Nov 26 08:54:53 crc kubenswrapper[4492]: I1126 08:54:53.247050 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9bc7d681b381a246a22944c0950ebbce9f89618122fafb6e820fd36bbcfd53ef"} err="failed to get container status \"9bc7d681b381a246a22944c0950ebbce9f89618122fafb6e820fd36bbcfd53ef\": rpc error: code = NotFound desc = could not find container \"9bc7d681b381a246a22944c0950ebbce9f89618122fafb6e820fd36bbcfd53ef\": container with ID starting with 9bc7d681b381a246a22944c0950ebbce9f89618122fafb6e820fd36bbcfd53ef not found: ID does not exist" Nov 26 08:54:53 crc kubenswrapper[4492]: I1126 08:54:53.247063 4492 scope.go:117] "RemoveContainer" containerID="220be4289656f685c7418bc132a6d160717b936868767c0b1ae071f8058dfeca" Nov 26 08:54:53 crc kubenswrapper[4492]: E1126 08:54:53.247386 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"220be4289656f685c7418bc132a6d160717b936868767c0b1ae071f8058dfeca\": container with ID starting with 220be4289656f685c7418bc132a6d160717b936868767c0b1ae071f8058dfeca not found: ID does not exist" containerID="220be4289656f685c7418bc132a6d160717b936868767c0b1ae071f8058dfeca" Nov 26 08:54:53 crc kubenswrapper[4492]: I1126 08:54:53.247414 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"220be4289656f685c7418bc132a6d160717b936868767c0b1ae071f8058dfeca"} err="failed to get container status \"220be4289656f685c7418bc132a6d160717b936868767c0b1ae071f8058dfeca\": rpc error: code = NotFound desc = could not find container \"220be4289656f685c7418bc132a6d160717b936868767c0b1ae071f8058dfeca\": container with ID starting with 220be4289656f685c7418bc132a6d160717b936868767c0b1ae071f8058dfeca not found: ID does not exist" Nov 26 08:54:54 crc kubenswrapper[4492]: I1126 08:54:54.467717 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf595f70-d80b-4c21-b2b6-0df534e728b0" path="/var/lib/kubelet/pods/bf595f70-d80b-4c21-b2b6-0df534e728b0/volumes" Nov 26 08:56:19 crc kubenswrapper[4492]: I1126 08:56:19.443981 4492 patch_prober.go:28] interesting pod/machine-config-daemon-6blv7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 
127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 08:56:19 crc kubenswrapper[4492]: I1126 08:56:19.446858 4492 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 08:56:49 crc kubenswrapper[4492]: I1126 08:56:49.441825 4492 patch_prober.go:28] interesting pod/machine-config-daemon-6blv7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 08:56:49 crc kubenswrapper[4492]: I1126 08:56:49.442593 4492 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 08:57:19 crc kubenswrapper[4492]: I1126 08:57:19.441073 4492 patch_prober.go:28] interesting pod/machine-config-daemon-6blv7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 08:57:19 crc kubenswrapper[4492]: I1126 08:57:19.441843 4492 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 08:57:19 crc kubenswrapper[4492]: I1126 08:57:19.442398 4492 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" Nov 26 08:57:19 crc kubenswrapper[4492]: I1126 08:57:19.444024 4492 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"743a43e250ae8ce23e6d073e4fe1db81f92bd076effc339817b028528d0faab4"} pod="openshift-machine-config-operator/machine-config-daemon-6blv7" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 08:57:19 crc kubenswrapper[4492]: I1126 08:57:19.444305 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" containerID="cri-o://743a43e250ae8ce23e6d073e4fe1db81f92bd076effc339817b028528d0faab4" gracePeriod=600 Nov 26 08:57:19 crc kubenswrapper[4492]: E1126 08:57:19.584919 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:57:19 crc kubenswrapper[4492]: I1126 08:57:19.662202 4492 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" event={"ID":"04bf18ad-d2a1-4b30-a3fa-2b6247363c82","Type":"ContainerDied","Data":"743a43e250ae8ce23e6d073e4fe1db81f92bd076effc339817b028528d0faab4"} Nov 26 08:57:19 crc kubenswrapper[4492]: I1126 08:57:19.662464 4492 generic.go:334] "Generic (PLEG): container finished" podID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerID="743a43e250ae8ce23e6d073e4fe1db81f92bd076effc339817b028528d0faab4" exitCode=0 Nov 26 08:57:19 crc kubenswrapper[4492]: I1126 08:57:19.663651 4492 scope.go:117] "RemoveContainer" containerID="667202a17aa587122fc36bffa4a3b667ed3cc7a7aac3f59d8e5bc6b6531921b3" Nov 26 08:57:19 crc kubenswrapper[4492]: I1126 08:57:19.664040 4492 scope.go:117] "RemoveContainer" containerID="743a43e250ae8ce23e6d073e4fe1db81f92bd076effc339817b028528d0faab4" Nov 26 08:57:19 crc kubenswrapper[4492]: E1126 08:57:19.664710 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:57:31 crc kubenswrapper[4492]: I1126 08:57:31.439006 4492 scope.go:117] "RemoveContainer" containerID="743a43e250ae8ce23e6d073e4fe1db81f92bd076effc339817b028528d0faab4" Nov 26 08:57:31 crc kubenswrapper[4492]: E1126 08:57:31.440897 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:57:45 crc kubenswrapper[4492]: I1126 08:57:45.438935 4492 scope.go:117] "RemoveContainer" containerID="743a43e250ae8ce23e6d073e4fe1db81f92bd076effc339817b028528d0faab4" Nov 26 08:57:45 crc kubenswrapper[4492]: E1126 08:57:45.440118 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:57:57 crc kubenswrapper[4492]: I1126 08:57:57.439770 4492 scope.go:117] "RemoveContainer" containerID="743a43e250ae8ce23e6d073e4fe1db81f92bd076effc339817b028528d0faab4" Nov 26 08:57:57 crc kubenswrapper[4492]: E1126 08:57:57.441299 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:58:11 crc kubenswrapper[4492]: I1126 08:58:11.438545 4492 scope.go:117] "RemoveContainer" 
containerID="743a43e250ae8ce23e6d073e4fe1db81f92bd076effc339817b028528d0faab4" Nov 26 08:58:11 crc kubenswrapper[4492]: E1126 08:58:11.439444 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:58:22 crc kubenswrapper[4492]: I1126 08:58:22.438991 4492 scope.go:117] "RemoveContainer" containerID="743a43e250ae8ce23e6d073e4fe1db81f92bd076effc339817b028528d0faab4" Nov 26 08:58:22 crc kubenswrapper[4492]: E1126 08:58:22.440109 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:58:37 crc kubenswrapper[4492]: I1126 08:58:37.438591 4492 scope.go:117] "RemoveContainer" containerID="743a43e250ae8ce23e6d073e4fe1db81f92bd076effc339817b028528d0faab4" Nov 26 08:58:37 crc kubenswrapper[4492]: E1126 08:58:37.439360 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:58:48 crc kubenswrapper[4492]: I1126 08:58:48.439073 4492 scope.go:117] "RemoveContainer" containerID="743a43e250ae8ce23e6d073e4fe1db81f92bd076effc339817b028528d0faab4" Nov 26 08:58:48 crc kubenswrapper[4492]: E1126 08:58:48.439874 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:59:03 crc kubenswrapper[4492]: I1126 08:59:03.439337 4492 scope.go:117] "RemoveContainer" containerID="743a43e250ae8ce23e6d073e4fe1db81f92bd076effc339817b028528d0faab4" Nov 26 08:59:03 crc kubenswrapper[4492]: E1126 08:59:03.440833 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:59:17 crc kubenswrapper[4492]: I1126 08:59:17.438937 4492 scope.go:117] "RemoveContainer" containerID="743a43e250ae8ce23e6d073e4fe1db81f92bd076effc339817b028528d0faab4" Nov 26 08:59:17 crc kubenswrapper[4492]: E1126 08:59:17.439805 4492 pod_workers.go:1301] "Error syncing pod, 
skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:59:29 crc kubenswrapper[4492]: I1126 08:59:29.440073 4492 scope.go:117] "RemoveContainer" containerID="743a43e250ae8ce23e6d073e4fe1db81f92bd076effc339817b028528d0faab4" Nov 26 08:59:29 crc kubenswrapper[4492]: E1126 08:59:29.440920 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:59:40 crc kubenswrapper[4492]: I1126 08:59:40.439842 4492 scope.go:117] "RemoveContainer" containerID="743a43e250ae8ce23e6d073e4fe1db81f92bd076effc339817b028528d0faab4" Nov 26 08:59:40 crc kubenswrapper[4492]: E1126 08:59:40.440841 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 08:59:54 crc kubenswrapper[4492]: I1126 08:59:54.446885 4492 scope.go:117] "RemoveContainer" containerID="743a43e250ae8ce23e6d073e4fe1db81f92bd076effc339817b028528d0faab4" Nov 26 08:59:54 crc kubenswrapper[4492]: E1126 08:59:54.447929 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 09:00:00 crc kubenswrapper[4492]: I1126 09:00:00.339199 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402460-65njc"] Nov 26 09:00:00 crc kubenswrapper[4492]: E1126 09:00:00.340660 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b5678375-2033-406a-b318-c1e9136437da" containerName="extract-utilities" Nov 26 09:00:00 crc kubenswrapper[4492]: I1126 09:00:00.340682 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="b5678375-2033-406a-b318-c1e9136437da" containerName="extract-utilities" Nov 26 09:00:00 crc kubenswrapper[4492]: E1126 09:00:00.340698 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b5678375-2033-406a-b318-c1e9136437da" containerName="extract-content" Nov 26 09:00:00 crc kubenswrapper[4492]: I1126 09:00:00.340704 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="b5678375-2033-406a-b318-c1e9136437da" containerName="extract-content" Nov 26 09:00:00 crc kubenswrapper[4492]: E1126 09:00:00.340719 4492 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="bf595f70-d80b-4c21-b2b6-0df534e728b0" containerName="extract-utilities" Nov 26 09:00:00 crc kubenswrapper[4492]: I1126 09:00:00.340725 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="bf595f70-d80b-4c21-b2b6-0df534e728b0" containerName="extract-utilities" Nov 26 09:00:00 crc kubenswrapper[4492]: E1126 09:00:00.340731 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b5678375-2033-406a-b318-c1e9136437da" containerName="registry-server" Nov 26 09:00:00 crc kubenswrapper[4492]: I1126 09:00:00.340736 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="b5678375-2033-406a-b318-c1e9136437da" containerName="registry-server" Nov 26 09:00:00 crc kubenswrapper[4492]: E1126 09:00:00.340771 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bf595f70-d80b-4c21-b2b6-0df534e728b0" containerName="registry-server" Nov 26 09:00:00 crc kubenswrapper[4492]: I1126 09:00:00.340776 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="bf595f70-d80b-4c21-b2b6-0df534e728b0" containerName="registry-server" Nov 26 09:00:00 crc kubenswrapper[4492]: E1126 09:00:00.340784 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bf595f70-d80b-4c21-b2b6-0df534e728b0" containerName="extract-content" Nov 26 09:00:00 crc kubenswrapper[4492]: I1126 09:00:00.340789 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="bf595f70-d80b-4c21-b2b6-0df534e728b0" containerName="extract-content" Nov 26 09:00:00 crc kubenswrapper[4492]: I1126 09:00:00.341360 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="b5678375-2033-406a-b318-c1e9136437da" containerName="registry-server" Nov 26 09:00:00 crc kubenswrapper[4492]: I1126 09:00:00.341380 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="bf595f70-d80b-4c21-b2b6-0df534e728b0" containerName="registry-server" Nov 26 09:00:00 crc kubenswrapper[4492]: I1126 09:00:00.345248 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402460-65njc" Nov 26 09:00:00 crc kubenswrapper[4492]: I1126 09:00:00.352261 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 26 09:00:00 crc kubenswrapper[4492]: I1126 09:00:00.352272 4492 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 26 09:00:00 crc kubenswrapper[4492]: I1126 09:00:00.371722 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402460-65njc"] Nov 26 09:00:00 crc kubenswrapper[4492]: I1126 09:00:00.475610 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gpqmz\" (UniqueName: \"kubernetes.io/projected/de3a4c48-0ef4-459b-aed2-79453a9db5f8-kube-api-access-gpqmz\") pod \"collect-profiles-29402460-65njc\" (UID: \"de3a4c48-0ef4-459b-aed2-79453a9db5f8\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402460-65njc" Nov 26 09:00:00 crc kubenswrapper[4492]: I1126 09:00:00.475683 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/de3a4c48-0ef4-459b-aed2-79453a9db5f8-config-volume\") pod \"collect-profiles-29402460-65njc\" (UID: \"de3a4c48-0ef4-459b-aed2-79453a9db5f8\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402460-65njc" Nov 26 09:00:00 crc kubenswrapper[4492]: I1126 09:00:00.476249 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/de3a4c48-0ef4-459b-aed2-79453a9db5f8-secret-volume\") pod \"collect-profiles-29402460-65njc\" (UID: \"de3a4c48-0ef4-459b-aed2-79453a9db5f8\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402460-65njc" Nov 26 09:00:00 crc kubenswrapper[4492]: I1126 09:00:00.578942 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/de3a4c48-0ef4-459b-aed2-79453a9db5f8-secret-volume\") pod \"collect-profiles-29402460-65njc\" (UID: \"de3a4c48-0ef4-459b-aed2-79453a9db5f8\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402460-65njc" Nov 26 09:00:00 crc kubenswrapper[4492]: I1126 09:00:00.579128 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gpqmz\" (UniqueName: \"kubernetes.io/projected/de3a4c48-0ef4-459b-aed2-79453a9db5f8-kube-api-access-gpqmz\") pod \"collect-profiles-29402460-65njc\" (UID: \"de3a4c48-0ef4-459b-aed2-79453a9db5f8\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402460-65njc" Nov 26 09:00:00 crc kubenswrapper[4492]: I1126 09:00:00.579211 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/de3a4c48-0ef4-459b-aed2-79453a9db5f8-config-volume\") pod \"collect-profiles-29402460-65njc\" (UID: \"de3a4c48-0ef4-459b-aed2-79453a9db5f8\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402460-65njc" Nov 26 09:00:00 crc kubenswrapper[4492]: I1126 09:00:00.582696 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/de3a4c48-0ef4-459b-aed2-79453a9db5f8-config-volume\") pod 
\"collect-profiles-29402460-65njc\" (UID: \"de3a4c48-0ef4-459b-aed2-79453a9db5f8\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402460-65njc" Nov 26 09:00:00 crc kubenswrapper[4492]: I1126 09:00:00.591558 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/de3a4c48-0ef4-459b-aed2-79453a9db5f8-secret-volume\") pod \"collect-profiles-29402460-65njc\" (UID: \"de3a4c48-0ef4-459b-aed2-79453a9db5f8\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402460-65njc" Nov 26 09:00:00 crc kubenswrapper[4492]: I1126 09:00:00.596365 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gpqmz\" (UniqueName: \"kubernetes.io/projected/de3a4c48-0ef4-459b-aed2-79453a9db5f8-kube-api-access-gpqmz\") pod \"collect-profiles-29402460-65njc\" (UID: \"de3a4c48-0ef4-459b-aed2-79453a9db5f8\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402460-65njc" Nov 26 09:00:00 crc kubenswrapper[4492]: I1126 09:00:00.671686 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402460-65njc" Nov 26 09:00:01 crc kubenswrapper[4492]: I1126 09:00:01.400536 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402460-65njc"] Nov 26 09:00:02 crc kubenswrapper[4492]: I1126 09:00:02.215933 4492 generic.go:334] "Generic (PLEG): container finished" podID="de3a4c48-0ef4-459b-aed2-79453a9db5f8" containerID="4dcd000851adc183746572bf41b62d29115af922a69268c448bb20f79d15ec29" exitCode=0 Nov 26 09:00:02 crc kubenswrapper[4492]: I1126 09:00:02.216126 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402460-65njc" event={"ID":"de3a4c48-0ef4-459b-aed2-79453a9db5f8","Type":"ContainerDied","Data":"4dcd000851adc183746572bf41b62d29115af922a69268c448bb20f79d15ec29"} Nov 26 09:00:02 crc kubenswrapper[4492]: I1126 09:00:02.216307 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402460-65njc" event={"ID":"de3a4c48-0ef4-459b-aed2-79453a9db5f8","Type":"ContainerStarted","Data":"bfef49272b02f369b9b6ea4e960b910fc55e34ee1bc897fdda2b7cb492175bc7"} Nov 26 09:00:03 crc kubenswrapper[4492]: I1126 09:00:03.645956 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402460-65njc" Nov 26 09:00:03 crc kubenswrapper[4492]: I1126 09:00:03.671280 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/de3a4c48-0ef4-459b-aed2-79453a9db5f8-secret-volume\") pod \"de3a4c48-0ef4-459b-aed2-79453a9db5f8\" (UID: \"de3a4c48-0ef4-459b-aed2-79453a9db5f8\") " Nov 26 09:00:03 crc kubenswrapper[4492]: I1126 09:00:03.671533 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gpqmz\" (UniqueName: \"kubernetes.io/projected/de3a4c48-0ef4-459b-aed2-79453a9db5f8-kube-api-access-gpqmz\") pod \"de3a4c48-0ef4-459b-aed2-79453a9db5f8\" (UID: \"de3a4c48-0ef4-459b-aed2-79453a9db5f8\") " Nov 26 09:00:03 crc kubenswrapper[4492]: I1126 09:00:03.671715 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/de3a4c48-0ef4-459b-aed2-79453a9db5f8-config-volume\") pod \"de3a4c48-0ef4-459b-aed2-79453a9db5f8\" (UID: \"de3a4c48-0ef4-459b-aed2-79453a9db5f8\") " Nov 26 09:00:03 crc kubenswrapper[4492]: I1126 09:00:03.673788 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/de3a4c48-0ef4-459b-aed2-79453a9db5f8-config-volume" (OuterVolumeSpecName: "config-volume") pod "de3a4c48-0ef4-459b-aed2-79453a9db5f8" (UID: "de3a4c48-0ef4-459b-aed2-79453a9db5f8"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 09:00:03 crc kubenswrapper[4492]: I1126 09:00:03.681352 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/de3a4c48-0ef4-459b-aed2-79453a9db5f8-kube-api-access-gpqmz" (OuterVolumeSpecName: "kube-api-access-gpqmz") pod "de3a4c48-0ef4-459b-aed2-79453a9db5f8" (UID: "de3a4c48-0ef4-459b-aed2-79453a9db5f8"). InnerVolumeSpecName "kube-api-access-gpqmz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 09:00:03 crc kubenswrapper[4492]: I1126 09:00:03.695565 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/de3a4c48-0ef4-459b-aed2-79453a9db5f8-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "de3a4c48-0ef4-459b-aed2-79453a9db5f8" (UID: "de3a4c48-0ef4-459b-aed2-79453a9db5f8"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:00:03 crc kubenswrapper[4492]: I1126 09:00:03.774771 4492 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/de3a4c48-0ef4-459b-aed2-79453a9db5f8-config-volume\") on node \"crc\" DevicePath \"\"" Nov 26 09:00:03 crc kubenswrapper[4492]: I1126 09:00:03.774893 4492 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/de3a4c48-0ef4-459b-aed2-79453a9db5f8-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 26 09:00:03 crc kubenswrapper[4492]: I1126 09:00:03.774951 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gpqmz\" (UniqueName: \"kubernetes.io/projected/de3a4c48-0ef4-459b-aed2-79453a9db5f8-kube-api-access-gpqmz\") on node \"crc\" DevicePath \"\"" Nov 26 09:00:04 crc kubenswrapper[4492]: I1126 09:00:04.252105 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402460-65njc" event={"ID":"de3a4c48-0ef4-459b-aed2-79453a9db5f8","Type":"ContainerDied","Data":"bfef49272b02f369b9b6ea4e960b910fc55e34ee1bc897fdda2b7cb492175bc7"} Nov 26 09:00:04 crc kubenswrapper[4492]: I1126 09:00:04.252450 4492 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bfef49272b02f369b9b6ea4e960b910fc55e34ee1bc897fdda2b7cb492175bc7" Nov 26 09:00:04 crc kubenswrapper[4492]: I1126 09:00:04.252165 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402460-65njc" Nov 26 09:00:04 crc kubenswrapper[4492]: I1126 09:00:04.748624 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402415-gmtvb"] Nov 26 09:00:04 crc kubenswrapper[4492]: I1126 09:00:04.760774 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402415-gmtvb"] Nov 26 09:00:06 crc kubenswrapper[4492]: I1126 09:00:06.439001 4492 scope.go:117] "RemoveContainer" containerID="743a43e250ae8ce23e6d073e4fe1db81f92bd076effc339817b028528d0faab4" Nov 26 09:00:06 crc kubenswrapper[4492]: E1126 09:00:06.439836 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 09:00:06 crc kubenswrapper[4492]: I1126 09:00:06.448455 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="abacab60-1227-435b-8b35-afd719bc372e" path="/var/lib/kubelet/pods/abacab60-1227-435b-8b35-afd719bc372e/volumes" Nov 26 09:00:19 crc kubenswrapper[4492]: I1126 09:00:19.439206 4492 scope.go:117] "RemoveContainer" containerID="743a43e250ae8ce23e6d073e4fe1db81f92bd076effc339817b028528d0faab4" Nov 26 09:00:19 crc kubenswrapper[4492]: E1126 09:00:19.440490 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 09:00:33 crc kubenswrapper[4492]: I1126 09:00:33.438894 4492 scope.go:117] "RemoveContainer" containerID="743a43e250ae8ce23e6d073e4fe1db81f92bd076effc339817b028528d0faab4" Nov 26 09:00:33 crc kubenswrapper[4492]: E1126 09:00:33.439562 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 09:00:45 crc kubenswrapper[4492]: I1126 09:00:45.439183 4492 scope.go:117] "RemoveContainer" containerID="743a43e250ae8ce23e6d073e4fe1db81f92bd076effc339817b028528d0faab4" Nov 26 09:00:45 crc kubenswrapper[4492]: E1126 09:00:45.439604 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 09:00:48 crc kubenswrapper[4492]: I1126 09:00:48.062816 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-hrv7z"] Nov 26 09:00:48 crc kubenswrapper[4492]: E1126 09:00:48.063597 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="de3a4c48-0ef4-459b-aed2-79453a9db5f8" containerName="collect-profiles" Nov 26 09:00:48 crc kubenswrapper[4492]: I1126 09:00:48.063610 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="de3a4c48-0ef4-459b-aed2-79453a9db5f8" containerName="collect-profiles" Nov 26 09:00:48 crc kubenswrapper[4492]: I1126 09:00:48.063818 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="de3a4c48-0ef4-459b-aed2-79453a9db5f8" containerName="collect-profiles" Nov 26 09:00:48 crc kubenswrapper[4492]: I1126 09:00:48.065154 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-hrv7z" Nov 26 09:00:48 crc kubenswrapper[4492]: I1126 09:00:48.082990 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-hrv7z"] Nov 26 09:00:48 crc kubenswrapper[4492]: I1126 09:00:48.259724 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4fa6e09c-5797-4f7c-8941-8fd0c70ea72e-catalog-content\") pod \"community-operators-hrv7z\" (UID: \"4fa6e09c-5797-4f7c-8941-8fd0c70ea72e\") " pod="openshift-marketplace/community-operators-hrv7z" Nov 26 09:00:48 crc kubenswrapper[4492]: I1126 09:00:48.259858 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-thw6m\" (UniqueName: \"kubernetes.io/projected/4fa6e09c-5797-4f7c-8941-8fd0c70ea72e-kube-api-access-thw6m\") pod \"community-operators-hrv7z\" (UID: \"4fa6e09c-5797-4f7c-8941-8fd0c70ea72e\") " pod="openshift-marketplace/community-operators-hrv7z" Nov 26 09:00:48 crc kubenswrapper[4492]: I1126 09:00:48.259892 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4fa6e09c-5797-4f7c-8941-8fd0c70ea72e-utilities\") pod \"community-operators-hrv7z\" (UID: \"4fa6e09c-5797-4f7c-8941-8fd0c70ea72e\") " pod="openshift-marketplace/community-operators-hrv7z" Nov 26 09:00:48 crc kubenswrapper[4492]: I1126 09:00:48.361592 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4fa6e09c-5797-4f7c-8941-8fd0c70ea72e-catalog-content\") pod \"community-operators-hrv7z\" (UID: \"4fa6e09c-5797-4f7c-8941-8fd0c70ea72e\") " pod="openshift-marketplace/community-operators-hrv7z" Nov 26 09:00:48 crc kubenswrapper[4492]: I1126 09:00:48.361682 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-thw6m\" (UniqueName: \"kubernetes.io/projected/4fa6e09c-5797-4f7c-8941-8fd0c70ea72e-kube-api-access-thw6m\") pod \"community-operators-hrv7z\" (UID: \"4fa6e09c-5797-4f7c-8941-8fd0c70ea72e\") " pod="openshift-marketplace/community-operators-hrv7z" Nov 26 09:00:48 crc kubenswrapper[4492]: I1126 09:00:48.361715 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4fa6e09c-5797-4f7c-8941-8fd0c70ea72e-utilities\") pod \"community-operators-hrv7z\" (UID: \"4fa6e09c-5797-4f7c-8941-8fd0c70ea72e\") " pod="openshift-marketplace/community-operators-hrv7z" Nov 26 09:00:48 crc kubenswrapper[4492]: I1126 09:00:48.364073 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4fa6e09c-5797-4f7c-8941-8fd0c70ea72e-catalog-content\") pod \"community-operators-hrv7z\" (UID: \"4fa6e09c-5797-4f7c-8941-8fd0c70ea72e\") " pod="openshift-marketplace/community-operators-hrv7z" Nov 26 09:00:48 crc kubenswrapper[4492]: I1126 09:00:48.364384 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4fa6e09c-5797-4f7c-8941-8fd0c70ea72e-utilities\") pod \"community-operators-hrv7z\" (UID: \"4fa6e09c-5797-4f7c-8941-8fd0c70ea72e\") " pod="openshift-marketplace/community-operators-hrv7z" Nov 26 09:00:48 crc kubenswrapper[4492]: I1126 09:00:48.390734 4492 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-thw6m\" (UniqueName: \"kubernetes.io/projected/4fa6e09c-5797-4f7c-8941-8fd0c70ea72e-kube-api-access-thw6m\") pod \"community-operators-hrv7z\" (UID: \"4fa6e09c-5797-4f7c-8941-8fd0c70ea72e\") " pod="openshift-marketplace/community-operators-hrv7z" Nov 26 09:00:48 crc kubenswrapper[4492]: I1126 09:00:48.685195 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-hrv7z" Nov 26 09:00:49 crc kubenswrapper[4492]: I1126 09:00:49.161002 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-hrv7z"] Nov 26 09:00:49 crc kubenswrapper[4492]: I1126 09:00:49.639936 4492 generic.go:334] "Generic (PLEG): container finished" podID="4fa6e09c-5797-4f7c-8941-8fd0c70ea72e" containerID="f0448b1e79561a5efb5a6660f0ccd3803022d1b8df4cb23ac5409c2274e256f2" exitCode=0 Nov 26 09:00:49 crc kubenswrapper[4492]: I1126 09:00:49.640243 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hrv7z" event={"ID":"4fa6e09c-5797-4f7c-8941-8fd0c70ea72e","Type":"ContainerDied","Data":"f0448b1e79561a5efb5a6660f0ccd3803022d1b8df4cb23ac5409c2274e256f2"} Nov 26 09:00:49 crc kubenswrapper[4492]: I1126 09:00:49.640273 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hrv7z" event={"ID":"4fa6e09c-5797-4f7c-8941-8fd0c70ea72e","Type":"ContainerStarted","Data":"d7f1faf00bcf1dc598929667420ba0df1bdb1c19b0900254ebe406dbf228c988"} Nov 26 09:00:49 crc kubenswrapper[4492]: I1126 09:00:49.645652 4492 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 26 09:00:51 crc kubenswrapper[4492]: I1126 09:00:51.659442 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hrv7z" event={"ID":"4fa6e09c-5797-4f7c-8941-8fd0c70ea72e","Type":"ContainerStarted","Data":"041f233f8029931d46001e4d119ea72fd78a370821f4e22e02b99fb6fa01f44f"} Nov 26 09:00:52 crc kubenswrapper[4492]: I1126 09:00:52.670204 4492 generic.go:334] "Generic (PLEG): container finished" podID="4fa6e09c-5797-4f7c-8941-8fd0c70ea72e" containerID="041f233f8029931d46001e4d119ea72fd78a370821f4e22e02b99fb6fa01f44f" exitCode=0 Nov 26 09:00:52 crc kubenswrapper[4492]: I1126 09:00:52.670275 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hrv7z" event={"ID":"4fa6e09c-5797-4f7c-8941-8fd0c70ea72e","Type":"ContainerDied","Data":"041f233f8029931d46001e4d119ea72fd78a370821f4e22e02b99fb6fa01f44f"} Nov 26 09:00:53 crc kubenswrapper[4492]: I1126 09:00:53.682297 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hrv7z" event={"ID":"4fa6e09c-5797-4f7c-8941-8fd0c70ea72e","Type":"ContainerStarted","Data":"1de5bb3c680436f14bd1aeac85f695d28928825ea797a6592e65191d9fea53e5"} Nov 26 09:00:53 crc kubenswrapper[4492]: I1126 09:00:53.706797 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-hrv7z" podStartSLOduration=2.126631869 podStartE2EDuration="5.70619961s" podCreationTimestamp="2025-11-26 09:00:48 +0000 UTC" firstStartedPulling="2025-11-26 09:00:49.642582148 +0000 UTC m=+7945.526470446" lastFinishedPulling="2025-11-26 09:00:53.22214989 +0000 UTC m=+7949.106038187" observedRunningTime="2025-11-26 09:00:53.70325294 +0000 UTC m=+7949.587141238" watchObservedRunningTime="2025-11-26 
09:00:53.70619961 +0000 UTC m=+7949.590087909" Nov 26 09:00:56 crc kubenswrapper[4492]: I1126 09:00:56.441321 4492 scope.go:117] "RemoveContainer" containerID="743a43e250ae8ce23e6d073e4fe1db81f92bd076effc339817b028528d0faab4" Nov 26 09:00:56 crc kubenswrapper[4492]: E1126 09:00:56.442058 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 09:00:58 crc kubenswrapper[4492]: I1126 09:00:58.686008 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-hrv7z" Nov 26 09:00:58 crc kubenswrapper[4492]: I1126 09:00:58.686462 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-hrv7z" Nov 26 09:00:59 crc kubenswrapper[4492]: I1126 09:00:59.747242 4492 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-hrv7z" podUID="4fa6e09c-5797-4f7c-8941-8fd0c70ea72e" containerName="registry-server" probeResult="failure" output=< Nov 26 09:00:59 crc kubenswrapper[4492]: timeout: failed to connect service ":50051" within 1s Nov 26 09:00:59 crc kubenswrapper[4492]: > Nov 26 09:01:00 crc kubenswrapper[4492]: I1126 09:01:00.160501 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-cron-29402461-44zkt"] Nov 26 09:01:00 crc kubenswrapper[4492]: I1126 09:01:00.161784 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29402461-44zkt" Nov 26 09:01:00 crc kubenswrapper[4492]: I1126 09:01:00.176684 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29402461-44zkt"] Nov 26 09:01:00 crc kubenswrapper[4492]: I1126 09:01:00.243836 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1dbcc55b-d646-49e5-a82f-6731c256f071-config-data\") pod \"keystone-cron-29402461-44zkt\" (UID: \"1dbcc55b-d646-49e5-a82f-6731c256f071\") " pod="openstack/keystone-cron-29402461-44zkt" Nov 26 09:01:00 crc kubenswrapper[4492]: I1126 09:01:00.243981 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5wzvg\" (UniqueName: \"kubernetes.io/projected/1dbcc55b-d646-49e5-a82f-6731c256f071-kube-api-access-5wzvg\") pod \"keystone-cron-29402461-44zkt\" (UID: \"1dbcc55b-d646-49e5-a82f-6731c256f071\") " pod="openstack/keystone-cron-29402461-44zkt" Nov 26 09:01:00 crc kubenswrapper[4492]: I1126 09:01:00.244118 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/1dbcc55b-d646-49e5-a82f-6731c256f071-fernet-keys\") pod \"keystone-cron-29402461-44zkt\" (UID: \"1dbcc55b-d646-49e5-a82f-6731c256f071\") " pod="openstack/keystone-cron-29402461-44zkt" Nov 26 09:01:00 crc kubenswrapper[4492]: I1126 09:01:00.244309 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1dbcc55b-d646-49e5-a82f-6731c256f071-combined-ca-bundle\") pod \"keystone-cron-29402461-44zkt\" (UID: \"1dbcc55b-d646-49e5-a82f-6731c256f071\") " pod="openstack/keystone-cron-29402461-44zkt" Nov 26 09:01:00 crc kubenswrapper[4492]: I1126 09:01:00.347680 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1dbcc55b-d646-49e5-a82f-6731c256f071-combined-ca-bundle\") pod \"keystone-cron-29402461-44zkt\" (UID: \"1dbcc55b-d646-49e5-a82f-6731c256f071\") " pod="openstack/keystone-cron-29402461-44zkt" Nov 26 09:01:00 crc kubenswrapper[4492]: I1126 09:01:00.347837 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1dbcc55b-d646-49e5-a82f-6731c256f071-config-data\") pod \"keystone-cron-29402461-44zkt\" (UID: \"1dbcc55b-d646-49e5-a82f-6731c256f071\") " pod="openstack/keystone-cron-29402461-44zkt" Nov 26 09:01:00 crc kubenswrapper[4492]: I1126 09:01:00.348102 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5wzvg\" (UniqueName: \"kubernetes.io/projected/1dbcc55b-d646-49e5-a82f-6731c256f071-kube-api-access-5wzvg\") pod \"keystone-cron-29402461-44zkt\" (UID: \"1dbcc55b-d646-49e5-a82f-6731c256f071\") " pod="openstack/keystone-cron-29402461-44zkt" Nov 26 09:01:00 crc kubenswrapper[4492]: I1126 09:01:00.348307 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/1dbcc55b-d646-49e5-a82f-6731c256f071-fernet-keys\") pod \"keystone-cron-29402461-44zkt\" (UID: \"1dbcc55b-d646-49e5-a82f-6731c256f071\") " pod="openstack/keystone-cron-29402461-44zkt" Nov 26 09:01:00 crc kubenswrapper[4492]: I1126 09:01:00.361503 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1dbcc55b-d646-49e5-a82f-6731c256f071-combined-ca-bundle\") pod \"keystone-cron-29402461-44zkt\" (UID: \"1dbcc55b-d646-49e5-a82f-6731c256f071\") " pod="openstack/keystone-cron-29402461-44zkt" Nov 26 09:01:00 crc kubenswrapper[4492]: I1126 09:01:00.361551 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/1dbcc55b-d646-49e5-a82f-6731c256f071-fernet-keys\") pod \"keystone-cron-29402461-44zkt\" (UID: \"1dbcc55b-d646-49e5-a82f-6731c256f071\") " pod="openstack/keystone-cron-29402461-44zkt" Nov 26 09:01:00 crc kubenswrapper[4492]: I1126 09:01:00.362034 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1dbcc55b-d646-49e5-a82f-6731c256f071-config-data\") pod \"keystone-cron-29402461-44zkt\" (UID: \"1dbcc55b-d646-49e5-a82f-6731c256f071\") " pod="openstack/keystone-cron-29402461-44zkt" Nov 26 09:01:00 crc kubenswrapper[4492]: I1126 09:01:00.365310 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5wzvg\" (UniqueName: \"kubernetes.io/projected/1dbcc55b-d646-49e5-a82f-6731c256f071-kube-api-access-5wzvg\") pod \"keystone-cron-29402461-44zkt\" (UID: \"1dbcc55b-d646-49e5-a82f-6731c256f071\") " pod="openstack/keystone-cron-29402461-44zkt" Nov 26 09:01:00 crc kubenswrapper[4492]: I1126 09:01:00.479925 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29402461-44zkt" Nov 26 09:01:00 crc kubenswrapper[4492]: I1126 09:01:00.989075 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29402461-44zkt"] Nov 26 09:01:01 crc kubenswrapper[4492]: I1126 09:01:01.757029 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29402461-44zkt" event={"ID":"1dbcc55b-d646-49e5-a82f-6731c256f071","Type":"ContainerStarted","Data":"e72d655a0c8579edbb2c3b17b84fe75af7e95ff54f8f6caa14927bfef0894e6d"} Nov 26 09:01:01 crc kubenswrapper[4492]: I1126 09:01:01.757767 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29402461-44zkt" event={"ID":"1dbcc55b-d646-49e5-a82f-6731c256f071","Type":"ContainerStarted","Data":"a38ebd9155309963ec631d8b306d79409740881307e037f74efe08bf89e812c0"} Nov 26 09:01:01 crc kubenswrapper[4492]: I1126 09:01:01.772737 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-cron-29402461-44zkt" podStartSLOduration=1.772724959 podStartE2EDuration="1.772724959s" podCreationTimestamp="2025-11-26 09:01:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 09:01:01.770678512 +0000 UTC m=+7957.654566810" watchObservedRunningTime="2025-11-26 09:01:01.772724959 +0000 UTC m=+7957.656613258" Nov 26 09:01:02 crc kubenswrapper[4492]: I1126 09:01:02.457051 4492 scope.go:117] "RemoveContainer" containerID="1f6588e4efc1002b62d0ac16c7095f3db0f7f23d829deab454540049f7e73290" Nov 26 09:01:04 crc kubenswrapper[4492]: I1126 09:01:04.786002 4492 generic.go:334] "Generic (PLEG): container finished" podID="1dbcc55b-d646-49e5-a82f-6731c256f071" containerID="e72d655a0c8579edbb2c3b17b84fe75af7e95ff54f8f6caa14927bfef0894e6d" exitCode=0 Nov 26 09:01:04 crc kubenswrapper[4492]: I1126 09:01:04.786094 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29402461-44zkt" 
event={"ID":"1dbcc55b-d646-49e5-a82f-6731c256f071","Type":"ContainerDied","Data":"e72d655a0c8579edbb2c3b17b84fe75af7e95ff54f8f6caa14927bfef0894e6d"} Nov 26 09:01:06 crc kubenswrapper[4492]: I1126 09:01:06.133347 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29402461-44zkt" Nov 26 09:01:06 crc kubenswrapper[4492]: I1126 09:01:06.199378 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1dbcc55b-d646-49e5-a82f-6731c256f071-config-data\") pod \"1dbcc55b-d646-49e5-a82f-6731c256f071\" (UID: \"1dbcc55b-d646-49e5-a82f-6731c256f071\") " Nov 26 09:01:06 crc kubenswrapper[4492]: I1126 09:01:06.199702 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/1dbcc55b-d646-49e5-a82f-6731c256f071-fernet-keys\") pod \"1dbcc55b-d646-49e5-a82f-6731c256f071\" (UID: \"1dbcc55b-d646-49e5-a82f-6731c256f071\") " Nov 26 09:01:06 crc kubenswrapper[4492]: I1126 09:01:06.199905 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5wzvg\" (UniqueName: \"kubernetes.io/projected/1dbcc55b-d646-49e5-a82f-6731c256f071-kube-api-access-5wzvg\") pod \"1dbcc55b-d646-49e5-a82f-6731c256f071\" (UID: \"1dbcc55b-d646-49e5-a82f-6731c256f071\") " Nov 26 09:01:06 crc kubenswrapper[4492]: I1126 09:01:06.200113 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1dbcc55b-d646-49e5-a82f-6731c256f071-combined-ca-bundle\") pod \"1dbcc55b-d646-49e5-a82f-6731c256f071\" (UID: \"1dbcc55b-d646-49e5-a82f-6731c256f071\") " Nov 26 09:01:06 crc kubenswrapper[4492]: I1126 09:01:06.223055 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1dbcc55b-d646-49e5-a82f-6731c256f071-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "1dbcc55b-d646-49e5-a82f-6731c256f071" (UID: "1dbcc55b-d646-49e5-a82f-6731c256f071"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:01:06 crc kubenswrapper[4492]: I1126 09:01:06.231407 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1dbcc55b-d646-49e5-a82f-6731c256f071-kube-api-access-5wzvg" (OuterVolumeSpecName: "kube-api-access-5wzvg") pod "1dbcc55b-d646-49e5-a82f-6731c256f071" (UID: "1dbcc55b-d646-49e5-a82f-6731c256f071"). InnerVolumeSpecName "kube-api-access-5wzvg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 09:01:06 crc kubenswrapper[4492]: I1126 09:01:06.249762 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1dbcc55b-d646-49e5-a82f-6731c256f071-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1dbcc55b-d646-49e5-a82f-6731c256f071" (UID: "1dbcc55b-d646-49e5-a82f-6731c256f071"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:01:06 crc kubenswrapper[4492]: I1126 09:01:06.258598 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1dbcc55b-d646-49e5-a82f-6731c256f071-config-data" (OuterVolumeSpecName: "config-data") pod "1dbcc55b-d646-49e5-a82f-6731c256f071" (UID: "1dbcc55b-d646-49e5-a82f-6731c256f071"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 09:01:06 crc kubenswrapper[4492]: I1126 09:01:06.303440 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5wzvg\" (UniqueName: \"kubernetes.io/projected/1dbcc55b-d646-49e5-a82f-6731c256f071-kube-api-access-5wzvg\") on node \"crc\" DevicePath \"\"" Nov 26 09:01:06 crc kubenswrapper[4492]: I1126 09:01:06.303478 4492 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1dbcc55b-d646-49e5-a82f-6731c256f071-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 09:01:06 crc kubenswrapper[4492]: I1126 09:01:06.303491 4492 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1dbcc55b-d646-49e5-a82f-6731c256f071-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 09:01:06 crc kubenswrapper[4492]: I1126 09:01:06.303518 4492 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/1dbcc55b-d646-49e5-a82f-6731c256f071-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 26 09:01:06 crc kubenswrapper[4492]: I1126 09:01:06.806094 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29402461-44zkt" event={"ID":"1dbcc55b-d646-49e5-a82f-6731c256f071","Type":"ContainerDied","Data":"a38ebd9155309963ec631d8b306d79409740881307e037f74efe08bf89e812c0"} Nov 26 09:01:06 crc kubenswrapper[4492]: I1126 09:01:06.806775 4492 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a38ebd9155309963ec631d8b306d79409740881307e037f74efe08bf89e812c0" Nov 26 09:01:06 crc kubenswrapper[4492]: I1126 09:01:06.806362 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29402461-44zkt" Nov 26 09:01:08 crc kubenswrapper[4492]: I1126 09:01:08.730841 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-hrv7z" Nov 26 09:01:08 crc kubenswrapper[4492]: I1126 09:01:08.773427 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-hrv7z" Nov 26 09:01:08 crc kubenswrapper[4492]: I1126 09:01:08.978757 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-hrv7z"] Nov 26 09:01:09 crc kubenswrapper[4492]: I1126 09:01:09.826765 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-hrv7z" podUID="4fa6e09c-5797-4f7c-8941-8fd0c70ea72e" containerName="registry-server" containerID="cri-o://1de5bb3c680436f14bd1aeac85f695d28928825ea797a6592e65191d9fea53e5" gracePeriod=2 Nov 26 09:01:10 crc kubenswrapper[4492]: I1126 09:01:10.283419 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-hrv7z" Nov 26 09:01:10 crc kubenswrapper[4492]: I1126 09:01:10.298721 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-thw6m\" (UniqueName: \"kubernetes.io/projected/4fa6e09c-5797-4f7c-8941-8fd0c70ea72e-kube-api-access-thw6m\") pod \"4fa6e09c-5797-4f7c-8941-8fd0c70ea72e\" (UID: \"4fa6e09c-5797-4f7c-8941-8fd0c70ea72e\") " Nov 26 09:01:10 crc kubenswrapper[4492]: I1126 09:01:10.298811 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4fa6e09c-5797-4f7c-8941-8fd0c70ea72e-utilities\") pod \"4fa6e09c-5797-4f7c-8941-8fd0c70ea72e\" (UID: \"4fa6e09c-5797-4f7c-8941-8fd0c70ea72e\") " Nov 26 09:01:10 crc kubenswrapper[4492]: I1126 09:01:10.298967 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4fa6e09c-5797-4f7c-8941-8fd0c70ea72e-catalog-content\") pod \"4fa6e09c-5797-4f7c-8941-8fd0c70ea72e\" (UID: \"4fa6e09c-5797-4f7c-8941-8fd0c70ea72e\") " Nov 26 09:01:10 crc kubenswrapper[4492]: I1126 09:01:10.300805 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4fa6e09c-5797-4f7c-8941-8fd0c70ea72e-utilities" (OuterVolumeSpecName: "utilities") pod "4fa6e09c-5797-4f7c-8941-8fd0c70ea72e" (UID: "4fa6e09c-5797-4f7c-8941-8fd0c70ea72e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 09:01:10 crc kubenswrapper[4492]: I1126 09:01:10.318741 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4fa6e09c-5797-4f7c-8941-8fd0c70ea72e-kube-api-access-thw6m" (OuterVolumeSpecName: "kube-api-access-thw6m") pod "4fa6e09c-5797-4f7c-8941-8fd0c70ea72e" (UID: "4fa6e09c-5797-4f7c-8941-8fd0c70ea72e"). InnerVolumeSpecName "kube-api-access-thw6m". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 09:01:10 crc kubenswrapper[4492]: I1126 09:01:10.366820 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4fa6e09c-5797-4f7c-8941-8fd0c70ea72e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4fa6e09c-5797-4f7c-8941-8fd0c70ea72e" (UID: "4fa6e09c-5797-4f7c-8941-8fd0c70ea72e"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 09:01:10 crc kubenswrapper[4492]: I1126 09:01:10.402106 4492 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4fa6e09c-5797-4f7c-8941-8fd0c70ea72e-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 09:01:10 crc kubenswrapper[4492]: I1126 09:01:10.402138 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-thw6m\" (UniqueName: \"kubernetes.io/projected/4fa6e09c-5797-4f7c-8941-8fd0c70ea72e-kube-api-access-thw6m\") on node \"crc\" DevicePath \"\"" Nov 26 09:01:10 crc kubenswrapper[4492]: I1126 09:01:10.402150 4492 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4fa6e09c-5797-4f7c-8941-8fd0c70ea72e-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 09:01:10 crc kubenswrapper[4492]: I1126 09:01:10.837970 4492 generic.go:334] "Generic (PLEG): container finished" podID="4fa6e09c-5797-4f7c-8941-8fd0c70ea72e" containerID="1de5bb3c680436f14bd1aeac85f695d28928825ea797a6592e65191d9fea53e5" exitCode=0 Nov 26 09:01:10 crc kubenswrapper[4492]: I1126 09:01:10.838047 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-hrv7z" Nov 26 09:01:10 crc kubenswrapper[4492]: I1126 09:01:10.838159 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hrv7z" event={"ID":"4fa6e09c-5797-4f7c-8941-8fd0c70ea72e","Type":"ContainerDied","Data":"1de5bb3c680436f14bd1aeac85f695d28928825ea797a6592e65191d9fea53e5"} Nov 26 09:01:10 crc kubenswrapper[4492]: I1126 09:01:10.838209 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hrv7z" event={"ID":"4fa6e09c-5797-4f7c-8941-8fd0c70ea72e","Type":"ContainerDied","Data":"d7f1faf00bcf1dc598929667420ba0df1bdb1c19b0900254ebe406dbf228c988"} Nov 26 09:01:10 crc kubenswrapper[4492]: I1126 09:01:10.838263 4492 scope.go:117] "RemoveContainer" containerID="1de5bb3c680436f14bd1aeac85f695d28928825ea797a6592e65191d9fea53e5" Nov 26 09:01:10 crc kubenswrapper[4492]: I1126 09:01:10.858474 4492 scope.go:117] "RemoveContainer" containerID="041f233f8029931d46001e4d119ea72fd78a370821f4e22e02b99fb6fa01f44f" Nov 26 09:01:10 crc kubenswrapper[4492]: I1126 09:01:10.868277 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-hrv7z"] Nov 26 09:01:10 crc kubenswrapper[4492]: I1126 09:01:10.874188 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-hrv7z"] Nov 26 09:01:10 crc kubenswrapper[4492]: I1126 09:01:10.879559 4492 scope.go:117] "RemoveContainer" containerID="f0448b1e79561a5efb5a6660f0ccd3803022d1b8df4cb23ac5409c2274e256f2" Nov 26 09:01:10 crc kubenswrapper[4492]: I1126 09:01:10.912334 4492 scope.go:117] "RemoveContainer" containerID="1de5bb3c680436f14bd1aeac85f695d28928825ea797a6592e65191d9fea53e5" Nov 26 09:01:10 crc kubenswrapper[4492]: E1126 09:01:10.914245 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1de5bb3c680436f14bd1aeac85f695d28928825ea797a6592e65191d9fea53e5\": container with ID starting with 1de5bb3c680436f14bd1aeac85f695d28928825ea797a6592e65191d9fea53e5 not found: ID does not exist" containerID="1de5bb3c680436f14bd1aeac85f695d28928825ea797a6592e65191d9fea53e5" Nov 26 09:01:10 crc kubenswrapper[4492]: I1126 09:01:10.914292 
4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1de5bb3c680436f14bd1aeac85f695d28928825ea797a6592e65191d9fea53e5"} err="failed to get container status \"1de5bb3c680436f14bd1aeac85f695d28928825ea797a6592e65191d9fea53e5\": rpc error: code = NotFound desc = could not find container \"1de5bb3c680436f14bd1aeac85f695d28928825ea797a6592e65191d9fea53e5\": container with ID starting with 1de5bb3c680436f14bd1aeac85f695d28928825ea797a6592e65191d9fea53e5 not found: ID does not exist" Nov 26 09:01:10 crc kubenswrapper[4492]: I1126 09:01:10.914322 4492 scope.go:117] "RemoveContainer" containerID="041f233f8029931d46001e4d119ea72fd78a370821f4e22e02b99fb6fa01f44f" Nov 26 09:01:10 crc kubenswrapper[4492]: E1126 09:01:10.915471 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"041f233f8029931d46001e4d119ea72fd78a370821f4e22e02b99fb6fa01f44f\": container with ID starting with 041f233f8029931d46001e4d119ea72fd78a370821f4e22e02b99fb6fa01f44f not found: ID does not exist" containerID="041f233f8029931d46001e4d119ea72fd78a370821f4e22e02b99fb6fa01f44f" Nov 26 09:01:10 crc kubenswrapper[4492]: I1126 09:01:10.915510 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"041f233f8029931d46001e4d119ea72fd78a370821f4e22e02b99fb6fa01f44f"} err="failed to get container status \"041f233f8029931d46001e4d119ea72fd78a370821f4e22e02b99fb6fa01f44f\": rpc error: code = NotFound desc = could not find container \"041f233f8029931d46001e4d119ea72fd78a370821f4e22e02b99fb6fa01f44f\": container with ID starting with 041f233f8029931d46001e4d119ea72fd78a370821f4e22e02b99fb6fa01f44f not found: ID does not exist" Nov 26 09:01:10 crc kubenswrapper[4492]: I1126 09:01:10.915539 4492 scope.go:117] "RemoveContainer" containerID="f0448b1e79561a5efb5a6660f0ccd3803022d1b8df4cb23ac5409c2274e256f2" Nov 26 09:01:10 crc kubenswrapper[4492]: E1126 09:01:10.915928 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f0448b1e79561a5efb5a6660f0ccd3803022d1b8df4cb23ac5409c2274e256f2\": container with ID starting with f0448b1e79561a5efb5a6660f0ccd3803022d1b8df4cb23ac5409c2274e256f2 not found: ID does not exist" containerID="f0448b1e79561a5efb5a6660f0ccd3803022d1b8df4cb23ac5409c2274e256f2" Nov 26 09:01:10 crc kubenswrapper[4492]: I1126 09:01:10.915969 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f0448b1e79561a5efb5a6660f0ccd3803022d1b8df4cb23ac5409c2274e256f2"} err="failed to get container status \"f0448b1e79561a5efb5a6660f0ccd3803022d1b8df4cb23ac5409c2274e256f2\": rpc error: code = NotFound desc = could not find container \"f0448b1e79561a5efb5a6660f0ccd3803022d1b8df4cb23ac5409c2274e256f2\": container with ID starting with f0448b1e79561a5efb5a6660f0ccd3803022d1b8df4cb23ac5409c2274e256f2 not found: ID does not exist" Nov 26 09:01:11 crc kubenswrapper[4492]: I1126 09:01:11.439413 4492 scope.go:117] "RemoveContainer" containerID="743a43e250ae8ce23e6d073e4fe1db81f92bd076effc339817b028528d0faab4" Nov 26 09:01:11 crc kubenswrapper[4492]: E1126 09:01:11.439675 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 09:01:12 crc kubenswrapper[4492]: I1126 09:01:12.452730 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4fa6e09c-5797-4f7c-8941-8fd0c70ea72e" path="/var/lib/kubelet/pods/4fa6e09c-5797-4f7c-8941-8fd0c70ea72e/volumes" Nov 26 09:01:24 crc kubenswrapper[4492]: I1126 09:01:24.473973 4492 scope.go:117] "RemoveContainer" containerID="743a43e250ae8ce23e6d073e4fe1db81f92bd076effc339817b028528d0faab4" Nov 26 09:01:24 crc kubenswrapper[4492]: E1126 09:01:24.476666 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 09:01:39 crc kubenswrapper[4492]: I1126 09:01:39.438156 4492 scope.go:117] "RemoveContainer" containerID="743a43e250ae8ce23e6d073e4fe1db81f92bd076effc339817b028528d0faab4" Nov 26 09:01:39 crc kubenswrapper[4492]: E1126 09:01:39.439213 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 09:01:50 crc kubenswrapper[4492]: I1126 09:01:50.439559 4492 scope.go:117] "RemoveContainer" containerID="743a43e250ae8ce23e6d073e4fe1db81f92bd076effc339817b028528d0faab4" Nov 26 09:01:50 crc kubenswrapper[4492]: E1126 09:01:50.440206 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 09:02:03 crc kubenswrapper[4492]: I1126 09:02:03.439545 4492 scope.go:117] "RemoveContainer" containerID="743a43e250ae8ce23e6d073e4fe1db81f92bd076effc339817b028528d0faab4" Nov 26 09:02:03 crc kubenswrapper[4492]: E1126 09:02:03.440708 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 09:02:18 crc kubenswrapper[4492]: I1126 09:02:18.439440 4492 scope.go:117] "RemoveContainer" containerID="743a43e250ae8ce23e6d073e4fe1db81f92bd076effc339817b028528d0faab4" Nov 26 09:02:18 crc kubenswrapper[4492]: E1126 09:02:18.440339 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 
5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" Nov 26 09:02:27 crc kubenswrapper[4492]: E1126 09:02:27.301194 4492 upgradeaware.go:441] Error proxying data from backend to client: writeto tcp 192.168.25.180:45228->192.168.25.180:45641: read tcp 192.168.25.180:45228->192.168.25.180:45641: read: connection reset by peer Nov 26 09:02:32 crc kubenswrapper[4492]: I1126 09:02:32.439119 4492 scope.go:117] "RemoveContainer" containerID="743a43e250ae8ce23e6d073e4fe1db81f92bd076effc339817b028528d0faab4" Nov 26 09:02:33 crc kubenswrapper[4492]: I1126 09:02:33.642803 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" event={"ID":"04bf18ad-d2a1-4b30-a3fa-2b6247363c82","Type":"ContainerStarted","Data":"061e1f81f1523052be73476b27602effef5c98d09cef1e78cf7988c4252da260"} Nov 26 09:02:44 crc kubenswrapper[4492]: I1126 09:02:44.430421 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-8h7jh"] Nov 26 09:02:44 crc kubenswrapper[4492]: E1126 09:02:44.434001 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4fa6e09c-5797-4f7c-8941-8fd0c70ea72e" containerName="registry-server" Nov 26 09:02:44 crc kubenswrapper[4492]: I1126 09:02:44.434042 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="4fa6e09c-5797-4f7c-8941-8fd0c70ea72e" containerName="registry-server" Nov 26 09:02:44 crc kubenswrapper[4492]: E1126 09:02:44.434098 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4fa6e09c-5797-4f7c-8941-8fd0c70ea72e" containerName="extract-content" Nov 26 09:02:44 crc kubenswrapper[4492]: I1126 09:02:44.434106 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="4fa6e09c-5797-4f7c-8941-8fd0c70ea72e" containerName="extract-content" Nov 26 09:02:44 crc kubenswrapper[4492]: E1126 09:02:44.434125 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1dbcc55b-d646-49e5-a82f-6731c256f071" containerName="keystone-cron" Nov 26 09:02:44 crc kubenswrapper[4492]: I1126 09:02:44.434131 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="1dbcc55b-d646-49e5-a82f-6731c256f071" containerName="keystone-cron" Nov 26 09:02:44 crc kubenswrapper[4492]: E1126 09:02:44.434163 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4fa6e09c-5797-4f7c-8941-8fd0c70ea72e" containerName="extract-utilities" Nov 26 09:02:44 crc kubenswrapper[4492]: I1126 09:02:44.434184 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="4fa6e09c-5797-4f7c-8941-8fd0c70ea72e" containerName="extract-utilities" Nov 26 09:02:44 crc kubenswrapper[4492]: I1126 09:02:44.434452 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="4fa6e09c-5797-4f7c-8941-8fd0c70ea72e" containerName="registry-server" Nov 26 09:02:44 crc kubenswrapper[4492]: I1126 09:02:44.434473 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="1dbcc55b-d646-49e5-a82f-6731c256f071" containerName="keystone-cron" Nov 26 09:02:44 crc kubenswrapper[4492]: I1126 09:02:44.437287 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-8h7jh" Nov 26 09:02:44 crc kubenswrapper[4492]: I1126 09:02:44.448256 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-8h7jh"] Nov 26 09:02:44 crc kubenswrapper[4492]: I1126 09:02:44.540094 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9f06ab40-d187-472f-8279-5a81d0492c7f-utilities\") pod \"certified-operators-8h7jh\" (UID: \"9f06ab40-d187-472f-8279-5a81d0492c7f\") " pod="openshift-marketplace/certified-operators-8h7jh" Nov 26 09:02:44 crc kubenswrapper[4492]: I1126 09:02:44.540206 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9f06ab40-d187-472f-8279-5a81d0492c7f-catalog-content\") pod \"certified-operators-8h7jh\" (UID: \"9f06ab40-d187-472f-8279-5a81d0492c7f\") " pod="openshift-marketplace/certified-operators-8h7jh" Nov 26 09:02:44 crc kubenswrapper[4492]: I1126 09:02:44.540236 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4gcs4\" (UniqueName: \"kubernetes.io/projected/9f06ab40-d187-472f-8279-5a81d0492c7f-kube-api-access-4gcs4\") pod \"certified-operators-8h7jh\" (UID: \"9f06ab40-d187-472f-8279-5a81d0492c7f\") " pod="openshift-marketplace/certified-operators-8h7jh" Nov 26 09:02:44 crc kubenswrapper[4492]: I1126 09:02:44.642601 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9f06ab40-d187-472f-8279-5a81d0492c7f-utilities\") pod \"certified-operators-8h7jh\" (UID: \"9f06ab40-d187-472f-8279-5a81d0492c7f\") " pod="openshift-marketplace/certified-operators-8h7jh" Nov 26 09:02:44 crc kubenswrapper[4492]: I1126 09:02:44.642673 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9f06ab40-d187-472f-8279-5a81d0492c7f-catalog-content\") pod \"certified-operators-8h7jh\" (UID: \"9f06ab40-d187-472f-8279-5a81d0492c7f\") " pod="openshift-marketplace/certified-operators-8h7jh" Nov 26 09:02:44 crc kubenswrapper[4492]: I1126 09:02:44.642699 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4gcs4\" (UniqueName: \"kubernetes.io/projected/9f06ab40-d187-472f-8279-5a81d0492c7f-kube-api-access-4gcs4\") pod \"certified-operators-8h7jh\" (UID: \"9f06ab40-d187-472f-8279-5a81d0492c7f\") " pod="openshift-marketplace/certified-operators-8h7jh" Nov 26 09:02:44 crc kubenswrapper[4492]: I1126 09:02:44.645287 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9f06ab40-d187-472f-8279-5a81d0492c7f-utilities\") pod \"certified-operators-8h7jh\" (UID: \"9f06ab40-d187-472f-8279-5a81d0492c7f\") " pod="openshift-marketplace/certified-operators-8h7jh" Nov 26 09:02:44 crc kubenswrapper[4492]: I1126 09:02:44.645651 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9f06ab40-d187-472f-8279-5a81d0492c7f-catalog-content\") pod \"certified-operators-8h7jh\" (UID: \"9f06ab40-d187-472f-8279-5a81d0492c7f\") " pod="openshift-marketplace/certified-operators-8h7jh" Nov 26 09:02:44 crc kubenswrapper[4492]: I1126 09:02:44.661910 4492 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-4gcs4\" (UniqueName: \"kubernetes.io/projected/9f06ab40-d187-472f-8279-5a81d0492c7f-kube-api-access-4gcs4\") pod \"certified-operators-8h7jh\" (UID: \"9f06ab40-d187-472f-8279-5a81d0492c7f\") " pod="openshift-marketplace/certified-operators-8h7jh" Nov 26 09:02:44 crc kubenswrapper[4492]: I1126 09:02:44.778771 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-8h7jh" Nov 26 09:02:45 crc kubenswrapper[4492]: I1126 09:02:45.323605 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-8h7jh"] Nov 26 09:02:45 crc kubenswrapper[4492]: I1126 09:02:45.752631 4492 generic.go:334] "Generic (PLEG): container finished" podID="9f06ab40-d187-472f-8279-5a81d0492c7f" containerID="d186918daa6b9ca1397f8d72756d442512a0cb83df333f563f128389372cca13" exitCode=0 Nov 26 09:02:45 crc kubenswrapper[4492]: I1126 09:02:45.752721 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8h7jh" event={"ID":"9f06ab40-d187-472f-8279-5a81d0492c7f","Type":"ContainerDied","Data":"d186918daa6b9ca1397f8d72756d442512a0cb83df333f563f128389372cca13"} Nov 26 09:02:45 crc kubenswrapper[4492]: I1126 09:02:45.752906 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8h7jh" event={"ID":"9f06ab40-d187-472f-8279-5a81d0492c7f","Type":"ContainerStarted","Data":"738e86b2fcba466e8823eace12f8e4b5a8d88119c59b74395ce0eb1e87aa146c"} Nov 26 09:02:47 crc kubenswrapper[4492]: I1126 09:02:47.777035 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8h7jh" event={"ID":"9f06ab40-d187-472f-8279-5a81d0492c7f","Type":"ContainerStarted","Data":"79a40bf2c38d4d34d38ba3db68206b45390428d1b81d4c2e0c0b880fb6b3723e"} Nov 26 09:02:48 crc kubenswrapper[4492]: I1126 09:02:48.787466 4492 generic.go:334] "Generic (PLEG): container finished" podID="9f06ab40-d187-472f-8279-5a81d0492c7f" containerID="79a40bf2c38d4d34d38ba3db68206b45390428d1b81d4c2e0c0b880fb6b3723e" exitCode=0 Nov 26 09:02:48 crc kubenswrapper[4492]: I1126 09:02:48.787510 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8h7jh" event={"ID":"9f06ab40-d187-472f-8279-5a81d0492c7f","Type":"ContainerDied","Data":"79a40bf2c38d4d34d38ba3db68206b45390428d1b81d4c2e0c0b880fb6b3723e"} Nov 26 09:02:49 crc kubenswrapper[4492]: I1126 09:02:49.805381 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8h7jh" event={"ID":"9f06ab40-d187-472f-8279-5a81d0492c7f","Type":"ContainerStarted","Data":"38e72b9ac822d851c1756adc613160950bb86d1c87970ea9e5675370de448a26"} Nov 26 09:02:49 crc kubenswrapper[4492]: I1126 09:02:49.825238 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-8h7jh" podStartSLOduration=2.220925342 podStartE2EDuration="5.825202168s" podCreationTimestamp="2025-11-26 09:02:44 +0000 UTC" firstStartedPulling="2025-11-26 09:02:45.755934995 +0000 UTC m=+8061.639823293" lastFinishedPulling="2025-11-26 09:02:49.360211821 +0000 UTC m=+8065.244100119" observedRunningTime="2025-11-26 09:02:49.820380894 +0000 UTC m=+8065.704269191" watchObservedRunningTime="2025-11-26 09:02:49.825202168 +0000 UTC m=+8065.709090466" Nov 26 09:02:54 crc kubenswrapper[4492]: I1126 09:02:54.779015 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" 
status="unhealthy" pod="openshift-marketplace/certified-operators-8h7jh" Nov 26 09:02:54 crc kubenswrapper[4492]: I1126 09:02:54.779712 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-8h7jh" Nov 26 09:02:54 crc kubenswrapper[4492]: I1126 09:02:54.830532 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-8h7jh" Nov 26 09:02:54 crc kubenswrapper[4492]: I1126 09:02:54.895770 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-8h7jh" Nov 26 09:02:55 crc kubenswrapper[4492]: I1126 09:02:55.066291 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-8h7jh"] Nov 26 09:02:56 crc kubenswrapper[4492]: I1126 09:02:56.878329 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-8h7jh" podUID="9f06ab40-d187-472f-8279-5a81d0492c7f" containerName="registry-server" containerID="cri-o://38e72b9ac822d851c1756adc613160950bb86d1c87970ea9e5675370de448a26" gracePeriod=2 Nov 26 09:02:57 crc kubenswrapper[4492]: I1126 09:02:57.423392 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-8h7jh" Nov 26 09:02:57 crc kubenswrapper[4492]: I1126 09:02:57.582113 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4gcs4\" (UniqueName: \"kubernetes.io/projected/9f06ab40-d187-472f-8279-5a81d0492c7f-kube-api-access-4gcs4\") pod \"9f06ab40-d187-472f-8279-5a81d0492c7f\" (UID: \"9f06ab40-d187-472f-8279-5a81d0492c7f\") " Nov 26 09:02:57 crc kubenswrapper[4492]: I1126 09:02:57.582241 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9f06ab40-d187-472f-8279-5a81d0492c7f-utilities\") pod \"9f06ab40-d187-472f-8279-5a81d0492c7f\" (UID: \"9f06ab40-d187-472f-8279-5a81d0492c7f\") " Nov 26 09:02:57 crc kubenswrapper[4492]: I1126 09:02:57.582351 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9f06ab40-d187-472f-8279-5a81d0492c7f-catalog-content\") pod \"9f06ab40-d187-472f-8279-5a81d0492c7f\" (UID: \"9f06ab40-d187-472f-8279-5a81d0492c7f\") " Nov 26 09:02:57 crc kubenswrapper[4492]: I1126 09:02:57.584025 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9f06ab40-d187-472f-8279-5a81d0492c7f-utilities" (OuterVolumeSpecName: "utilities") pod "9f06ab40-d187-472f-8279-5a81d0492c7f" (UID: "9f06ab40-d187-472f-8279-5a81d0492c7f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 09:02:57 crc kubenswrapper[4492]: I1126 09:02:57.595654 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9f06ab40-d187-472f-8279-5a81d0492c7f-kube-api-access-4gcs4" (OuterVolumeSpecName: "kube-api-access-4gcs4") pod "9f06ab40-d187-472f-8279-5a81d0492c7f" (UID: "9f06ab40-d187-472f-8279-5a81d0492c7f"). InnerVolumeSpecName "kube-api-access-4gcs4". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 09:02:57 crc kubenswrapper[4492]: I1126 09:02:57.636434 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9f06ab40-d187-472f-8279-5a81d0492c7f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9f06ab40-d187-472f-8279-5a81d0492c7f" (UID: "9f06ab40-d187-472f-8279-5a81d0492c7f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 09:02:57 crc kubenswrapper[4492]: I1126 09:02:57.687445 4492 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9f06ab40-d187-472f-8279-5a81d0492c7f-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 09:02:57 crc kubenswrapper[4492]: I1126 09:02:57.687480 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4gcs4\" (UniqueName: \"kubernetes.io/projected/9f06ab40-d187-472f-8279-5a81d0492c7f-kube-api-access-4gcs4\") on node \"crc\" DevicePath \"\"" Nov 26 09:02:57 crc kubenswrapper[4492]: I1126 09:02:57.687494 4492 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9f06ab40-d187-472f-8279-5a81d0492c7f-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 09:02:57 crc kubenswrapper[4492]: I1126 09:02:57.888839 4492 generic.go:334] "Generic (PLEG): container finished" podID="9f06ab40-d187-472f-8279-5a81d0492c7f" containerID="38e72b9ac822d851c1756adc613160950bb86d1c87970ea9e5675370de448a26" exitCode=0 Nov 26 09:02:57 crc kubenswrapper[4492]: I1126 09:02:57.888896 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8h7jh" event={"ID":"9f06ab40-d187-472f-8279-5a81d0492c7f","Type":"ContainerDied","Data":"38e72b9ac822d851c1756adc613160950bb86d1c87970ea9e5675370de448a26"} Nov 26 09:02:57 crc kubenswrapper[4492]: I1126 09:02:57.888914 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-8h7jh" Nov 26 09:02:57 crc kubenswrapper[4492]: I1126 09:02:57.888932 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8h7jh" event={"ID":"9f06ab40-d187-472f-8279-5a81d0492c7f","Type":"ContainerDied","Data":"738e86b2fcba466e8823eace12f8e4b5a8d88119c59b74395ce0eb1e87aa146c"} Nov 26 09:02:57 crc kubenswrapper[4492]: I1126 09:02:57.888951 4492 scope.go:117] "RemoveContainer" containerID="38e72b9ac822d851c1756adc613160950bb86d1c87970ea9e5675370de448a26" Nov 26 09:02:57 crc kubenswrapper[4492]: I1126 09:02:57.921925 4492 scope.go:117] "RemoveContainer" containerID="79a40bf2c38d4d34d38ba3db68206b45390428d1b81d4c2e0c0b880fb6b3723e" Nov 26 09:02:57 crc kubenswrapper[4492]: I1126 09:02:57.922528 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-8h7jh"] Nov 26 09:02:57 crc kubenswrapper[4492]: I1126 09:02:57.930000 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-8h7jh"] Nov 26 09:02:57 crc kubenswrapper[4492]: I1126 09:02:57.949730 4492 scope.go:117] "RemoveContainer" containerID="d186918daa6b9ca1397f8d72756d442512a0cb83df333f563f128389372cca13" Nov 26 09:02:57 crc kubenswrapper[4492]: I1126 09:02:57.977967 4492 scope.go:117] "RemoveContainer" containerID="38e72b9ac822d851c1756adc613160950bb86d1c87970ea9e5675370de448a26" Nov 26 09:02:57 crc kubenswrapper[4492]: E1126 09:02:57.980165 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"38e72b9ac822d851c1756adc613160950bb86d1c87970ea9e5675370de448a26\": container with ID starting with 38e72b9ac822d851c1756adc613160950bb86d1c87970ea9e5675370de448a26 not found: ID does not exist" containerID="38e72b9ac822d851c1756adc613160950bb86d1c87970ea9e5675370de448a26" Nov 26 09:02:57 crc kubenswrapper[4492]: I1126 09:02:57.980895 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"38e72b9ac822d851c1756adc613160950bb86d1c87970ea9e5675370de448a26"} err="failed to get container status \"38e72b9ac822d851c1756adc613160950bb86d1c87970ea9e5675370de448a26\": rpc error: code = NotFound desc = could not find container \"38e72b9ac822d851c1756adc613160950bb86d1c87970ea9e5675370de448a26\": container with ID starting with 38e72b9ac822d851c1756adc613160950bb86d1c87970ea9e5675370de448a26 not found: ID does not exist" Nov 26 09:02:57 crc kubenswrapper[4492]: I1126 09:02:57.980930 4492 scope.go:117] "RemoveContainer" containerID="79a40bf2c38d4d34d38ba3db68206b45390428d1b81d4c2e0c0b880fb6b3723e" Nov 26 09:02:57 crc kubenswrapper[4492]: E1126 09:02:57.982977 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"79a40bf2c38d4d34d38ba3db68206b45390428d1b81d4c2e0c0b880fb6b3723e\": container with ID starting with 79a40bf2c38d4d34d38ba3db68206b45390428d1b81d4c2e0c0b880fb6b3723e not found: ID does not exist" containerID="79a40bf2c38d4d34d38ba3db68206b45390428d1b81d4c2e0c0b880fb6b3723e" Nov 26 09:02:57 crc kubenswrapper[4492]: I1126 09:02:57.983025 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"79a40bf2c38d4d34d38ba3db68206b45390428d1b81d4c2e0c0b880fb6b3723e"} err="failed to get container status \"79a40bf2c38d4d34d38ba3db68206b45390428d1b81d4c2e0c0b880fb6b3723e\": rpc error: code = NotFound desc = could not find 
container \"79a40bf2c38d4d34d38ba3db68206b45390428d1b81d4c2e0c0b880fb6b3723e\": container with ID starting with 79a40bf2c38d4d34d38ba3db68206b45390428d1b81d4c2e0c0b880fb6b3723e not found: ID does not exist" Nov 26 09:02:57 crc kubenswrapper[4492]: I1126 09:02:57.983062 4492 scope.go:117] "RemoveContainer" containerID="d186918daa6b9ca1397f8d72756d442512a0cb83df333f563f128389372cca13" Nov 26 09:02:57 crc kubenswrapper[4492]: E1126 09:02:57.983477 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d186918daa6b9ca1397f8d72756d442512a0cb83df333f563f128389372cca13\": container with ID starting with d186918daa6b9ca1397f8d72756d442512a0cb83df333f563f128389372cca13 not found: ID does not exist" containerID="d186918daa6b9ca1397f8d72756d442512a0cb83df333f563f128389372cca13" Nov 26 09:02:57 crc kubenswrapper[4492]: I1126 09:02:57.983506 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d186918daa6b9ca1397f8d72756d442512a0cb83df333f563f128389372cca13"} err="failed to get container status \"d186918daa6b9ca1397f8d72756d442512a0cb83df333f563f128389372cca13\": rpc error: code = NotFound desc = could not find container \"d186918daa6b9ca1397f8d72756d442512a0cb83df333f563f128389372cca13\": container with ID starting with d186918daa6b9ca1397f8d72756d442512a0cb83df333f563f128389372cca13 not found: ID does not exist" Nov 26 09:02:58 crc kubenswrapper[4492]: I1126 09:02:58.453933 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9f06ab40-d187-472f-8279-5a81d0492c7f" path="/var/lib/kubelet/pods/9f06ab40-d187-472f-8279-5a81d0492c7f/volumes" Nov 26 09:04:46 crc kubenswrapper[4492]: I1126 09:04:46.493805 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-rn49q"] Nov 26 09:04:46 crc kubenswrapper[4492]: E1126 09:04:46.500889 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9f06ab40-d187-472f-8279-5a81d0492c7f" containerName="extract-utilities" Nov 26 09:04:46 crc kubenswrapper[4492]: I1126 09:04:46.501163 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="9f06ab40-d187-472f-8279-5a81d0492c7f" containerName="extract-utilities" Nov 26 09:04:46 crc kubenswrapper[4492]: E1126 09:04:46.501498 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9f06ab40-d187-472f-8279-5a81d0492c7f" containerName="extract-content" Nov 26 09:04:46 crc kubenswrapper[4492]: I1126 09:04:46.501509 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="9f06ab40-d187-472f-8279-5a81d0492c7f" containerName="extract-content" Nov 26 09:04:46 crc kubenswrapper[4492]: E1126 09:04:46.501547 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9f06ab40-d187-472f-8279-5a81d0492c7f" containerName="registry-server" Nov 26 09:04:46 crc kubenswrapper[4492]: I1126 09:04:46.501560 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="9f06ab40-d187-472f-8279-5a81d0492c7f" containerName="registry-server" Nov 26 09:04:46 crc kubenswrapper[4492]: I1126 09:04:46.502694 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="9f06ab40-d187-472f-8279-5a81d0492c7f" containerName="registry-server" Nov 26 09:04:46 crc kubenswrapper[4492]: I1126 09:04:46.513509 4492 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rn49q" Nov 26 09:04:46 crc kubenswrapper[4492]: I1126 09:04:46.600858 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-rn49q"] Nov 26 09:04:46 crc kubenswrapper[4492]: I1126 09:04:46.655658 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/743665b7-c3a5-4290-87cc-ca35dfd32f99-utilities\") pod \"redhat-marketplace-rn49q\" (UID: \"743665b7-c3a5-4290-87cc-ca35dfd32f99\") " pod="openshift-marketplace/redhat-marketplace-rn49q" Nov 26 09:04:46 crc kubenswrapper[4492]: I1126 09:04:46.655738 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/743665b7-c3a5-4290-87cc-ca35dfd32f99-catalog-content\") pod \"redhat-marketplace-rn49q\" (UID: \"743665b7-c3a5-4290-87cc-ca35dfd32f99\") " pod="openshift-marketplace/redhat-marketplace-rn49q" Nov 26 09:04:46 crc kubenswrapper[4492]: I1126 09:04:46.655825 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2gpgg\" (UniqueName: \"kubernetes.io/projected/743665b7-c3a5-4290-87cc-ca35dfd32f99-kube-api-access-2gpgg\") pod \"redhat-marketplace-rn49q\" (UID: \"743665b7-c3a5-4290-87cc-ca35dfd32f99\") " pod="openshift-marketplace/redhat-marketplace-rn49q" Nov 26 09:04:46 crc kubenswrapper[4492]: I1126 09:04:46.758097 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/743665b7-c3a5-4290-87cc-ca35dfd32f99-utilities\") pod \"redhat-marketplace-rn49q\" (UID: \"743665b7-c3a5-4290-87cc-ca35dfd32f99\") " pod="openshift-marketplace/redhat-marketplace-rn49q" Nov 26 09:04:46 crc kubenswrapper[4492]: I1126 09:04:46.758137 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/743665b7-c3a5-4290-87cc-ca35dfd32f99-catalog-content\") pod \"redhat-marketplace-rn49q\" (UID: \"743665b7-c3a5-4290-87cc-ca35dfd32f99\") " pod="openshift-marketplace/redhat-marketplace-rn49q" Nov 26 09:04:46 crc kubenswrapper[4492]: I1126 09:04:46.758204 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2gpgg\" (UniqueName: \"kubernetes.io/projected/743665b7-c3a5-4290-87cc-ca35dfd32f99-kube-api-access-2gpgg\") pod \"redhat-marketplace-rn49q\" (UID: \"743665b7-c3a5-4290-87cc-ca35dfd32f99\") " pod="openshift-marketplace/redhat-marketplace-rn49q" Nov 26 09:04:46 crc kubenswrapper[4492]: I1126 09:04:46.761530 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/743665b7-c3a5-4290-87cc-ca35dfd32f99-utilities\") pod \"redhat-marketplace-rn49q\" (UID: \"743665b7-c3a5-4290-87cc-ca35dfd32f99\") " pod="openshift-marketplace/redhat-marketplace-rn49q" Nov 26 09:04:46 crc kubenswrapper[4492]: I1126 09:04:46.762010 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/743665b7-c3a5-4290-87cc-ca35dfd32f99-catalog-content\") pod \"redhat-marketplace-rn49q\" (UID: \"743665b7-c3a5-4290-87cc-ca35dfd32f99\") " pod="openshift-marketplace/redhat-marketplace-rn49q" Nov 26 09:04:46 crc kubenswrapper[4492]: I1126 09:04:46.792858 4492 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-2gpgg\" (UniqueName: \"kubernetes.io/projected/743665b7-c3a5-4290-87cc-ca35dfd32f99-kube-api-access-2gpgg\") pod \"redhat-marketplace-rn49q\" (UID: \"743665b7-c3a5-4290-87cc-ca35dfd32f99\") " pod="openshift-marketplace/redhat-marketplace-rn49q" Nov 26 09:04:46 crc kubenswrapper[4492]: I1126 09:04:46.840354 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rn49q" Nov 26 09:04:47 crc kubenswrapper[4492]: I1126 09:04:47.750022 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-rn49q"] Nov 26 09:04:47 crc kubenswrapper[4492]: I1126 09:04:47.862674 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rn49q" event={"ID":"743665b7-c3a5-4290-87cc-ca35dfd32f99","Type":"ContainerStarted","Data":"9f67bc1caa4aa047bb0ba471ec1ae13bbd588445260bf972e05eefe0ea4d2832"} Nov 26 09:04:48 crc kubenswrapper[4492]: I1126 09:04:48.888347 4492 generic.go:334] "Generic (PLEG): container finished" podID="743665b7-c3a5-4290-87cc-ca35dfd32f99" containerID="7ae71e04b1391173dccc4892600b22bac804f1a719f2f1a608ecf53a92d36096" exitCode=0 Nov 26 09:04:48 crc kubenswrapper[4492]: I1126 09:04:48.889205 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rn49q" event={"ID":"743665b7-c3a5-4290-87cc-ca35dfd32f99","Type":"ContainerDied","Data":"7ae71e04b1391173dccc4892600b22bac804f1a719f2f1a608ecf53a92d36096"} Nov 26 09:04:49 crc kubenswrapper[4492]: I1126 09:04:49.442249 4492 patch_prober.go:28] interesting pod/machine-config-daemon-6blv7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 09:04:49 crc kubenswrapper[4492]: I1126 09:04:49.443519 4492 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 09:04:50 crc kubenswrapper[4492]: I1126 09:04:50.910235 4492 generic.go:334] "Generic (PLEG): container finished" podID="743665b7-c3a5-4290-87cc-ca35dfd32f99" containerID="e1809155bcf5138908405bbd8eceb6e823960db697bd54620bcf5314dd1f20d9" exitCode=0 Nov 26 09:04:50 crc kubenswrapper[4492]: I1126 09:04:50.910282 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rn49q" event={"ID":"743665b7-c3a5-4290-87cc-ca35dfd32f99","Type":"ContainerDied","Data":"e1809155bcf5138908405bbd8eceb6e823960db697bd54620bcf5314dd1f20d9"} Nov 26 09:04:51 crc kubenswrapper[4492]: I1126 09:04:51.926299 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rn49q" event={"ID":"743665b7-c3a5-4290-87cc-ca35dfd32f99","Type":"ContainerStarted","Data":"2473651df3195a6a620dc16e4c655c809c76a440d6b032ee4a3846104f0dad65"} Nov 26 09:04:51 crc kubenswrapper[4492]: I1126 09:04:51.956139 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-rn49q" podStartSLOduration=3.4118606590000002 podStartE2EDuration="5.955193014s" podCreationTimestamp="2025-11-26 09:04:46 +0000 UTC" firstStartedPulling="2025-11-26 
09:04:48.893212909 +0000 UTC m=+8184.777101207" lastFinishedPulling="2025-11-26 09:04:51.436545264 +0000 UTC m=+8187.320433562" observedRunningTime="2025-11-26 09:04:51.945088716 +0000 UTC m=+8187.828977014" watchObservedRunningTime="2025-11-26 09:04:51.955193014 +0000 UTC m=+8187.839081312" Nov 26 09:04:56 crc kubenswrapper[4492]: I1126 09:04:56.841103 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-rn49q" Nov 26 09:04:56 crc kubenswrapper[4492]: I1126 09:04:56.841857 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-rn49q" Nov 26 09:04:56 crc kubenswrapper[4492]: I1126 09:04:56.888375 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-rn49q" Nov 26 09:04:57 crc kubenswrapper[4492]: I1126 09:04:57.024658 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-rn49q" Nov 26 09:04:57 crc kubenswrapper[4492]: I1126 09:04:57.126197 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-rn49q"] Nov 26 09:04:58 crc kubenswrapper[4492]: I1126 09:04:58.993348 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-rn49q" podUID="743665b7-c3a5-4290-87cc-ca35dfd32f99" containerName="registry-server" containerID="cri-o://2473651df3195a6a620dc16e4c655c809c76a440d6b032ee4a3846104f0dad65" gracePeriod=2 Nov 26 09:04:59 crc kubenswrapper[4492]: I1126 09:04:59.777838 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rn49q" Nov 26 09:04:59 crc kubenswrapper[4492]: I1126 09:04:59.948547 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2gpgg\" (UniqueName: \"kubernetes.io/projected/743665b7-c3a5-4290-87cc-ca35dfd32f99-kube-api-access-2gpgg\") pod \"743665b7-c3a5-4290-87cc-ca35dfd32f99\" (UID: \"743665b7-c3a5-4290-87cc-ca35dfd32f99\") " Nov 26 09:04:59 crc kubenswrapper[4492]: I1126 09:04:59.948934 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/743665b7-c3a5-4290-87cc-ca35dfd32f99-catalog-content\") pod \"743665b7-c3a5-4290-87cc-ca35dfd32f99\" (UID: \"743665b7-c3a5-4290-87cc-ca35dfd32f99\") " Nov 26 09:04:59 crc kubenswrapper[4492]: I1126 09:04:59.949080 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/743665b7-c3a5-4290-87cc-ca35dfd32f99-utilities\") pod \"743665b7-c3a5-4290-87cc-ca35dfd32f99\" (UID: \"743665b7-c3a5-4290-87cc-ca35dfd32f99\") " Nov 26 09:04:59 crc kubenswrapper[4492]: I1126 09:04:59.949878 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/743665b7-c3a5-4290-87cc-ca35dfd32f99-utilities" (OuterVolumeSpecName: "utilities") pod "743665b7-c3a5-4290-87cc-ca35dfd32f99" (UID: "743665b7-c3a5-4290-87cc-ca35dfd32f99"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 09:04:59 crc kubenswrapper[4492]: I1126 09:04:59.950820 4492 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/743665b7-c3a5-4290-87cc-ca35dfd32f99-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 09:04:59 crc kubenswrapper[4492]: I1126 09:04:59.966760 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/743665b7-c3a5-4290-87cc-ca35dfd32f99-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "743665b7-c3a5-4290-87cc-ca35dfd32f99" (UID: "743665b7-c3a5-4290-87cc-ca35dfd32f99"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 09:04:59 crc kubenswrapper[4492]: I1126 09:04:59.967311 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/743665b7-c3a5-4290-87cc-ca35dfd32f99-kube-api-access-2gpgg" (OuterVolumeSpecName: "kube-api-access-2gpgg") pod "743665b7-c3a5-4290-87cc-ca35dfd32f99" (UID: "743665b7-c3a5-4290-87cc-ca35dfd32f99"). InnerVolumeSpecName "kube-api-access-2gpgg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 09:05:00 crc kubenswrapper[4492]: I1126 09:05:00.033784 4492 generic.go:334] "Generic (PLEG): container finished" podID="743665b7-c3a5-4290-87cc-ca35dfd32f99" containerID="2473651df3195a6a620dc16e4c655c809c76a440d6b032ee4a3846104f0dad65" exitCode=0 Nov 26 09:05:00 crc kubenswrapper[4492]: I1126 09:05:00.033853 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rn49q" event={"ID":"743665b7-c3a5-4290-87cc-ca35dfd32f99","Type":"ContainerDied","Data":"2473651df3195a6a620dc16e4c655c809c76a440d6b032ee4a3846104f0dad65"} Nov 26 09:05:00 crc kubenswrapper[4492]: I1126 09:05:00.034120 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rn49q" event={"ID":"743665b7-c3a5-4290-87cc-ca35dfd32f99","Type":"ContainerDied","Data":"9f67bc1caa4aa047bb0ba471ec1ae13bbd588445260bf972e05eefe0ea4d2832"} Nov 26 09:05:00 crc kubenswrapper[4492]: I1126 09:05:00.039684 4492 scope.go:117] "RemoveContainer" containerID="2473651df3195a6a620dc16e4c655c809c76a440d6b032ee4a3846104f0dad65" Nov 26 09:05:00 crc kubenswrapper[4492]: I1126 09:05:00.039971 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rn49q" Nov 26 09:05:00 crc kubenswrapper[4492]: I1126 09:05:00.062545 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2gpgg\" (UniqueName: \"kubernetes.io/projected/743665b7-c3a5-4290-87cc-ca35dfd32f99-kube-api-access-2gpgg\") on node \"crc\" DevicePath \"\"" Nov 26 09:05:00 crc kubenswrapper[4492]: I1126 09:05:00.063072 4492 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/743665b7-c3a5-4290-87cc-ca35dfd32f99-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 09:05:00 crc kubenswrapper[4492]: I1126 09:05:00.082117 4492 scope.go:117] "RemoveContainer" containerID="e1809155bcf5138908405bbd8eceb6e823960db697bd54620bcf5314dd1f20d9" Nov 26 09:05:00 crc kubenswrapper[4492]: I1126 09:05:00.096288 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-rn49q"] Nov 26 09:05:00 crc kubenswrapper[4492]: I1126 09:05:00.103598 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-rn49q"] Nov 26 09:05:00 crc kubenswrapper[4492]: I1126 09:05:00.113213 4492 scope.go:117] "RemoveContainer" containerID="7ae71e04b1391173dccc4892600b22bac804f1a719f2f1a608ecf53a92d36096" Nov 26 09:05:00 crc kubenswrapper[4492]: I1126 09:05:00.141061 4492 scope.go:117] "RemoveContainer" containerID="2473651df3195a6a620dc16e4c655c809c76a440d6b032ee4a3846104f0dad65" Nov 26 09:05:00 crc kubenswrapper[4492]: E1126 09:05:00.145339 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2473651df3195a6a620dc16e4c655c809c76a440d6b032ee4a3846104f0dad65\": container with ID starting with 2473651df3195a6a620dc16e4c655c809c76a440d6b032ee4a3846104f0dad65 not found: ID does not exist" containerID="2473651df3195a6a620dc16e4c655c809c76a440d6b032ee4a3846104f0dad65" Nov 26 09:05:00 crc kubenswrapper[4492]: I1126 09:05:00.145417 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2473651df3195a6a620dc16e4c655c809c76a440d6b032ee4a3846104f0dad65"} err="failed to get container status \"2473651df3195a6a620dc16e4c655c809c76a440d6b032ee4a3846104f0dad65\": rpc error: code = NotFound desc = could not find container \"2473651df3195a6a620dc16e4c655c809c76a440d6b032ee4a3846104f0dad65\": container with ID starting with 2473651df3195a6a620dc16e4c655c809c76a440d6b032ee4a3846104f0dad65 not found: ID does not exist" Nov 26 09:05:00 crc kubenswrapper[4492]: I1126 09:05:00.145460 4492 scope.go:117] "RemoveContainer" containerID="e1809155bcf5138908405bbd8eceb6e823960db697bd54620bcf5314dd1f20d9" Nov 26 09:05:00 crc kubenswrapper[4492]: E1126 09:05:00.146051 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e1809155bcf5138908405bbd8eceb6e823960db697bd54620bcf5314dd1f20d9\": container with ID starting with e1809155bcf5138908405bbd8eceb6e823960db697bd54620bcf5314dd1f20d9 not found: ID does not exist" containerID="e1809155bcf5138908405bbd8eceb6e823960db697bd54620bcf5314dd1f20d9" Nov 26 09:05:00 crc kubenswrapper[4492]: I1126 09:05:00.146120 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e1809155bcf5138908405bbd8eceb6e823960db697bd54620bcf5314dd1f20d9"} err="failed to get container status \"e1809155bcf5138908405bbd8eceb6e823960db697bd54620bcf5314dd1f20d9\": rpc 
error: code = NotFound desc = could not find container \"e1809155bcf5138908405bbd8eceb6e823960db697bd54620bcf5314dd1f20d9\": container with ID starting with e1809155bcf5138908405bbd8eceb6e823960db697bd54620bcf5314dd1f20d9 not found: ID does not exist" Nov 26 09:05:00 crc kubenswrapper[4492]: I1126 09:05:00.146150 4492 scope.go:117] "RemoveContainer" containerID="7ae71e04b1391173dccc4892600b22bac804f1a719f2f1a608ecf53a92d36096" Nov 26 09:05:00 crc kubenswrapper[4492]: E1126 09:05:00.146659 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7ae71e04b1391173dccc4892600b22bac804f1a719f2f1a608ecf53a92d36096\": container with ID starting with 7ae71e04b1391173dccc4892600b22bac804f1a719f2f1a608ecf53a92d36096 not found: ID does not exist" containerID="7ae71e04b1391173dccc4892600b22bac804f1a719f2f1a608ecf53a92d36096" Nov 26 09:05:00 crc kubenswrapper[4492]: I1126 09:05:00.146712 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7ae71e04b1391173dccc4892600b22bac804f1a719f2f1a608ecf53a92d36096"} err="failed to get container status \"7ae71e04b1391173dccc4892600b22bac804f1a719f2f1a608ecf53a92d36096\": rpc error: code = NotFound desc = could not find container \"7ae71e04b1391173dccc4892600b22bac804f1a719f2f1a608ecf53a92d36096\": container with ID starting with 7ae71e04b1391173dccc4892600b22bac804f1a719f2f1a608ecf53a92d36096 not found: ID does not exist" Nov 26 09:05:00 crc kubenswrapper[4492]: I1126 09:05:00.454928 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="743665b7-c3a5-4290-87cc-ca35dfd32f99" path="/var/lib/kubelet/pods/743665b7-c3a5-4290-87cc-ca35dfd32f99/volumes" Nov 26 09:05:19 crc kubenswrapper[4492]: I1126 09:05:19.442262 4492 patch_prober.go:28] interesting pod/machine-config-daemon-6blv7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 09:05:19 crc kubenswrapper[4492]: I1126 09:05:19.443014 4492 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 09:05:45 crc kubenswrapper[4492]: I1126 09:05:45.701101 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-ddvm5"] Nov 26 09:05:45 crc kubenswrapper[4492]: E1126 09:05:45.706472 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="743665b7-c3a5-4290-87cc-ca35dfd32f99" containerName="extract-utilities" Nov 26 09:05:45 crc kubenswrapper[4492]: I1126 09:05:45.706510 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="743665b7-c3a5-4290-87cc-ca35dfd32f99" containerName="extract-utilities" Nov 26 09:05:45 crc kubenswrapper[4492]: E1126 09:05:45.706802 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="743665b7-c3a5-4290-87cc-ca35dfd32f99" containerName="registry-server" Nov 26 09:05:45 crc kubenswrapper[4492]: I1126 09:05:45.706818 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="743665b7-c3a5-4290-87cc-ca35dfd32f99" containerName="registry-server" Nov 26 09:05:45 crc kubenswrapper[4492]: E1126 09:05:45.706859 4492 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="743665b7-c3a5-4290-87cc-ca35dfd32f99" containerName="extract-content" Nov 26 09:05:45 crc kubenswrapper[4492]: I1126 09:05:45.706867 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="743665b7-c3a5-4290-87cc-ca35dfd32f99" containerName="extract-content" Nov 26 09:05:45 crc kubenswrapper[4492]: I1126 09:05:45.708403 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="743665b7-c3a5-4290-87cc-ca35dfd32f99" containerName="registry-server" Nov 26 09:05:45 crc kubenswrapper[4492]: I1126 09:05:45.713499 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-ddvm5" Nov 26 09:05:45 crc kubenswrapper[4492]: I1126 09:05:45.733096 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-ddvm5"] Nov 26 09:05:45 crc kubenswrapper[4492]: I1126 09:05:45.858466 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a978e747-8a24-42e9-bd02-cfbffad340c7-utilities\") pod \"redhat-operators-ddvm5\" (UID: \"a978e747-8a24-42e9-bd02-cfbffad340c7\") " pod="openshift-marketplace/redhat-operators-ddvm5" Nov 26 09:05:45 crc kubenswrapper[4492]: I1126 09:05:45.859009 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bdnnj\" (UniqueName: \"kubernetes.io/projected/a978e747-8a24-42e9-bd02-cfbffad340c7-kube-api-access-bdnnj\") pod \"redhat-operators-ddvm5\" (UID: \"a978e747-8a24-42e9-bd02-cfbffad340c7\") " pod="openshift-marketplace/redhat-operators-ddvm5" Nov 26 09:05:45 crc kubenswrapper[4492]: I1126 09:05:45.859361 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a978e747-8a24-42e9-bd02-cfbffad340c7-catalog-content\") pod \"redhat-operators-ddvm5\" (UID: \"a978e747-8a24-42e9-bd02-cfbffad340c7\") " pod="openshift-marketplace/redhat-operators-ddvm5" Nov 26 09:05:45 crc kubenswrapper[4492]: I1126 09:05:45.962256 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a978e747-8a24-42e9-bd02-cfbffad340c7-catalog-content\") pod \"redhat-operators-ddvm5\" (UID: \"a978e747-8a24-42e9-bd02-cfbffad340c7\") " pod="openshift-marketplace/redhat-operators-ddvm5" Nov 26 09:05:45 crc kubenswrapper[4492]: I1126 09:05:45.962832 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a978e747-8a24-42e9-bd02-cfbffad340c7-utilities\") pod \"redhat-operators-ddvm5\" (UID: \"a978e747-8a24-42e9-bd02-cfbffad340c7\") " pod="openshift-marketplace/redhat-operators-ddvm5" Nov 26 09:05:45 crc kubenswrapper[4492]: I1126 09:05:45.963489 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bdnnj\" (UniqueName: \"kubernetes.io/projected/a978e747-8a24-42e9-bd02-cfbffad340c7-kube-api-access-bdnnj\") pod \"redhat-operators-ddvm5\" (UID: \"a978e747-8a24-42e9-bd02-cfbffad340c7\") " pod="openshift-marketplace/redhat-operators-ddvm5" Nov 26 09:05:45 crc kubenswrapper[4492]: I1126 09:05:45.964115 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a978e747-8a24-42e9-bd02-cfbffad340c7-utilities\") pod \"redhat-operators-ddvm5\" (UID: 
\"a978e747-8a24-42e9-bd02-cfbffad340c7\") " pod="openshift-marketplace/redhat-operators-ddvm5" Nov 26 09:05:45 crc kubenswrapper[4492]: I1126 09:05:45.964113 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a978e747-8a24-42e9-bd02-cfbffad340c7-catalog-content\") pod \"redhat-operators-ddvm5\" (UID: \"a978e747-8a24-42e9-bd02-cfbffad340c7\") " pod="openshift-marketplace/redhat-operators-ddvm5" Nov 26 09:05:45 crc kubenswrapper[4492]: I1126 09:05:45.995727 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bdnnj\" (UniqueName: \"kubernetes.io/projected/a978e747-8a24-42e9-bd02-cfbffad340c7-kube-api-access-bdnnj\") pod \"redhat-operators-ddvm5\" (UID: \"a978e747-8a24-42e9-bd02-cfbffad340c7\") " pod="openshift-marketplace/redhat-operators-ddvm5" Nov 26 09:05:46 crc kubenswrapper[4492]: I1126 09:05:46.037979 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-ddvm5" Nov 26 09:05:46 crc kubenswrapper[4492]: I1126 09:05:46.934840 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-ddvm5"] Nov 26 09:05:47 crc kubenswrapper[4492]: I1126 09:05:47.476458 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ddvm5" event={"ID":"a978e747-8a24-42e9-bd02-cfbffad340c7","Type":"ContainerDied","Data":"f9998ac9003b2d1165049b974bfc7a65c5f8242ee57d400afc1df963b17b4f23"} Nov 26 09:05:47 crc kubenswrapper[4492]: I1126 09:05:47.476978 4492 generic.go:334] "Generic (PLEG): container finished" podID="a978e747-8a24-42e9-bd02-cfbffad340c7" containerID="f9998ac9003b2d1165049b974bfc7a65c5f8242ee57d400afc1df963b17b4f23" exitCode=0 Nov 26 09:05:47 crc kubenswrapper[4492]: I1126 09:05:47.477363 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ddvm5" event={"ID":"a978e747-8a24-42e9-bd02-cfbffad340c7","Type":"ContainerStarted","Data":"856d2dc8c5790ead760d55c13808344b8d5622df70aa047d3f17a683dc093d8f"} Nov 26 09:05:48 crc kubenswrapper[4492]: I1126 09:05:48.493532 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ddvm5" event={"ID":"a978e747-8a24-42e9-bd02-cfbffad340c7","Type":"ContainerStarted","Data":"6d68ae8b4620bb2092c98bcfb658ac774fc480ae092ca570ab0a081b75f8c330"} Nov 26 09:05:49 crc kubenswrapper[4492]: I1126 09:05:49.442437 4492 patch_prober.go:28] interesting pod/machine-config-daemon-6blv7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 09:05:49 crc kubenswrapper[4492]: I1126 09:05:49.444536 4492 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 09:05:49 crc kubenswrapper[4492]: I1126 09:05:49.444622 4492 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" Nov 26 09:05:49 crc kubenswrapper[4492]: I1126 09:05:49.445616 4492 kuberuntime_manager.go:1027] "Message for Container of pod" 
containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"061e1f81f1523052be73476b27602effef5c98d09cef1e78cf7988c4252da260"} pod="openshift-machine-config-operator/machine-config-daemon-6blv7" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 09:05:49 crc kubenswrapper[4492]: I1126 09:05:49.445714 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" containerID="cri-o://061e1f81f1523052be73476b27602effef5c98d09cef1e78cf7988c4252da260" gracePeriod=600 Nov 26 09:05:50 crc kubenswrapper[4492]: I1126 09:05:50.516083 4492 generic.go:334] "Generic (PLEG): container finished" podID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerID="061e1f81f1523052be73476b27602effef5c98d09cef1e78cf7988c4252da260" exitCode=0 Nov 26 09:05:50 crc kubenswrapper[4492]: I1126 09:05:50.516289 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" event={"ID":"04bf18ad-d2a1-4b30-a3fa-2b6247363c82","Type":"ContainerDied","Data":"061e1f81f1523052be73476b27602effef5c98d09cef1e78cf7988c4252da260"} Nov 26 09:05:50 crc kubenswrapper[4492]: I1126 09:05:50.516456 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" event={"ID":"04bf18ad-d2a1-4b30-a3fa-2b6247363c82","Type":"ContainerStarted","Data":"26ad1ff2e3509799014046a9dc1337022ea620be2e50ca261502e30860d2b41d"} Nov 26 09:05:50 crc kubenswrapper[4492]: I1126 09:05:50.517750 4492 scope.go:117] "RemoveContainer" containerID="743a43e250ae8ce23e6d073e4fe1db81f92bd076effc339817b028528d0faab4" Nov 26 09:05:51 crc kubenswrapper[4492]: I1126 09:05:51.539008 4492 generic.go:334] "Generic (PLEG): container finished" podID="a978e747-8a24-42e9-bd02-cfbffad340c7" containerID="6d68ae8b4620bb2092c98bcfb658ac774fc480ae092ca570ab0a081b75f8c330" exitCode=0 Nov 26 09:05:51 crc kubenswrapper[4492]: I1126 09:05:51.539329 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ddvm5" event={"ID":"a978e747-8a24-42e9-bd02-cfbffad340c7","Type":"ContainerDied","Data":"6d68ae8b4620bb2092c98bcfb658ac774fc480ae092ca570ab0a081b75f8c330"} Nov 26 09:05:51 crc kubenswrapper[4492]: I1126 09:05:51.544733 4492 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 26 09:05:52 crc kubenswrapper[4492]: I1126 09:05:52.562596 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ddvm5" event={"ID":"a978e747-8a24-42e9-bd02-cfbffad340c7","Type":"ContainerStarted","Data":"8679c8c4cfecb52603defb9b3f72c465fd8d766ba52aae3e039a1ba32657ffda"} Nov 26 09:05:52 crc kubenswrapper[4492]: I1126 09:05:52.592359 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-ddvm5" podStartSLOduration=3.017736455 podStartE2EDuration="7.591449265s" podCreationTimestamp="2025-11-26 09:05:45 +0000 UTC" firstStartedPulling="2025-11-26 09:05:47.478343101 +0000 UTC m=+8243.362231400" lastFinishedPulling="2025-11-26 09:05:52.052055922 +0000 UTC m=+8247.935944210" observedRunningTime="2025-11-26 09:05:52.584418516 +0000 UTC m=+8248.468306814" watchObservedRunningTime="2025-11-26 09:05:52.591449265 +0000 UTC m=+8248.475337563" Nov 26 09:05:56 crc kubenswrapper[4492]: I1126 
09:05:56.039146 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-ddvm5" Nov 26 09:05:56 crc kubenswrapper[4492]: I1126 09:05:56.039618 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-ddvm5" Nov 26 09:05:57 crc kubenswrapper[4492]: I1126 09:05:57.093375 4492 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-ddvm5" podUID="a978e747-8a24-42e9-bd02-cfbffad340c7" containerName="registry-server" probeResult="failure" output=< Nov 26 09:05:57 crc kubenswrapper[4492]: timeout: failed to connect service ":50051" within 1s Nov 26 09:05:57 crc kubenswrapper[4492]: > Nov 26 09:06:07 crc kubenswrapper[4492]: I1126 09:06:07.088051 4492 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-ddvm5" podUID="a978e747-8a24-42e9-bd02-cfbffad340c7" containerName="registry-server" probeResult="failure" output=< Nov 26 09:06:07 crc kubenswrapper[4492]: timeout: failed to connect service ":50051" within 1s Nov 26 09:06:07 crc kubenswrapper[4492]: > Nov 26 09:06:16 crc kubenswrapper[4492]: I1126 09:06:16.118338 4492 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-ddvm5" Nov 26 09:06:16 crc kubenswrapper[4492]: I1126 09:06:16.165042 4492 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-ddvm5" Nov 26 09:06:16 crc kubenswrapper[4492]: I1126 09:06:16.307125 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-ddvm5"] Nov 26 09:06:17 crc kubenswrapper[4492]: I1126 09:06:17.806446 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-ddvm5" podUID="a978e747-8a24-42e9-bd02-cfbffad340c7" containerName="registry-server" containerID="cri-o://8679c8c4cfecb52603defb9b3f72c465fd8d766ba52aae3e039a1ba32657ffda" gracePeriod=2 Nov 26 09:06:18 crc kubenswrapper[4492]: I1126 09:06:18.772300 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-ddvm5" Nov 26 09:06:18 crc kubenswrapper[4492]: I1126 09:06:18.814183 4492 generic.go:334] "Generic (PLEG): container finished" podID="a978e747-8a24-42e9-bd02-cfbffad340c7" containerID="8679c8c4cfecb52603defb9b3f72c465fd8d766ba52aae3e039a1ba32657ffda" exitCode=0 Nov 26 09:06:18 crc kubenswrapper[4492]: I1126 09:06:18.814488 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ddvm5" event={"ID":"a978e747-8a24-42e9-bd02-cfbffad340c7","Type":"ContainerDied","Data":"8679c8c4cfecb52603defb9b3f72c465fd8d766ba52aae3e039a1ba32657ffda"} Nov 26 09:06:18 crc kubenswrapper[4492]: I1126 09:06:18.814530 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ddvm5" event={"ID":"a978e747-8a24-42e9-bd02-cfbffad340c7","Type":"ContainerDied","Data":"856d2dc8c5790ead760d55c13808344b8d5622df70aa047d3f17a683dc093d8f"} Nov 26 09:06:18 crc kubenswrapper[4492]: I1126 09:06:18.815564 4492 scope.go:117] "RemoveContainer" containerID="8679c8c4cfecb52603defb9b3f72c465fd8d766ba52aae3e039a1ba32657ffda" Nov 26 09:06:18 crc kubenswrapper[4492]: I1126 09:06:18.816273 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-ddvm5" Nov 26 09:06:18 crc kubenswrapper[4492]: I1126 09:06:18.901162 4492 scope.go:117] "RemoveContainer" containerID="6d68ae8b4620bb2092c98bcfb658ac774fc480ae092ca570ab0a081b75f8c330" Nov 26 09:06:18 crc kubenswrapper[4492]: I1126 09:06:18.901347 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a978e747-8a24-42e9-bd02-cfbffad340c7-utilities\") pod \"a978e747-8a24-42e9-bd02-cfbffad340c7\" (UID: \"a978e747-8a24-42e9-bd02-cfbffad340c7\") " Nov 26 09:06:18 crc kubenswrapper[4492]: I1126 09:06:18.901534 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bdnnj\" (UniqueName: \"kubernetes.io/projected/a978e747-8a24-42e9-bd02-cfbffad340c7-kube-api-access-bdnnj\") pod \"a978e747-8a24-42e9-bd02-cfbffad340c7\" (UID: \"a978e747-8a24-42e9-bd02-cfbffad340c7\") " Nov 26 09:06:18 crc kubenswrapper[4492]: I1126 09:06:18.901576 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a978e747-8a24-42e9-bd02-cfbffad340c7-catalog-content\") pod \"a978e747-8a24-42e9-bd02-cfbffad340c7\" (UID: \"a978e747-8a24-42e9-bd02-cfbffad340c7\") " Nov 26 09:06:18 crc kubenswrapper[4492]: I1126 09:06:18.907079 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a978e747-8a24-42e9-bd02-cfbffad340c7-utilities" (OuterVolumeSpecName: "utilities") pod "a978e747-8a24-42e9-bd02-cfbffad340c7" (UID: "a978e747-8a24-42e9-bd02-cfbffad340c7"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 09:06:18 crc kubenswrapper[4492]: I1126 09:06:18.931807 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a978e747-8a24-42e9-bd02-cfbffad340c7-kube-api-access-bdnnj" (OuterVolumeSpecName: "kube-api-access-bdnnj") pod "a978e747-8a24-42e9-bd02-cfbffad340c7" (UID: "a978e747-8a24-42e9-bd02-cfbffad340c7"). InnerVolumeSpecName "kube-api-access-bdnnj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 09:06:18 crc kubenswrapper[4492]: I1126 09:06:18.942703 4492 scope.go:117] "RemoveContainer" containerID="f9998ac9003b2d1165049b974bfc7a65c5f8242ee57d400afc1df963b17b4f23" Nov 26 09:06:19 crc kubenswrapper[4492]: I1126 09:06:19.001403 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a978e747-8a24-42e9-bd02-cfbffad340c7-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a978e747-8a24-42e9-bd02-cfbffad340c7" (UID: "a978e747-8a24-42e9-bd02-cfbffad340c7"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 09:06:19 crc kubenswrapper[4492]: I1126 09:06:19.005729 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bdnnj\" (UniqueName: \"kubernetes.io/projected/a978e747-8a24-42e9-bd02-cfbffad340c7-kube-api-access-bdnnj\") on node \"crc\" DevicePath \"\"" Nov 26 09:06:19 crc kubenswrapper[4492]: I1126 09:06:19.005771 4492 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a978e747-8a24-42e9-bd02-cfbffad340c7-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 09:06:19 crc kubenswrapper[4492]: I1126 09:06:19.005783 4492 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a978e747-8a24-42e9-bd02-cfbffad340c7-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 09:06:19 crc kubenswrapper[4492]: I1126 09:06:19.013444 4492 scope.go:117] "RemoveContainer" containerID="8679c8c4cfecb52603defb9b3f72c465fd8d766ba52aae3e039a1ba32657ffda" Nov 26 09:06:19 crc kubenswrapper[4492]: E1126 09:06:19.018624 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8679c8c4cfecb52603defb9b3f72c465fd8d766ba52aae3e039a1ba32657ffda\": container with ID starting with 8679c8c4cfecb52603defb9b3f72c465fd8d766ba52aae3e039a1ba32657ffda not found: ID does not exist" containerID="8679c8c4cfecb52603defb9b3f72c465fd8d766ba52aae3e039a1ba32657ffda" Nov 26 09:06:19 crc kubenswrapper[4492]: I1126 09:06:19.018740 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8679c8c4cfecb52603defb9b3f72c465fd8d766ba52aae3e039a1ba32657ffda"} err="failed to get container status \"8679c8c4cfecb52603defb9b3f72c465fd8d766ba52aae3e039a1ba32657ffda\": rpc error: code = NotFound desc = could not find container \"8679c8c4cfecb52603defb9b3f72c465fd8d766ba52aae3e039a1ba32657ffda\": container with ID starting with 8679c8c4cfecb52603defb9b3f72c465fd8d766ba52aae3e039a1ba32657ffda not found: ID does not exist" Nov 26 09:06:19 crc kubenswrapper[4492]: I1126 09:06:19.018786 4492 scope.go:117] "RemoveContainer" containerID="6d68ae8b4620bb2092c98bcfb658ac774fc480ae092ca570ab0a081b75f8c330" Nov 26 09:06:19 crc kubenswrapper[4492]: E1126 09:06:19.019481 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6d68ae8b4620bb2092c98bcfb658ac774fc480ae092ca570ab0a081b75f8c330\": container with ID starting with 6d68ae8b4620bb2092c98bcfb658ac774fc480ae092ca570ab0a081b75f8c330 not found: ID does not exist" containerID="6d68ae8b4620bb2092c98bcfb658ac774fc480ae092ca570ab0a081b75f8c330" Nov 26 09:06:19 crc kubenswrapper[4492]: I1126 09:06:19.019534 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6d68ae8b4620bb2092c98bcfb658ac774fc480ae092ca570ab0a081b75f8c330"} err="failed to get container status \"6d68ae8b4620bb2092c98bcfb658ac774fc480ae092ca570ab0a081b75f8c330\": rpc error: code = NotFound desc = could not find container \"6d68ae8b4620bb2092c98bcfb658ac774fc480ae092ca570ab0a081b75f8c330\": container with ID starting with 6d68ae8b4620bb2092c98bcfb658ac774fc480ae092ca570ab0a081b75f8c330 not found: ID does not exist" Nov 26 09:06:19 crc kubenswrapper[4492]: I1126 09:06:19.019571 4492 scope.go:117] "RemoveContainer" containerID="f9998ac9003b2d1165049b974bfc7a65c5f8242ee57d400afc1df963b17b4f23" Nov 26 09:06:19 crc 
kubenswrapper[4492]: E1126 09:06:19.020035 4492 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f9998ac9003b2d1165049b974bfc7a65c5f8242ee57d400afc1df963b17b4f23\": container with ID starting with f9998ac9003b2d1165049b974bfc7a65c5f8242ee57d400afc1df963b17b4f23 not found: ID does not exist" containerID="f9998ac9003b2d1165049b974bfc7a65c5f8242ee57d400afc1df963b17b4f23" Nov 26 09:06:19 crc kubenswrapper[4492]: I1126 09:06:19.020063 4492 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f9998ac9003b2d1165049b974bfc7a65c5f8242ee57d400afc1df963b17b4f23"} err="failed to get container status \"f9998ac9003b2d1165049b974bfc7a65c5f8242ee57d400afc1df963b17b4f23\": rpc error: code = NotFound desc = could not find container \"f9998ac9003b2d1165049b974bfc7a65c5f8242ee57d400afc1df963b17b4f23\": container with ID starting with f9998ac9003b2d1165049b974bfc7a65c5f8242ee57d400afc1df963b17b4f23 not found: ID does not exist" Nov 26 09:06:19 crc kubenswrapper[4492]: I1126 09:06:19.154415 4492 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-ddvm5"] Nov 26 09:06:19 crc kubenswrapper[4492]: I1126 09:06:19.164152 4492 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-ddvm5"] Nov 26 09:06:20 crc kubenswrapper[4492]: I1126 09:06:20.450227 4492 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a978e747-8a24-42e9-bd02-cfbffad340c7" path="/var/lib/kubelet/pods/a978e747-8a24-42e9-bd02-cfbffad340c7/volumes" Nov 26 09:08:14 crc kubenswrapper[4492]: I1126 09:08:14.996324 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest-s01-single-thread-testing" event={"ID":"a7961ca9-f70f-4cb7-97c3-440a53316f29","Type":"ContainerDied","Data":"37fbe7fded7ea0aca54f988801f6cfb0979e06b162f2afd909aacb855185a038"} Nov 26 09:08:14 crc kubenswrapper[4492]: I1126 09:08:14.995980 4492 generic.go:334] "Generic (PLEG): container finished" podID="a7961ca9-f70f-4cb7-97c3-440a53316f29" containerID="37fbe7fded7ea0aca54f988801f6cfb0979e06b162f2afd909aacb855185a038" exitCode=0 Nov 26 09:08:16 crc kubenswrapper[4492]: I1126 09:08:16.769098 4492 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/tempest-tests-tempest-s01-single-thread-testing"
Nov 26 09:08:16 crc kubenswrapper[4492]: I1126 09:08:16.847751 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/a7961ca9-f70f-4cb7-97c3-440a53316f29-ca-certs\") pod \"a7961ca9-f70f-4cb7-97c3-440a53316f29\" (UID: \"a7961ca9-f70f-4cb7-97c3-440a53316f29\") "
Nov 26 09:08:16 crc kubenswrapper[4492]: I1126 09:08:16.847981 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/a7961ca9-f70f-4cb7-97c3-440a53316f29-openstack-config-secret\") pod \"a7961ca9-f70f-4cb7-97c3-440a53316f29\" (UID: \"a7961ca9-f70f-4cb7-97c3-440a53316f29\") "
Nov 26 09:08:16 crc kubenswrapper[4492]: I1126 09:08:16.848074 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/a7961ca9-f70f-4cb7-97c3-440a53316f29-test-operator-ephemeral-workdir\") pod \"a7961ca9-f70f-4cb7-97c3-440a53316f29\" (UID: \"a7961ca9-f70f-4cb7-97c3-440a53316f29\") "
Nov 26 09:08:16 crc kubenswrapper[4492]: I1126 09:08:16.848247 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/a7961ca9-f70f-4cb7-97c3-440a53316f29-openstack-config\") pod \"a7961ca9-f70f-4cb7-97c3-440a53316f29\" (UID: \"a7961ca9-f70f-4cb7-97c3-440a53316f29\") "
Nov 26 09:08:16 crc kubenswrapper[4492]: I1126 09:08:16.848327 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-logs\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"a7961ca9-f70f-4cb7-97c3-440a53316f29\" (UID: \"a7961ca9-f70f-4cb7-97c3-440a53316f29\") "
Nov 26 09:08:16 crc kubenswrapper[4492]: I1126 09:08:16.848404 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/a7961ca9-f70f-4cb7-97c3-440a53316f29-config-data\") pod \"a7961ca9-f70f-4cb7-97c3-440a53316f29\" (UID: \"a7961ca9-f70f-4cb7-97c3-440a53316f29\") "
Nov 26 09:08:16 crc kubenswrapper[4492]: I1126 09:08:16.848434 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/a7961ca9-f70f-4cb7-97c3-440a53316f29-test-operator-ephemeral-temporary\") pod \"a7961ca9-f70f-4cb7-97c3-440a53316f29\" (UID: \"a7961ca9-f70f-4cb7-97c3-440a53316f29\") "
Nov 26 09:08:16 crc kubenswrapper[4492]: I1126 09:08:16.848456 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k94mg\" (UniqueName: \"kubernetes.io/projected/a7961ca9-f70f-4cb7-97c3-440a53316f29-kube-api-access-k94mg\") pod \"a7961ca9-f70f-4cb7-97c3-440a53316f29\" (UID: \"a7961ca9-f70f-4cb7-97c3-440a53316f29\") "
Nov 26 09:08:16 crc kubenswrapper[4492]: I1126 09:08:16.848492 4492 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a7961ca9-f70f-4cb7-97c3-440a53316f29-ssh-key\") pod \"a7961ca9-f70f-4cb7-97c3-440a53316f29\" (UID: \"a7961ca9-f70f-4cb7-97c3-440a53316f29\") "
Nov 26 09:08:16 crc kubenswrapper[4492]: I1126 09:08:16.850903 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a7961ca9-f70f-4cb7-97c3-440a53316f29-test-operator-ephemeral-temporary" (OuterVolumeSpecName: "test-operator-ephemeral-temporary") pod "a7961ca9-f70f-4cb7-97c3-440a53316f29" (UID: "a7961ca9-f70f-4cb7-97c3-440a53316f29"). InnerVolumeSpecName "test-operator-ephemeral-temporary". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 09:08:16 crc kubenswrapper[4492]: I1126 09:08:16.851802 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a7961ca9-f70f-4cb7-97c3-440a53316f29-config-data" (OuterVolumeSpecName: "config-data") pod "a7961ca9-f70f-4cb7-97c3-440a53316f29" (UID: "a7961ca9-f70f-4cb7-97c3-440a53316f29"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 09:08:16 crc kubenswrapper[4492]: I1126 09:08:16.856011 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a7961ca9-f70f-4cb7-97c3-440a53316f29-test-operator-ephemeral-workdir" (OuterVolumeSpecName: "test-operator-ephemeral-workdir") pod "a7961ca9-f70f-4cb7-97c3-440a53316f29" (UID: "a7961ca9-f70f-4cb7-97c3-440a53316f29"). InnerVolumeSpecName "test-operator-ephemeral-workdir". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 09:08:16 crc kubenswrapper[4492]: I1126 09:08:16.867793 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a7961ca9-f70f-4cb7-97c3-440a53316f29-kube-api-access-k94mg" (OuterVolumeSpecName: "kube-api-access-k94mg") pod "a7961ca9-f70f-4cb7-97c3-440a53316f29" (UID: "a7961ca9-f70f-4cb7-97c3-440a53316f29"). InnerVolumeSpecName "kube-api-access-k94mg". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 09:08:16 crc kubenswrapper[4492]: I1126 09:08:16.877326 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage12-crc" (OuterVolumeSpecName: "test-operator-logs") pod "a7961ca9-f70f-4cb7-97c3-440a53316f29" (UID: "a7961ca9-f70f-4cb7-97c3-440a53316f29"). InnerVolumeSpecName "local-storage12-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue ""
Nov 26 09:08:16 crc kubenswrapper[4492]: I1126 09:08:16.882297 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a7961ca9-f70f-4cb7-97c3-440a53316f29-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "a7961ca9-f70f-4cb7-97c3-440a53316f29" (UID: "a7961ca9-f70f-4cb7-97c3-440a53316f29"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 09:08:16 crc kubenswrapper[4492]: I1126 09:08:16.882759 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a7961ca9-f70f-4cb7-97c3-440a53316f29-ca-certs" (OuterVolumeSpecName: "ca-certs") pod "a7961ca9-f70f-4cb7-97c3-440a53316f29" (UID: "a7961ca9-f70f-4cb7-97c3-440a53316f29"). InnerVolumeSpecName "ca-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 09:08:16 crc kubenswrapper[4492]: I1126 09:08:16.884071 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a7961ca9-f70f-4cb7-97c3-440a53316f29-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "a7961ca9-f70f-4cb7-97c3-440a53316f29" (UID: "a7961ca9-f70f-4cb7-97c3-440a53316f29"). InnerVolumeSpecName "openstack-config-secret". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 09:08:16 crc kubenswrapper[4492]: I1126 09:08:16.897458 4492 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a7961ca9-f70f-4cb7-97c3-440a53316f29-openstack-config" (OuterVolumeSpecName: "openstack-config") pod "a7961ca9-f70f-4cb7-97c3-440a53316f29" (UID: "a7961ca9-f70f-4cb7-97c3-440a53316f29"). InnerVolumeSpecName "openstack-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 09:08:16 crc kubenswrapper[4492]: I1126 09:08:16.951157 4492 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/a7961ca9-f70f-4cb7-97c3-440a53316f29-openstack-config-secret\") on node \"crc\" DevicePath \"\""
Nov 26 09:08:16 crc kubenswrapper[4492]: I1126 09:08:16.951197 4492 reconciler_common.go:293] "Volume detached for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/a7961ca9-f70f-4cb7-97c3-440a53316f29-test-operator-ephemeral-workdir\") on node \"crc\" DevicePath \"\""
Nov 26 09:08:16 crc kubenswrapper[4492]: I1126 09:08:16.951209 4492 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/a7961ca9-f70f-4cb7-97c3-440a53316f29-openstack-config\") on node \"crc\" DevicePath \"\""
Nov 26 09:08:16 crc kubenswrapper[4492]: I1126 09:08:16.951263 4492 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" "
Nov 26 09:08:16 crc kubenswrapper[4492]: I1126 09:08:16.951272 4492 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/a7961ca9-f70f-4cb7-97c3-440a53316f29-config-data\") on node \"crc\" DevicePath \"\""
Nov 26 09:08:16 crc kubenswrapper[4492]: I1126 09:08:16.951285 4492 reconciler_common.go:293] "Volume detached for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/a7961ca9-f70f-4cb7-97c3-440a53316f29-test-operator-ephemeral-temporary\") on node \"crc\" DevicePath \"\""
Nov 26 09:08:16 crc kubenswrapper[4492]: I1126 09:08:16.951295 4492 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k94mg\" (UniqueName: \"kubernetes.io/projected/a7961ca9-f70f-4cb7-97c3-440a53316f29-kube-api-access-k94mg\") on node \"crc\" DevicePath \"\""
Nov 26 09:08:16 crc kubenswrapper[4492]: I1126 09:08:16.951303 4492 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a7961ca9-f70f-4cb7-97c3-440a53316f29-ssh-key\") on node \"crc\" DevicePath \"\""
Nov 26 09:08:16 crc kubenswrapper[4492]: I1126 09:08:16.951310 4492 reconciler_common.go:293] "Volume detached for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/a7961ca9-f70f-4cb7-97c3-440a53316f29-ca-certs\") on node \"crc\" DevicePath \"\""
Nov 26 09:08:16 crc kubenswrapper[4492]: I1126 09:08:16.968563 4492 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage12-crc" (UniqueName: "kubernetes.io/local-volume/local-storage12-crc") on node "crc"
Nov 26 09:08:17 crc kubenswrapper[4492]: I1126 09:08:17.018465 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest-s01-single-thread-testing" event={"ID":"a7961ca9-f70f-4cb7-97c3-440a53316f29","Type":"ContainerDied","Data":"4834cbf127ee0534b76a2a93406830d34e0bf77096d47b3c2bf46627ef2267d0"}
Nov 26 09:08:17 crc kubenswrapper[4492]: I1126 09:08:17.018527 4492 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4834cbf127ee0534b76a2a93406830d34e0bf77096d47b3c2bf46627ef2267d0"
Nov 26 09:08:17 crc kubenswrapper[4492]: I1126 09:08:17.018840 4492 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest-s01-single-thread-testing"
Nov 26 09:08:17 crc kubenswrapper[4492]: I1126 09:08:17.052128 4492 reconciler_common.go:293] "Volume detached for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" DevicePath \"\""
Nov 26 09:08:18 crc kubenswrapper[4492]: I1126 09:08:18.953977 4492 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"]
Nov 26 09:08:18 crc kubenswrapper[4492]: E1126 09:08:18.956354 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a7961ca9-f70f-4cb7-97c3-440a53316f29" containerName="tempest-tests-tempest-tests-runner"
Nov 26 09:08:18 crc kubenswrapper[4492]: I1126 09:08:18.956378 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="a7961ca9-f70f-4cb7-97c3-440a53316f29" containerName="tempest-tests-tempest-tests-runner"
Nov 26 09:08:18 crc kubenswrapper[4492]: E1126 09:08:18.956671 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a978e747-8a24-42e9-bd02-cfbffad340c7" containerName="extract-utilities"
Nov 26 09:08:18 crc kubenswrapper[4492]: I1126 09:08:18.956685 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="a978e747-8a24-42e9-bd02-cfbffad340c7" containerName="extract-utilities"
Nov 26 09:08:18 crc kubenswrapper[4492]: E1126 09:08:18.956718 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a978e747-8a24-42e9-bd02-cfbffad340c7" containerName="extract-content"
Nov 26 09:08:18 crc kubenswrapper[4492]: I1126 09:08:18.956724 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="a978e747-8a24-42e9-bd02-cfbffad340c7" containerName="extract-content"
Nov 26 09:08:18 crc kubenswrapper[4492]: E1126 09:08:18.956764 4492 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a978e747-8a24-42e9-bd02-cfbffad340c7" containerName="registry-server"
Nov 26 09:08:18 crc kubenswrapper[4492]: I1126 09:08:18.956770 4492 state_mem.go:107] "Deleted CPUSet assignment" podUID="a978e747-8a24-42e9-bd02-cfbffad340c7" containerName="registry-server"
Nov 26 09:08:18 crc kubenswrapper[4492]: I1126 09:08:18.957684 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="a978e747-8a24-42e9-bd02-cfbffad340c7" containerName="registry-server"
Nov 26 09:08:18 crc kubenswrapper[4492]: I1126 09:08:18.957746 4492 memory_manager.go:354] "RemoveStaleState removing state" podUID="a7961ca9-f70f-4cb7-97c3-440a53316f29" containerName="tempest-tests-tempest-tests-runner"
Nov 26 09:08:18 crc kubenswrapper[4492]: I1126 09:08:18.960396 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Nov 26 09:08:18 crc kubenswrapper[4492]: I1126 09:08:18.966022 4492 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-flqkh"
Nov 26 09:08:18 crc kubenswrapper[4492]: I1126 09:08:18.977042 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"]
Nov 26 09:08:19 crc kubenswrapper[4492]: I1126 09:08:19.099550 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"56059bfb-0bd6-43b6-8e83-5bc40b704909\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Nov 26 09:08:19 crc kubenswrapper[4492]: I1126 09:08:19.100507 4492 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kl2jl\" (UniqueName: \"kubernetes.io/projected/56059bfb-0bd6-43b6-8e83-5bc40b704909-kube-api-access-kl2jl\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"56059bfb-0bd6-43b6-8e83-5bc40b704909\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Nov 26 09:08:19 crc kubenswrapper[4492]: I1126 09:08:19.203727 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kl2jl\" (UniqueName: \"kubernetes.io/projected/56059bfb-0bd6-43b6-8e83-5bc40b704909-kube-api-access-kl2jl\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"56059bfb-0bd6-43b6-8e83-5bc40b704909\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Nov 26 09:08:19 crc kubenswrapper[4492]: I1126 09:08:19.203860 4492 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"56059bfb-0bd6-43b6-8e83-5bc40b704909\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Nov 26 09:08:19 crc kubenswrapper[4492]: I1126 09:08:19.204869 4492 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"56059bfb-0bd6-43b6-8e83-5bc40b704909\") device mount path \"/mnt/openstack/pv12\"" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Nov 26 09:08:19 crc kubenswrapper[4492]: I1126 09:08:19.222353 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kl2jl\" (UniqueName: \"kubernetes.io/projected/56059bfb-0bd6-43b6-8e83-5bc40b704909-kube-api-access-kl2jl\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"56059bfb-0bd6-43b6-8e83-5bc40b704909\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Nov 26 09:08:19 crc kubenswrapper[4492]: I1126 09:08:19.230209 4492 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"56059bfb-0bd6-43b6-8e83-5bc40b704909\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Nov 26 09:08:19 crc kubenswrapper[4492]: I1126 09:08:19.284918 4492 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Nov 26 09:08:19 crc kubenswrapper[4492]: I1126 09:08:19.441186 4492 patch_prober.go:28] interesting pod/machine-config-daemon-6blv7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 26 09:08:19 crc kubenswrapper[4492]: I1126 09:08:19.441536 4492 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 26 09:08:19 crc kubenswrapper[4492]: I1126 09:08:19.761955 4492 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"]
Nov 26 09:08:20 crc kubenswrapper[4492]: I1126 09:08:20.049988 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" event={"ID":"56059bfb-0bd6-43b6-8e83-5bc40b704909","Type":"ContainerStarted","Data":"5a263b1d0ebb1f792fe5645caa98224a7dce4da386dd0920a9c2093009602fcc"}
Nov 26 09:08:22 crc kubenswrapper[4492]: I1126 09:08:22.073910 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" event={"ID":"56059bfb-0bd6-43b6-8e83-5bc40b704909","Type":"ContainerStarted","Data":"293a8b496692aab67b061977e901b45840f1196144e0c60de334cc1074cf52cb"}
Nov 26 09:08:22 crc kubenswrapper[4492]: I1126 09:08:22.097709 4492 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" podStartSLOduration=2.336206791 podStartE2EDuration="4.097052301s" podCreationTimestamp="2025-11-26 09:08:18 +0000 UTC" firstStartedPulling="2025-11-26 09:08:19.774626813 +0000 UTC m=+8395.658515111" lastFinishedPulling="2025-11-26 09:08:21.535472323 +0000 UTC m=+8397.419360621" observedRunningTime="2025-11-26 09:08:22.088463333 +0000 UTC m=+8397.972351632" watchObservedRunningTime="2025-11-26 09:08:22.097052301 +0000 UTC m=+8397.980940599"
Nov 26 09:08:49 crc kubenswrapper[4492]: I1126 09:08:49.441607 4492 patch_prober.go:28] interesting pod/machine-config-daemon-6blv7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 26 09:08:49 crc kubenswrapper[4492]: I1126 09:08:49.442365 4492 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 26 09:09:19 crc kubenswrapper[4492]: I1126 09:09:19.441295 4492 patch_prober.go:28] interesting pod/machine-config-daemon-6blv7 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 26 09:09:19 crc kubenswrapper[4492]: I1126 09:09:19.442260 4492 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 26 09:09:19 crc kubenswrapper[4492]: I1126 09:09:19.442318 4492 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-6blv7"
Nov 26 09:09:19 crc kubenswrapper[4492]: I1126 09:09:19.442737 4492 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"26ad1ff2e3509799014046a9dc1337022ea620be2e50ca261502e30860d2b41d"} pod="openshift-machine-config-operator/machine-config-daemon-6blv7" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 26 09:09:19 crc kubenswrapper[4492]: I1126 09:09:19.442801 4492 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerName="machine-config-daemon" containerID="cri-o://26ad1ff2e3509799014046a9dc1337022ea620be2e50ca261502e30860d2b41d" gracePeriod=600
Nov 26 09:09:19 crc kubenswrapper[4492]: E1126 09:09:19.560542 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82"
Nov 26 09:09:19 crc kubenswrapper[4492]: I1126 09:09:19.638678 4492 generic.go:334] "Generic (PLEG): container finished" podID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82" containerID="26ad1ff2e3509799014046a9dc1337022ea620be2e50ca261502e30860d2b41d" exitCode=0
Nov 26 09:09:19 crc kubenswrapper[4492]: I1126 09:09:19.638731 4492 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" event={"ID":"04bf18ad-d2a1-4b30-a3fa-2b6247363c82","Type":"ContainerDied","Data":"26ad1ff2e3509799014046a9dc1337022ea620be2e50ca261502e30860d2b41d"}
Nov 26 09:09:19 crc kubenswrapper[4492]: I1126 09:09:19.638778 4492 scope.go:117] "RemoveContainer" containerID="061e1f81f1523052be73476b27602effef5c98d09cef1e78cf7988c4252da260"
Nov 26 09:09:19 crc kubenswrapper[4492]: I1126 09:09:19.639600 4492 scope.go:117] "RemoveContainer" containerID="26ad1ff2e3509799014046a9dc1337022ea620be2e50ca261502e30860d2b41d"
Nov 26 09:09:19 crc kubenswrapper[4492]: E1126 09:09:19.639861 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82"
Nov 26 09:09:31 crc kubenswrapper[4492]: I1126 09:09:31.439048 4492 scope.go:117] "RemoveContainer" containerID="26ad1ff2e3509799014046a9dc1337022ea620be2e50ca261502e30860d2b41d"
Nov 26 09:09:31 crc kubenswrapper[4492]: E1126 09:09:31.442639 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82"
Nov 26 09:09:46 crc kubenswrapper[4492]: I1126 09:09:46.439444 4492 scope.go:117] "RemoveContainer" containerID="26ad1ff2e3509799014046a9dc1337022ea620be2e50ca261502e30860d2b41d"
Nov 26 09:09:46 crc kubenswrapper[4492]: E1126 09:09:46.440409 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82"
Nov 26 09:09:59 crc kubenswrapper[4492]: I1126 09:09:59.438133 4492 scope.go:117] "RemoveContainer" containerID="26ad1ff2e3509799014046a9dc1337022ea620be2e50ca261502e30860d2b41d"
Nov 26 09:09:59 crc kubenswrapper[4492]: E1126 09:09:59.438755 4492 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6blv7_openshift-machine-config-operator(04bf18ad-d2a1-4b30-a3fa-2b6247363c82)\"" pod="openshift-machine-config-operator/machine-config-daemon-6blv7" podUID="04bf18ad-d2a1-4b30-a3fa-2b6247363c82"
var/home/core/zuul-output/logs/crc-cloud-workdir-crc-all-logs.tar.gz0000644000175000000000000000005515111541770024447 0ustar coreroot
var/home/core/zuul-output/logs/crc-cloud/0000755000175000000000000000000015111541770017364 5ustar coreroot
var/home/core/zuul-output/artifacts/0000755000175000017500000000000015111521027016500 5ustar corecore
var/home/core/zuul-output/docs/0000755000175000017500000000000015111521030015442 5ustar corecore